ARMFastISel.cpp revision d054eda44114df411a2749e7b6b85d27509a0af1
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0) {
      Base.Reg = 0;
    }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo)
    : FastISel(funcInfo, libInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
  private:
    unsigned FastEmitInst_(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC);
    unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            unsigned Op0, bool Op0IsKill);
    unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             unsigned Op1, bool Op1IsKill);
    unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              unsigned Op2, bool Op2IsKill);
    unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             uint64_t Imm);
    unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill,
                             const ConstantFP *FPImm);
    unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                              const TargetRegisterClass *RC,
                              unsigned Op0, bool Op0IsKill,
                              unsigned Op1, bool Op1IsKill,
                              uint64_t Imm);
    unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            uint64_t Imm);
    unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             uint64_t Imm1, uint64_t Imm2);

    unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                        unsigned Op0, bool Op0IsKill,
                                        uint32_t Idx);

    // Backend specific FastISel code.
  private:
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                               const LoadInst *LI);
  private:
  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);
    bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);

    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);
    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
    unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(bool UseReg);
    unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, EVT VT);

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
                                  bool Return,
                                  bool isVarArg);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes,
                         bool isVarArg);
    unsigned getLibcallReg(const Twine &Name);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes, bool isVarArg);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If this is a Thumb2 function, or the instruction isn't in the NEON
  // domain, it was already handled via isPredicable.
253 if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON || 254 AFI->isThumb2Function()) 255 return false; 256 257 for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) 258 if (MCID.OpInfo[i].isPredicate()) 259 return true; 260 261 return false; 262} 263 264// If the machine is predicable go ahead and add the predicate operands, if 265// it needs default CC operands add those. 266// TODO: If we want to support thumb1 then we'll need to deal with optional 267// CPSR defs that need to be added before the remaining operands. See s_cc_out 268// for descriptions why. 269const MachineInstrBuilder & 270ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) { 271 MachineInstr *MI = &*MIB; 272 273 // Do we use a predicate? or... 274 // Are we NEON in ARM mode and have a predicate operand? If so, I know 275 // we're not predicable but add it anyways. 276 if (TII.isPredicable(MI) || isARMNEONPred(MI)) 277 AddDefaultPred(MIB); 278 279 // Do we optionally set a predicate? Preds is size > 0 iff the predicate 280 // defines CPSR. All other OptionalDefines in ARM are the CCR register. 281 bool CPSR = false; 282 if (DefinesOptionalPredicate(MI, &CPSR)) { 283 if (CPSR) 284 AddDefaultT1CC(MIB); 285 else 286 AddDefaultCC(MIB); 287 } 288 return MIB; 289} 290 291unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode, 292 const TargetRegisterClass* RC) { 293 unsigned ResultReg = createResultReg(RC); 294 const MCInstrDesc &II = TII.get(MachineInstOpcode); 295 296 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)); 297 return ResultReg; 298} 299 300unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode, 301 const TargetRegisterClass *RC, 302 unsigned Op0, bool Op0IsKill) { 303 unsigned ResultReg = createResultReg(RC); 304 const MCInstrDesc &II = TII.get(MachineInstOpcode); 305 306 if (II.getNumDefs() >= 1) { 307 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 308 .addReg(Op0, Op0IsKill * RegState::Kill)); 309 } else { 310 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 311 .addReg(Op0, Op0IsKill * RegState::Kill)); 312 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 313 TII.get(TargetOpcode::COPY), ResultReg) 314 .addReg(II.ImplicitDefs[0])); 315 } 316 return ResultReg; 317} 318 319unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode, 320 const TargetRegisterClass *RC, 321 unsigned Op0, bool Op0IsKill, 322 unsigned Op1, bool Op1IsKill) { 323 unsigned ResultReg = createResultReg(RC); 324 const MCInstrDesc &II = TII.get(MachineInstOpcode); 325 326 if (II.getNumDefs() >= 1) { 327 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 328 .addReg(Op0, Op0IsKill * RegState::Kill) 329 .addReg(Op1, Op1IsKill * RegState::Kill)); 330 } else { 331 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 332 .addReg(Op0, Op0IsKill * RegState::Kill) 333 .addReg(Op1, Op1IsKill * RegState::Kill)); 334 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 335 TII.get(TargetOpcode::COPY), ResultReg) 336 .addReg(II.ImplicitDefs[0])); 337 } 338 return ResultReg; 339} 340 341unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode, 342 const TargetRegisterClass *RC, 343 unsigned Op0, bool Op0IsKill, 344 unsigned Op1, bool Op1IsKill, 345 unsigned Op2, bool Op2IsKill) { 346 unsigned ResultReg = createResultReg(RC); 347 const MCInstrDesc &II = TII.get(MachineInstOpcode); 348 349 if (II.getNumDefs() >= 1) { 350 AddOptionalDefs(BuildMI(*FuncInfo.MBB, 
FuncInfo.InsertPt, DL, II, ResultReg) 351 .addReg(Op0, Op0IsKill * RegState::Kill) 352 .addReg(Op1, Op1IsKill * RegState::Kill) 353 .addReg(Op2, Op2IsKill * RegState::Kill)); 354 } else { 355 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 356 .addReg(Op0, Op0IsKill * RegState::Kill) 357 .addReg(Op1, Op1IsKill * RegState::Kill) 358 .addReg(Op2, Op2IsKill * RegState::Kill)); 359 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 360 TII.get(TargetOpcode::COPY), ResultReg) 361 .addReg(II.ImplicitDefs[0])); 362 } 363 return ResultReg; 364} 365 366unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode, 367 const TargetRegisterClass *RC, 368 unsigned Op0, bool Op0IsKill, 369 uint64_t Imm) { 370 unsigned ResultReg = createResultReg(RC); 371 const MCInstrDesc &II = TII.get(MachineInstOpcode); 372 373 if (II.getNumDefs() >= 1) { 374 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 375 .addReg(Op0, Op0IsKill * RegState::Kill) 376 .addImm(Imm)); 377 } else { 378 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 379 .addReg(Op0, Op0IsKill * RegState::Kill) 380 .addImm(Imm)); 381 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 382 TII.get(TargetOpcode::COPY), ResultReg) 383 .addReg(II.ImplicitDefs[0])); 384 } 385 return ResultReg; 386} 387 388unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode, 389 const TargetRegisterClass *RC, 390 unsigned Op0, bool Op0IsKill, 391 const ConstantFP *FPImm) { 392 unsigned ResultReg = createResultReg(RC); 393 const MCInstrDesc &II = TII.get(MachineInstOpcode); 394 395 if (II.getNumDefs() >= 1) { 396 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 397 .addReg(Op0, Op0IsKill * RegState::Kill) 398 .addFPImm(FPImm)); 399 } else { 400 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 401 .addReg(Op0, Op0IsKill * RegState::Kill) 402 .addFPImm(FPImm)); 403 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 404 TII.get(TargetOpcode::COPY), ResultReg) 405 .addReg(II.ImplicitDefs[0])); 406 } 407 return ResultReg; 408} 409 410unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode, 411 const TargetRegisterClass *RC, 412 unsigned Op0, bool Op0IsKill, 413 unsigned Op1, bool Op1IsKill, 414 uint64_t Imm) { 415 unsigned ResultReg = createResultReg(RC); 416 const MCInstrDesc &II = TII.get(MachineInstOpcode); 417 418 if (II.getNumDefs() >= 1) { 419 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 420 .addReg(Op0, Op0IsKill * RegState::Kill) 421 .addReg(Op1, Op1IsKill * RegState::Kill) 422 .addImm(Imm)); 423 } else { 424 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 425 .addReg(Op0, Op0IsKill * RegState::Kill) 426 .addReg(Op1, Op1IsKill * RegState::Kill) 427 .addImm(Imm)); 428 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 429 TII.get(TargetOpcode::COPY), ResultReg) 430 .addReg(II.ImplicitDefs[0])); 431 } 432 return ResultReg; 433} 434 435unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode, 436 const TargetRegisterClass *RC, 437 uint64_t Imm) { 438 unsigned ResultReg = createResultReg(RC); 439 const MCInstrDesc &II = TII.get(MachineInstOpcode); 440 441 if (II.getNumDefs() >= 1) { 442 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 443 .addImm(Imm)); 444 } else { 445 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 446 .addImm(Imm)); 447 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 
DL, 448 TII.get(TargetOpcode::COPY), ResultReg) 449 .addReg(II.ImplicitDefs[0])); 450 } 451 return ResultReg; 452} 453 454unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode, 455 const TargetRegisterClass *RC, 456 uint64_t Imm1, uint64_t Imm2) { 457 unsigned ResultReg = createResultReg(RC); 458 const MCInstrDesc &II = TII.get(MachineInstOpcode); 459 460 if (II.getNumDefs() >= 1) { 461 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 462 .addImm(Imm1).addImm(Imm2)); 463 } else { 464 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 465 .addImm(Imm1).addImm(Imm2)); 466 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 467 TII.get(TargetOpcode::COPY), 468 ResultReg) 469 .addReg(II.ImplicitDefs[0])); 470 } 471 return ResultReg; 472} 473 474unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT, 475 unsigned Op0, bool Op0IsKill, 476 uint32_t Idx) { 477 unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT)); 478 assert(TargetRegisterInfo::isVirtualRegister(Op0) && 479 "Cannot yet extract from physregs"); 480 481 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 482 DL, TII.get(TargetOpcode::COPY), ResultReg) 483 .addReg(Op0, getKillRegState(Op0IsKill), Idx)); 484 return ResultReg; 485} 486 487// TODO: Don't worry about 64-bit now, but when this is fixed remove the 488// checks from the various callers. 489unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) { 490 if (VT == MVT::f64) return 0; 491 492 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT)); 493 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 494 TII.get(ARM::VMOVSR), MoveReg) 495 .addReg(SrcReg)); 496 return MoveReg; 497} 498 499unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) { 500 if (VT == MVT::i64) return 0; 501 502 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT)); 503 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 504 TII.get(ARM::VMOVRS), MoveReg) 505 .addReg(SrcReg)); 506 return MoveReg; 507} 508 509// For double width floating point we need to materialize two constants 510// (the high and the low) into integer registers then use a move to get 511// the combined constant into an FP reg. 512unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) { 513 const APFloat Val = CFP->getValueAPF(); 514 bool is64bit = VT == MVT::f64; 515 516 // This checks to see if we can use VFP3 instructions to materialize 517 // a constant, otherwise we have to go through the constant pool. 518 if (TLI.isFPImmLegal(Val, VT)) { 519 int Imm; 520 unsigned Opc; 521 if (is64bit) { 522 Imm = ARM_AM::getFP64Imm(Val); 523 Opc = ARM::FCONSTD; 524 } else { 525 Imm = ARM_AM::getFP32Imm(Val); 526 Opc = ARM::FCONSTS; 527 } 528 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 529 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 530 DestReg) 531 .addImm(Imm)); 532 return DestReg; 533 } 534 535 // Require VFP2 for loading fp constants. 536 if (!Subtarget->hasVFP2()) return false; 537 538 // MachineConstantPool wants an explicit alignment. 539 unsigned Align = TD.getPrefTypeAlignment(CFP->getType()); 540 if (Align == 0) { 541 // TODO: Figure out if this is correct. 542 Align = TD.getTypeAllocSize(CFP->getType()); 543 } 544 unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align); 545 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 546 unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS; 547 548 // The extra reg is for addrmode5. 
549 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 550 DestReg) 551 .addConstantPoolIndex(Idx) 552 .addReg(0)); 553 return DestReg; 554} 555 556unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) { 557 558 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1) 559 return false; 560 561 // If we can do this in a single instruction without a constant pool entry 562 // do so now. 563 const ConstantInt *CI = cast<ConstantInt>(C); 564 if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) { 565 unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16; 566 unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 567 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 568 TII.get(Opc), ImmReg) 569 .addImm(CI->getZExtValue())); 570 return ImmReg; 571 } 572 573 // Use MVN to emit negative constants. 574 if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) { 575 unsigned Imm = (unsigned)~(CI->getSExtValue()); 576 bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 577 (ARM_AM::getSOImmVal(Imm) != -1); 578 if (UseImm) { 579 unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi; 580 unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 581 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 582 TII.get(Opc), ImmReg) 583 .addImm(Imm)); 584 return ImmReg; 585 } 586 } 587 588 // Load from constant pool. For now 32-bit only. 589 if (VT != MVT::i32) 590 return false; 591 592 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 593 594 // MachineConstantPool wants an explicit alignment. 595 unsigned Align = TD.getPrefTypeAlignment(C->getType()); 596 if (Align == 0) { 597 // TODO: Figure out if this is correct. 598 Align = TD.getTypeAllocSize(C->getType()); 599 } 600 unsigned Idx = MCP.getConstantPoolIndex(C, Align); 601 602 if (isThumb2) 603 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 604 TII.get(ARM::t2LDRpci), DestReg) 605 .addConstantPoolIndex(Idx)); 606 else 607 // The extra immediate is for addrmode2. 608 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 609 TII.get(ARM::LDRcp), DestReg) 610 .addConstantPoolIndex(Idx) 611 .addImm(0)); 612 613 return DestReg; 614} 615 616unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) { 617 // For now 32-bit only. 618 if (VT != MVT::i32) return 0; 619 620 Reloc::Model RelocM = TM.getRelocationModel(); 621 bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM); 622 const TargetRegisterClass *RC = isThumb2 ? 623 (const TargetRegisterClass*)&ARM::rGPRRegClass : 624 (const TargetRegisterClass*)&ARM::GPRRegClass; 625 unsigned DestReg = createResultReg(RC); 626 627 // Use movw+movt when possible, it avoids constant pool entries. 628 // Darwin targets don't support movt with Reloc::Static, see 629 // ARMTargetLowering::LowerGlobalAddressDarwin. Other targets only support 630 // static movt relocations. 631 if (Subtarget->useMovt() && 632 Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) { 633 unsigned Opc; 634 switch (RelocM) { 635 case Reloc::PIC_: 636 Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel; 637 break; 638 case Reloc::DynamicNoPIC: 639 Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn; 640 break; 641 default: 642 Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm; 643 break; 644 } 645 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 646 DestReg).addGlobalAddress(GV)); 647 } else { 648 // MachineConstantPool wants an explicit alignment. 
649 unsigned Align = TD.getPrefTypeAlignment(GV->getType()); 650 if (Align == 0) { 651 // TODO: Figure out if this is correct. 652 Align = TD.getTypeAllocSize(GV->getType()); 653 } 654 655 if (Subtarget->isTargetELF() && RelocM == Reloc::PIC_) 656 return ARMLowerPICELF(GV, Align, VT); 657 658 // Grab index. 659 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : 660 (Subtarget->isThumb() ? 4 : 8); 661 unsigned Id = AFI->createPICLabelUId(); 662 ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id, 663 ARMCP::CPValue, 664 PCAdj); 665 unsigned Idx = MCP.getConstantPoolIndex(CPV, Align); 666 667 // Load value. 668 MachineInstrBuilder MIB; 669 if (isThumb2) { 670 unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic; 671 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg) 672 .addConstantPoolIndex(Idx); 673 if (RelocM == Reloc::PIC_) 674 MIB.addImm(Id); 675 AddOptionalDefs(MIB); 676 } else { 677 // The extra immediate is for addrmode2. 678 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp), 679 DestReg) 680 .addConstantPoolIndex(Idx) 681 .addImm(0); 682 AddOptionalDefs(MIB); 683 684 if (RelocM == Reloc::PIC_) { 685 unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD; 686 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); 687 688 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 689 DL, TII.get(Opc), NewDestReg) 690 .addReg(DestReg) 691 .addImm(Id); 692 AddOptionalDefs(MIB); 693 return NewDestReg; 694 } 695 } 696 } 697 698 if (IsIndirect) { 699 MachineInstrBuilder MIB; 700 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); 701 if (isThumb2) 702 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 703 TII.get(ARM::t2LDRi12), NewDestReg) 704 .addReg(DestReg) 705 .addImm(0); 706 else 707 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12), 708 NewDestReg) 709 .addReg(DestReg) 710 .addImm(0); 711 DestReg = NewDestReg; 712 AddOptionalDefs(MIB); 713 } 714 715 return DestReg; 716} 717 718unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) { 719 EVT VT = TLI.getValueType(C->getType(), true); 720 721 // Only handle simple types. 722 if (!VT.isSimple()) return 0; 723 724 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) 725 return ARMMaterializeFP(CFP, VT); 726 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) 727 return ARMMaterializeGV(GV, VT); 728 else if (isa<ConstantInt>(C)) 729 return ARMMaterializeInt(C, VT); 730 731 return 0; 732} 733 734// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF); 735 736unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) { 737 // Don't handle dynamic allocas. 738 if (!FuncInfo.StaticAllocaMap.count(AI)) return 0; 739 740 MVT VT; 741 if (!isLoadTypeLegal(AI->getType(), VT)) return 0; 742 743 DenseMap<const AllocaInst*, int>::iterator SI = 744 FuncInfo.StaticAllocaMap.find(AI); 745 746 // This will get lowered later into the correct offsets and registers 747 // via rewriteXFrameIndex. 748 if (SI != FuncInfo.StaticAllocaMap.end()) { 749 const TargetRegisterClass* RC = TLI.getRegClassFor(VT); 750 unsigned ResultReg = createResultReg(RC); 751 unsigned Opc = isThumb2 ? 
ARM::t2ADDri : ARM::ADDri; 752 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 753 TII.get(Opc), ResultReg) 754 .addFrameIndex(SI->second) 755 .addImm(0)); 756 return ResultReg; 757 } 758 759 return 0; 760} 761 762bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) { 763 EVT evt = TLI.getValueType(Ty, true); 764 765 // Only handle simple types. 766 if (evt == MVT::Other || !evt.isSimple()) return false; 767 VT = evt.getSimpleVT(); 768 769 // Handle all legal types, i.e. a register that will directly hold this 770 // value. 771 return TLI.isTypeLegal(VT); 772} 773 774bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) { 775 if (isTypeLegal(Ty, VT)) return true; 776 777 // If this is a type than can be sign or zero-extended to a basic operation 778 // go ahead and accept it now. 779 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16) 780 return true; 781 782 return false; 783} 784 785// Computes the address to get to an object. 786bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) { 787 // Some boilerplate from the X86 FastISel. 788 const User *U = NULL; 789 unsigned Opcode = Instruction::UserOp1; 790 if (const Instruction *I = dyn_cast<Instruction>(Obj)) { 791 // Don't walk into other basic blocks unless the object is an alloca from 792 // another block, otherwise it may not have a virtual register assigned. 793 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) || 794 FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) { 795 Opcode = I->getOpcode(); 796 U = I; 797 } 798 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) { 799 Opcode = C->getOpcode(); 800 U = C; 801 } 802 803 if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType())) 804 if (Ty->getAddressSpace() > 255) 805 // Fast instruction selection doesn't support the special 806 // address spaces. 807 return false; 808 809 switch (Opcode) { 810 default: 811 break; 812 case Instruction::BitCast: { 813 // Look through bitcasts. 814 return ARMComputeAddress(U->getOperand(0), Addr); 815 } 816 case Instruction::IntToPtr: { 817 // Look past no-op inttoptrs. 818 if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy()) 819 return ARMComputeAddress(U->getOperand(0), Addr); 820 break; 821 } 822 case Instruction::PtrToInt: { 823 // Look past no-op ptrtoints. 824 if (TLI.getValueType(U->getType()) == TLI.getPointerTy()) 825 return ARMComputeAddress(U->getOperand(0), Addr); 826 break; 827 } 828 case Instruction::GetElementPtr: { 829 Address SavedAddr = Addr; 830 int TmpOffset = Addr.Offset; 831 832 // Iterate through the GEP folding the constants into offsets where 833 // we can. 834 gep_type_iterator GTI = gep_type_begin(U); 835 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); 836 i != e; ++i, ++GTI) { 837 const Value *Op = *i; 838 if (StructType *STy = dyn_cast<StructType>(*GTI)) { 839 const StructLayout *SL = TD.getStructLayout(STy); 840 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue(); 841 TmpOffset += SL->getElementOffset(Idx); 842 } else { 843 uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType()); 844 for (;;) { 845 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) { 846 // Constant-offset addressing. 847 TmpOffset += CI->getSExtValue() * S; 848 break; 849 } 850 if (isa<AddOperator>(Op) && 851 (!isa<Instruction>(Op) || 852 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()] 853 == FuncInfo.MBB) && 854 isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) { 855 // An add (in the same block) with a constant operand. 
Fold the 856 // constant. 857 ConstantInt *CI = 858 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1)); 859 TmpOffset += CI->getSExtValue() * S; 860 // Iterate on the other operand. 861 Op = cast<AddOperator>(Op)->getOperand(0); 862 continue; 863 } 864 // Unsupported 865 goto unsupported_gep; 866 } 867 } 868 } 869 870 // Try to grab the base operand now. 871 Addr.Offset = TmpOffset; 872 if (ARMComputeAddress(U->getOperand(0), Addr)) return true; 873 874 // We failed, restore everything and try the other options. 875 Addr = SavedAddr; 876 877 unsupported_gep: 878 break; 879 } 880 case Instruction::Alloca: { 881 const AllocaInst *AI = cast<AllocaInst>(Obj); 882 DenseMap<const AllocaInst*, int>::iterator SI = 883 FuncInfo.StaticAllocaMap.find(AI); 884 if (SI != FuncInfo.StaticAllocaMap.end()) { 885 Addr.BaseType = Address::FrameIndexBase; 886 Addr.Base.FI = SI->second; 887 return true; 888 } 889 break; 890 } 891 } 892 893 // Try to get this in a register if nothing else has worked. 894 if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj); 895 return Addr.Base.Reg != 0; 896} 897 898void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) { 899 900 assert(VT.isSimple() && "Non-simple types are invalid here!"); 901 902 bool needsLowering = false; 903 switch (VT.getSimpleVT().SimpleTy) { 904 default: llvm_unreachable("Unhandled load/store type!"); 905 case MVT::i1: 906 case MVT::i8: 907 case MVT::i16: 908 case MVT::i32: 909 if (!useAM3) { 910 // Integer loads/stores handle 12-bit offsets. 911 needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset); 912 // Handle negative offsets. 913 if (needsLowering && isThumb2) 914 needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 && 915 Addr.Offset > -256); 916 } else { 917 // ARM halfword load/stores and signed byte loads use +/-imm8 offsets. 918 needsLowering = (Addr.Offset > 255 || Addr.Offset < -255); 919 } 920 break; 921 case MVT::f32: 922 case MVT::f64: 923 // Floating point operands handle 8-bit offsets. 924 needsLowering = ((Addr.Offset & 0xff) != Addr.Offset); 925 break; 926 } 927 928 // If this is a stack pointer and the offset needs to be simplified then 929 // put the alloca address into a register, set the base type back to 930 // register and continue. This should almost never happen. 931 if (needsLowering && Addr.BaseType == Address::FrameIndexBase) { 932 const TargetRegisterClass *RC = isThumb2 ? 933 (const TargetRegisterClass*)&ARM::tGPRRegClass : 934 (const TargetRegisterClass*)&ARM::GPRRegClass; 935 unsigned ResultReg = createResultReg(RC); 936 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri; 937 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 938 TII.get(Opc), ResultReg) 939 .addFrameIndex(Addr.Base.FI) 940 .addImm(0)); 941 Addr.Base.Reg = ResultReg; 942 Addr.BaseType = Address::RegBase; 943 } 944 945 // Since the offset is too large for the load/store instruction 946 // get the reg+offset into a register. 947 if (needsLowering) { 948 Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg, 949 /*Op0IsKill*/false, Addr.Offset, MVT::i32); 950 Addr.Offset = 0; 951 } 952} 953 954void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr, 955 const MachineInstrBuilder &MIB, 956 unsigned Flags, bool useAM3) { 957 // addrmode5 output depends on the selection dag addressing dividing the 958 // offset by 4 that it then later multiplies. Do this here as well. 
959 if (VT.getSimpleVT().SimpleTy == MVT::f32 || 960 VT.getSimpleVT().SimpleTy == MVT::f64) 961 Addr.Offset /= 4; 962 963 // Frame base works a bit differently. Handle it separately. 964 if (Addr.BaseType == Address::FrameIndexBase) { 965 int FI = Addr.Base.FI; 966 int Offset = Addr.Offset; 967 MachineMemOperand *MMO = 968 FuncInfo.MF->getMachineMemOperand( 969 MachinePointerInfo::getFixedStack(FI, Offset), 970 Flags, 971 MFI.getObjectSize(FI), 972 MFI.getObjectAlignment(FI)); 973 // Now add the rest of the operands. 974 MIB.addFrameIndex(FI); 975 976 // ARM halfword load/stores and signed byte loads need an additional 977 // operand. 978 if (useAM3) { 979 signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset; 980 MIB.addReg(0); 981 MIB.addImm(Imm); 982 } else { 983 MIB.addImm(Addr.Offset); 984 } 985 MIB.addMemOperand(MMO); 986 } else { 987 // Now add the rest of the operands. 988 MIB.addReg(Addr.Base.Reg); 989 990 // ARM halfword load/stores and signed byte loads need an additional 991 // operand. 992 if (useAM3) { 993 signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset; 994 MIB.addReg(0); 995 MIB.addImm(Imm); 996 } else { 997 MIB.addImm(Addr.Offset); 998 } 999 } 1000 AddOptionalDefs(MIB); 1001} 1002 1003bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr, 1004 unsigned Alignment, bool isZExt, bool allocReg) { 1005 assert(VT.isSimple() && "Non-simple types are invalid here!"); 1006 unsigned Opc; 1007 bool useAM3 = false; 1008 bool needVMOV = false; 1009 const TargetRegisterClass *RC; 1010 switch (VT.getSimpleVT().SimpleTy) { 1011 // This is mostly going to be Neon/vector support. 1012 default: return false; 1013 case MVT::i1: 1014 case MVT::i8: 1015 if (isThumb2) { 1016 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1017 Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8; 1018 else 1019 Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12; 1020 } else { 1021 if (isZExt) { 1022 Opc = ARM::LDRBi12; 1023 } else { 1024 Opc = ARM::LDRSB; 1025 useAM3 = true; 1026 } 1027 } 1028 RC = &ARM::GPRRegClass; 1029 break; 1030 case MVT::i16: 1031 if (Alignment && Alignment < 2 && (!Subtarget->allowsUnalignedMem() || 1032 TM.Options.StrictAlign)) 1033 return false; 1034 1035 if (isThumb2) { 1036 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1037 Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8; 1038 else 1039 Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12; 1040 } else { 1041 Opc = isZExt ? ARM::LDRH : ARM::LDRSH; 1042 useAM3 = true; 1043 } 1044 RC = &ARM::GPRRegClass; 1045 break; 1046 case MVT::i32: 1047 if (Alignment && Alignment < 4 && (!Subtarget->allowsUnalignedMem() || 1048 TM.Options.StrictAlign)) 1049 return false; 1050 1051 if (isThumb2) { 1052 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1053 Opc = ARM::t2LDRi8; 1054 else 1055 Opc = ARM::t2LDRi12; 1056 } else { 1057 Opc = ARM::LDRi12; 1058 } 1059 RC = &ARM::GPRRegClass; 1060 break; 1061 case MVT::f32: 1062 if (!Subtarget->hasVFP2()) return false; 1063 // Unaligned loads need special handling. Floats require word-alignment. 1064 if (Alignment && Alignment < 4) { 1065 needVMOV = true; 1066 VT = MVT::i32; 1067 Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12; 1068 RC = &ARM::GPRRegClass; 1069 } else { 1070 Opc = ARM::VLDRS; 1071 RC = TLI.getRegClassFor(VT); 1072 } 1073 break; 1074 case MVT::f64: 1075 if (!Subtarget->hasVFP2()) return false; 1076 // FIXME: Unaligned loads need special handling. Doublewords require 1077 // word-alignment. 
    if (Alignment && Alignment < 4)
      return false;

    Opc = ARM::VLDRD;
    RC = TLI.getRegClassFor(VT);
    break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert (ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load. Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ?
        (const TargetRegisterClass*)&ARM::tGPRRegClass :
        (const TargetRegisterClass*)&ARM::GPRRegClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (Alignment && Alignment < 2 && (!Subtarget->allowsUnalignedMem() ||
          TM.Options.StrictAlign))
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (Alignment && Alignment < 4 && (!Subtarget->allowsUnalignedMem() ||
          TM.Options.StrictAlign))
        return false;

      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned stores need special handling. Floats require word-alignment.
1188 if (Alignment && Alignment < 4) { 1189 unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 1190 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1191 TII.get(ARM::VMOVRS), MoveReg) 1192 .addReg(SrcReg)); 1193 SrcReg = MoveReg; 1194 VT = MVT::i32; 1195 StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12; 1196 } else { 1197 StrOpc = ARM::VSTRS; 1198 } 1199 break; 1200 case MVT::f64: 1201 if (!Subtarget->hasVFP2()) return false; 1202 // FIXME: Unaligned stores need special handling. Doublewords require 1203 // word-alignment. 1204 if (Alignment && Alignment < 4) 1205 return false; 1206 1207 StrOpc = ARM::VSTRD; 1208 break; 1209 } 1210 // Simplify this down to something we can handle. 1211 ARMSimplifyAddress(Addr, VT, useAM3); 1212 1213 // Create the base instruction, then add the operands. 1214 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1215 TII.get(StrOpc)) 1216 .addReg(SrcReg); 1217 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3); 1218 return true; 1219} 1220 1221bool ARMFastISel::SelectStore(const Instruction *I) { 1222 Value *Op0 = I->getOperand(0); 1223 unsigned SrcReg = 0; 1224 1225 // Atomic stores need special handling. 1226 if (cast<StoreInst>(I)->isAtomic()) 1227 return false; 1228 1229 // Verify we have a legal type before going any further. 1230 MVT VT; 1231 if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT)) 1232 return false; 1233 1234 // Get the value to be stored into a register. 1235 SrcReg = getRegForValue(Op0); 1236 if (SrcReg == 0) return false; 1237 1238 // See if we can handle this address. 1239 Address Addr; 1240 if (!ARMComputeAddress(I->getOperand(1), Addr)) 1241 return false; 1242 1243 if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment())) 1244 return false; 1245 return true; 1246} 1247 1248static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) { 1249 switch (Pred) { 1250 // Needs two compares... 1251 case CmpInst::FCMP_ONE: 1252 case CmpInst::FCMP_UEQ: 1253 default: 1254 // AL is our "false" for now. The other two need more compares. 1255 return ARMCC::AL; 1256 case CmpInst::ICMP_EQ: 1257 case CmpInst::FCMP_OEQ: 1258 return ARMCC::EQ; 1259 case CmpInst::ICMP_SGT: 1260 case CmpInst::FCMP_OGT: 1261 return ARMCC::GT; 1262 case CmpInst::ICMP_SGE: 1263 case CmpInst::FCMP_OGE: 1264 return ARMCC::GE; 1265 case CmpInst::ICMP_UGT: 1266 case CmpInst::FCMP_UGT: 1267 return ARMCC::HI; 1268 case CmpInst::FCMP_OLT: 1269 return ARMCC::MI; 1270 case CmpInst::ICMP_ULE: 1271 case CmpInst::FCMP_OLE: 1272 return ARMCC::LS; 1273 case CmpInst::FCMP_ORD: 1274 return ARMCC::VC; 1275 case CmpInst::FCMP_UNO: 1276 return ARMCC::VS; 1277 case CmpInst::FCMP_UGE: 1278 return ARMCC::PL; 1279 case CmpInst::ICMP_SLT: 1280 case CmpInst::FCMP_ULT: 1281 return ARMCC::LT; 1282 case CmpInst::ICMP_SLE: 1283 case CmpInst::FCMP_ULE: 1284 return ARMCC::LE; 1285 case CmpInst::FCMP_UNE: 1286 case CmpInst::ICMP_NE: 1287 return ARMCC::NE; 1288 case CmpInst::ICMP_UGE: 1289 return ARMCC::HS; 1290 case CmpInst::ICMP_ULT: 1291 return ARMCC::LO; 1292 } 1293} 1294 1295bool ARMFastISel::SelectBranch(const Instruction *I) { 1296 const BranchInst *BI = cast<BranchInst>(I); 1297 MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)]; 1298 MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)]; 1299 1300 // Simple branch support. 1301 1302 // If we can, avoid recomputing the compare - redoing it could lead to wonky 1303 // behavior. 
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
      .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare! Our block was split, and
  // now our compare lives in a predecessor block. We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register. Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
  .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
  unsigned AddrReg = getRegForValue(I->getOperand(0));
  if (AddrReg == 0) return false;

  unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
                  .addReg(AddrReg));

  const IndirectBrInst *IB = cast<IndirectBrInst>(I);
  for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
    FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[IB->getSuccessor(i)]);

  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcVT = TLI.getValueType(Ty, true);
  if (!SrcVT.isSimple()) return false;

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
      // than a cmn, because there is no way to represent 2147483648 as a
      // signed 32-bit int.
      if (Imm < 0 && Imm != (int)0x80000000) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
      break;
    case MVT::f64:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
    // Intentional fall-through.
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
      }
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16; we need to either zero extend or sign extend.
1488 if (needsExt) { 1489 SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt); 1490 if (SrcReg1 == 0) return false; 1491 if (!UseImm) { 1492 SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt); 1493 if (SrcReg2 == 0) return false; 1494 } 1495 } 1496 1497 if (!UseImm) { 1498 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1499 TII.get(CmpOpc)) 1500 .addReg(SrcReg1).addReg(SrcReg2)); 1501 } else { 1502 MachineInstrBuilder MIB; 1503 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc)) 1504 .addReg(SrcReg1); 1505 1506 // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0. 1507 if (isICmp) 1508 MIB.addImm(Imm); 1509 AddOptionalDefs(MIB); 1510 } 1511 1512 // For floating point we need to move the result to a comparison register 1513 // that we can then use for branches. 1514 if (Ty->isFloatTy() || Ty->isDoubleTy()) 1515 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1516 TII.get(ARM::FMSTAT))); 1517 return true; 1518} 1519 1520bool ARMFastISel::SelectCmp(const Instruction *I) { 1521 const CmpInst *CI = cast<CmpInst>(I); 1522 1523 // Get the compare predicate. 1524 ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate()); 1525 1526 // We may not handle every CC for now. 1527 if (ARMPred == ARMCC::AL) return false; 1528 1529 // Emit the compare. 1530 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) 1531 return false; 1532 1533 // Now set a register based on the comparison. Explicitly set the predicates 1534 // here. 1535 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; 1536 const TargetRegisterClass *RC = isThumb2 ? 1537 (const TargetRegisterClass*)&ARM::rGPRRegClass : 1538 (const TargetRegisterClass*)&ARM::GPRRegClass; 1539 unsigned DestReg = createResultReg(RC); 1540 Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0); 1541 unsigned ZeroReg = TargetMaterializeConstant(Zero); 1542 // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR. 1543 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg) 1544 .addReg(ZeroReg).addImm(1) 1545 .addImm(ARMPred).addReg(ARM::CPSR); 1546 1547 UpdateValueMap(I, DestReg); 1548 return true; 1549} 1550 1551bool ARMFastISel::SelectFPExt(const Instruction *I) { 1552 // Make sure we have VFP and that we're extending float to double. 1553 if (!Subtarget->hasVFP2()) return false; 1554 1555 Value *V = I->getOperand(0); 1556 if (!I->getType()->isDoubleTy() || 1557 !V->getType()->isFloatTy()) return false; 1558 1559 unsigned Op = getRegForValue(V); 1560 if (Op == 0) return false; 1561 1562 unsigned Result = createResultReg(&ARM::DPRRegClass); 1563 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1564 TII.get(ARM::VCVTDS), Result) 1565 .addReg(Op)); 1566 UpdateValueMap(I, Result); 1567 return true; 1568} 1569 1570bool ARMFastISel::SelectFPTrunc(const Instruction *I) { 1571 // Make sure we have VFP and that we're truncating double to float. 
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(&ARM::SPRRegClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(Src->getType(), true);
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    EVT DestVT = MVT::i32;
    SrcReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT,
                           /*isZExt*/!isSigned);
    if (SrcReg == 0) return false;
  }

  // The conversion routine works on fp-reg to fp-reg, and the operand above
  // was an integer; move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
  else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
  else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
  else return false;

  // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));

  unsigned MovCCOpc;
  if (!UseImm) {
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    if (!isNegativeImm) {
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    } else {
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
    }
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
  else
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
  else if (VT == MVT::i16)
    LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
  else if (VT == MVT::i32)
    LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
  else if (VT == MVT::i64)
    LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
  else if (VT == MVT::i128)
    LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
  EVT DestVT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we have a binary operation on a non-legal
  // type and the target independent selector doesn't know how to handle it.
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned Opc;
  switch (ISDOpcode) {
    default: return false;
    case ISD::ADD:
      Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
      break;
    case ISD::OR:
      Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
      break;
    case ISD::SUB:
      Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
      break;
  }

  unsigned SrcReg1 = getRegForValue(I->getOperand(0));
  if (SrcReg1 == 0) return false;

  // TODO: Often the 2nd operand is an immediate, which can be encoded directly
  // in the instruction, rather than materializing the value in a register.
  unsigned SrcReg2 = getRegForValue(I->getOperand(1));
  if (SrcReg2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(SrcReg1).addReg(SrcReg2));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

// This is largely taken directly from CCAssignFnForNode
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
                                           bool Return,
                                           bool isVarArg) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use the VFP variant of the calling
      // convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
        return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    if (!isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    // Fall through to the soft-float variant; variadic functions don't
    // use the hard floating point ABI.
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::GHC:
    if (Return)
      llvm_unreachable("Can't return in GHC call convention");
    else
      return CC_ARM_APCS_GHC;
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes,
                                  bool isVarArg) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
                             CCAssignFnForCall(CC, false, isVarArg));

  // Check that we can handle all of the arguments. If we can't, then bail out
  // now before we add code to the MBB.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      continue;
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64 ||
          // TODO: Only handle register args for now.
          !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
        return false;
    } else {
      switch (static_cast<EVT>(ArgVT).getSimpleVT().SimpleTy) {
      default:
        return false;
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
        break;
      case MVT::f32:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      case MVT::f64:
        if (!Subtarget->hasVFP2())
          return false;
        break;
      }
    }
  }

  // At this point, we are able to handle the call's arguments in fast isel.

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
           "We don't handle NEON/vector parameters yet.");

    // Handle arg promotion, etc.
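    // Depending on how the calling convention assigned this value, it may need
    // to be sign-/zero-extended or bitcast to the location type before being
    // copied into its register or stored to the stack below.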
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
        assert(Arg != 0 && "Failed to emit a sext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::AExt:
        // Intentional fall-through. Handle AExt and ZExt.
      case CCValAssign::ZExt: {
        MVT DestVT = VA.getLocVT();
        Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
        assert(Arg != 0 && "Failed to emit a zext");
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      assert(VA.getLocVT() == MVT::f64 &&
             "Custom lowering for v2f64 args not available");

      CCValAssign &NextVA = ArgLocs[++i];

      assert(VA.isRegLoc() && NextVA.isRegLoc() &&
             "We only handle register args!");

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
      assert(EmitRet && "Could not emit a store for argument!");
    }
  }

  return true;
}

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes, bool isVarArg) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 && "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
                                                 F.isVarArg()));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVVT = TLI.getValueType(RV->getType());
    EVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      // Perform extension if flagged as either zext or sext. Otherwise, do
      // nothing.
      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
        if (SrcReg == 0) return false;
      }
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
  if (UseReg)
    return isThumb2 ? ARM::tBLXr : ARM::BLX;
  else
    return isThumb2 ? ARM::tBL : ARM::BL;
}

unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
  GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
                                       GlobalValue::ExternalLinkage, 0, Name);
  return ARMMaterializeGV(GV, TLI.getValueType(GV->getType()));
}

// A quick function that will emit a call for a named libcall in F with the
// vector of passed arguments for the Instruction in I. We can assume that we
// can emit a call for any libcall we can produce. This is an abridged version
// of the full call infrastructure since we won't need to worry about things
// like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, false))
    return false;

  unsigned CalleeReg = 0;
  if (EnableARMLongCalls) {
    CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DL, TII.get(CallOpc));
  // BL / BLX don't take a predicate, but tBL / tBLX do.
  if (isThumb2)
    AddDefaultPred(MIB);
  if (EnableARMLongCalls)
    MIB.addReg(CalleeReg);
  else
    MIB.addExternalSymbol(TLI.getLibcallName(Call));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i], RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = 0) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  bool isVarArg = FTy->isVarArg();

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // Can't handle non-double multi-reg retvals.
  if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
      RetVT != MVT::i16 && RetVT != MVT::i32) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
    if (RVLocs.size() >= 2 && RetVT != MVT::f64)
      return false;
  }

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  unsigned arg_size = CS.arg_size();
  Args.reserve(arg_size);
  ArgRegs.reserve(arg_size);
  ArgVTs.reserve(arg_size);
  ArgFlags.reserve(arg_size);
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip the
    // last two arguments, which shouldn't be passed to the underlying function.
    if (IntrMemName && e - i <= 2)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attributes::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attributes::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
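    // Arguments carrying ABI-affecting attributes (inreg, sret, nest, byval)
    // are rejected here; such calls fall back to the SelectionDAG path.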
    if (CS.paramHasAttr(AttrInd, Attributes::InReg) ||
        CS.paramHasAttr(AttrInd, Attributes::StructRet) ||
        CS.paramHasAttr(AttrInd, Attributes::Nest) ||
        CS.paramHasAttr(AttrInd, Attributes::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
                       RegArgs, CC, NumBytes, isVarArg))
    return false;

  bool UseReg = false;
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV || EnableARMLongCalls) UseReg = true;

  unsigned CalleeReg = 0;
  if (UseReg) {
    if (IntrMemName)
      CalleeReg = getLibcallReg(IntrMemName);
    else
      CalleeReg = getRegForValue(Callee);

    if (CalleeReg == 0) return false;
  }

  // Issue the call.
  unsigned CallOpc = ARMSelectCallOp(UseReg);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DL, TII.get(CallOpc));

  // ARM calls don't take a predicate, but tBL / tBLX do.
  if (isThumb2)
    AddDefaultPred(MIB);
  if (UseReg)
    MIB.addReg(CalleeReg);
  else if (!IntrMemName)
    MIB.addGlobalAddress(GV, 0, 0);
  else
    MIB.addExternalSymbol(IntrMemName, 0);

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i], RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(CC));

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
    return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}

bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!ARMIsMemCpySmall(Len))
    return false;

  // We don't care about alignment here since we just emit integer accesses.
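  // Copy in the widest chunks that still fit: i32 while at least 4 bytes
  // remain, then i16, then i8, bumping both offsets by the access size each
  // iteration. A 7-byte copy, for example, becomes one i32, one i16 and one
  // i8 load/store pair.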
  while (Len) {
    MVT VT;
    if (Len >= 4)
      VT = MVT::i32;
    else if (Len >= 2)
      VT = MVT::i16;
    else {
      assert(Len == 1);
      VT = MVT::i8;
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV == true && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV == true && "Should be able to handle this store.");
    (void)RV;

    unsigned Size = VT.getSizeInBits()/8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}

bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::frameaddress: {
    MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
    MFI->setFrameAddressIsTaken(true);

    unsigned LdrOpc;
    const TargetRegisterClass *RC;
    if (isThumb2) {
      LdrOpc = ARM::t2LDRi12;
      RC = (const TargetRegisterClass*)&ARM::tGPRRegClass;
    } else {
      LdrOpc = ARM::LDRi12;
      RC = (const TargetRegisterClass*)&ARM::GPRRegClass;
    }

    const ARMBaseRegisterInfo *RegInfo =
      static_cast<const ARMBaseRegisterInfo*>(TM.getRegisterInfo());
    unsigned FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
    unsigned SrcReg = FramePtr;

    // Recursively load frame address
    //   ldr r0 [fp]
    //   ldr r0 [r0]
    //   ldr r0 [r0]
    //   ...
    unsigned DestReg;
    unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
    while (Depth--) {
      DestReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(LdrOpc), DestReg)
                      .addReg(SrcReg).addImm(0));
      SrcReg = DestReg;
    }
    UpdateValueMap(&I, SrcReg);
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpy's are common enough that we want to do them without a call
      // if possible.
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
          return true;
      }
    }

    if (!MTI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
      return false;

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile.
    if (MSI.isVolatile())
      return false;

    if (!MSI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MSI.getDestAddressSpace() > 255)
      return false;

    return SelectCall(&I, "memset");
  }
  case Intrinsic::trap: {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::TRAP));
    return true;
  }
  }
}

bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  UpdateValueMap(I, SrcReg);
  return true;
}

unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;

  unsigned Opc;
  bool isBoolZext = false;
  if (!SrcVT.isSimple()) return 0;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
    else
      Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
    break;
  case MVT::i8:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
    else
      Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
    break;
  case MVT::i1:
    if (isZExt) {
      Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      isBoolZext = true;
      break;
    }
    return 0;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
        .addReg(SrcReg);
  if (isBoolZext)
    MIB.addImm(1);
  else
    MIB.addImm(0);
  AddOptionalDefs(MIB);
  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(SrcTy, true);
  DestVT = TLI.getValueType(DestTy, true);

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectShift(const Instruction *I,
                              ARM_AM::ShiftOpc ShiftTy) {
  // Thumb2 mode is handled by the target-independent selector
  // or by SelectionDAG ISel.
  if (isThumb2)
    return false;

  // Only handle i32 now.
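  // Anything else is left to the common selector / SelectionDAG ISel.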
  EVT DestVT = TLI.getValueType(I->getType(), true);
  if (DestVT != MVT::i32)
    return false;

  unsigned Opc = ARM::MOVsr;
  unsigned ShiftImm;
  Value *Src2Value = I->getOperand(1);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
    ShiftImm = CI->getZExtValue();

    // Fall back to selection DAG isel if the shift amount
    // is zero or greater than the width of the value type.
    if (ShiftImm == 0 || ShiftImm >= 32)
      return false;

    Opc = ARM::MOVsi;
  }

  Value *Src1Value = I->getOperand(0);
  unsigned Reg1 = getRegForValue(Src1Value);
  if (Reg1 == 0) return false;

  unsigned Reg2 = 0;
  if (Opc == ARM::MOVsr) {
    Reg2 = getRegForValue(Src2Value);
    if (Reg2 == 0) return false;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  if (ResultReg == 0) return false;

  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg)
                            .addReg(Reg1);

  if (Opc == ARM::MOVsi)
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
  else if (Opc == ARM::MOVsr) {
    MIB.addReg(Reg2);
    MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
  }

  AddOptionalDefs(MIB);
  UpdateValueMap(I, ResultReg);
  return true;
}

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {

  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::IndirectBr:
      return SelectIndirectBr(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectIToFP(I, /*isSigned*/ true);
    case Instruction::UIToFP:
      return SelectIToFP(I, /*isSigned*/ false);
    case Instruction::FPToSI:
      return SelectFPToI(I, /*isSigned*/ true);
    case Instruction::FPToUI:
      return SelectFPToI(I, /*isSigned*/ false);
    case Instruction::Add:
      return SelectBinaryIntOp(I, ISD::ADD);
    case Instruction::Or:
      return SelectBinaryIntOp(I, ISD::OR);
    case Instruction::Sub:
      return SelectBinaryIntOp(I, ISD::SUB);
    case Instruction::FAdd:
      return SelectBinaryFPOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryFPOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryFPOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectDiv(I, /*isSigned*/ true);
    case Instruction::UDiv:
      return SelectDiv(I, /*isSigned*/ false);
    case Instruction::SRem:
      return SelectRem(I, /*isSigned*/ true);
    case Instruction::URem:
      return SelectRem(I, /*isSigned*/ false);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    case Instruction::Shl:
      return SelectShift(I, ARM_AM::lsl);
    case Instruction::LShr:
      return SelectShift(I, ARM_AM::lsr);
    case Instruction::AShr:
      return SelectShift(I, ARM_AM::asr);
    default: break;
  }
  return false;
}

/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                                const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  //   ldrb r1, [r0]       ldrb r1, [r0]
  //   uxtb r2, r1     =>
  //   mov  r3, r2         mov  r3, r1
  bool isZExt = true;
  switch (MI->getOpcode()) {
    default: return false;
    case ARM::SXTH:
    case ARM::t2SXTH:
      isZExt = false;
      // Fall through.
    case ARM::UXTH:
    case ARM::t2UXTH:
      if (VT != MVT::i16)
        return false;
      break;
    case ARM::SXTB:
    case ARM::t2SXTB:
      isZExt = false;
      // Fall through.
    case ARM::UXTB:
    case ARM::t2UXTB:
      if (VT != MVT::i8)
        return false;
      break;
  }
  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
    return false;
  MI->eraseFromParent();
  return true;
}

unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
                                     unsigned Align, EVT VT) {
  bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
  ARMConstantPoolConstant *CPV =
    ARMConstantPoolConstant::Create(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  unsigned Opc;
  unsigned DestReg1 = createResultReg(TLI.getRegClassFor(VT));
  // Load value.
  if (isThumb2) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg1)
                    .addConstantPoolIndex(Idx));
    Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs;
  } else {
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                            DL, TII.get(ARM::LDRcp), DestReg1)
                    .addConstantPoolIndex(Idx).addImm(0));
    Opc = UseGOTOFF ? ARM::ADDrr : ARM::LDRrs;
  }

  unsigned GlobalBaseReg = AFI->getGlobalBaseReg();
  if (GlobalBaseReg == 0) {
    GlobalBaseReg = MRI.createVirtualRegister(TLI.getRegClassFor(VT));
    AFI->setGlobalBaseReg(GlobalBaseReg);
  }

  unsigned DestReg2 = createResultReg(TLI.getRegClassFor(VT));
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                    DL, TII.get(Opc), DestReg2)
                            .addReg(DestReg1)
                            .addReg(GlobalBaseReg);
  if (!UseGOTOFF)
    MIB.addImm(0);
  AddOptionalDefs(MIB);

  return DestReg2;
}

namespace llvm {
  FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
                                const TargetLibraryInfo *libInfo) {
    // Completely untested on non-iOS.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // iOS only for now; Thumb1-only subtargets are not supported.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only())
      return new ARMFastISel(funcInfo, libInfo);
    return 0;
  }
}