ARMFastISel.cpp revision 4964ba01f96d5b3a8fb27a7847c01666ee9b4ebd
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                   cl::desc("Turn off experimental ARM fast-isel support"),
                   cl::init(false), cl::Hidden);

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0) {
      Base.Reg = 0;
    }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
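    // These FastEmitInst_* overrides mirror the generic emitters in
    // FastISel.cpp; they are redefined here so that every MachineInstr the
    // ARM fast-isel builds is routed through AddOptionalDefs, which appends
    // the default predicate and optional CC-def operands that ARM
    // instructions expect (see DefinesOptionalPredicate / AddOptionalDefs
    // further down in this file).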
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      unsigned Op2, bool Op2IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm1, uint64_t Imm2);

    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                               const LoadInst *LI);

    #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectSIToFP(const Instruction *I);
    bool SelectFPToSI(const Instruction *I);
    bool SelectSDiv(const Instruction *I);
    bool SelectSRem(const Instruction *I);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);

    // Utility routines.
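    // (The helpers below materialize constants and globals, compute and
    // simplify addresses, and emit the loads, stores, compares, and
    // extensions that the Select* routines above are built from.)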
176 private: 177 bool isTypeLegal(Type *Ty, MVT &VT); 178 bool isLoadTypeLegal(Type *Ty, MVT &VT); 179 bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, 180 bool isZExt); 181 bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr, 182 unsigned Alignment = 0, bool isZExt = true, 183 bool allocReg = true); 184 185 bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr, 186 unsigned Alignment = 0); 187 bool ARMComputeAddress(const Value *Obj, Address &Addr); 188 void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3); 189 bool ARMIsMemCpySmall(uint64_t Len); 190 bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len); 191 unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt); 192 unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT); 193 unsigned ARMMaterializeInt(const Constant *C, EVT VT); 194 unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT); 195 unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg); 196 unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg); 197 unsigned ARMSelectCallOp(const GlobalValue *GV); 198 199 // Call handling routines. 200 private: 201 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return); 202 bool ProcessCallArgs(SmallVectorImpl<Value*> &Args, 203 SmallVectorImpl<unsigned> &ArgRegs, 204 SmallVectorImpl<MVT> &ArgVTs, 205 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags, 206 SmallVectorImpl<unsigned> &RegArgs, 207 CallingConv::ID CC, 208 unsigned &NumBytes); 209 bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs, 210 const Instruction *I, CallingConv::ID CC, 211 unsigned &NumBytes); 212 bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call); 213 214 // OptionalDef handling routines. 215 private: 216 bool isARMNEONPred(const MachineInstr *MI); 217 bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR); 218 const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB); 219 void AddLoadStoreOperands(EVT VT, Address &Addr, 220 const MachineInstrBuilder &MIB, 221 unsigned Flags, bool useAM3); 222}; 223 224} // end anonymous namespace 225 226#include "ARMGenCallingConv.inc" 227 228// DefinesOptionalPredicate - This is different from DefinesPredicate in that 229// we don't care about implicit defs here, just places we'll need to add a 230// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR. 231bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) { 232 if (!MI->hasOptionalDef()) 233 return false; 234 235 // Look to see if our OptionalDef is defining CPSR or CCR. 236 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 237 const MachineOperand &MO = MI->getOperand(i); 238 if (!MO.isReg() || !MO.isDef()) continue; 239 if (MO.getReg() == ARM::CPSR) 240 *CPSR = true; 241 } 242 return true; 243} 244 245bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) { 246 const MCInstrDesc &MCID = MI->getDesc(); 247 248 // If we're a thumb2 or not NEON function we were handled via isPredicable. 249 if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON || 250 AFI->isThumb2Function()) 251 return false; 252 253 for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) 254 if (MCID.OpInfo[i].isPredicate()) 255 return true; 256 257 return false; 258} 259 260// If the machine is predicable go ahead and add the predicate operands, if 261// it needs default CC operands add those. 262// TODO: If we want to support thumb1 then we'll need to deal with optional 263// CPSR defs that need to be added before the remaining operands. 
See s_cc_out 264// for descriptions why. 265const MachineInstrBuilder & 266ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) { 267 MachineInstr *MI = &*MIB; 268 269 // Do we use a predicate? or... 270 // Are we NEON in ARM mode and have a predicate operand? If so, I know 271 // we're not predicable but add it anyways. 272 if (TII.isPredicable(MI) || isARMNEONPred(MI)) 273 AddDefaultPred(MIB); 274 275 // Do we optionally set a predicate? Preds is size > 0 iff the predicate 276 // defines CPSR. All other OptionalDefines in ARM are the CCR register. 277 bool CPSR = false; 278 if (DefinesOptionalPredicate(MI, &CPSR)) { 279 if (CPSR) 280 AddDefaultT1CC(MIB); 281 else 282 AddDefaultCC(MIB); 283 } 284 return MIB; 285} 286 287unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode, 288 const TargetRegisterClass* RC) { 289 unsigned ResultReg = createResultReg(RC); 290 const MCInstrDesc &II = TII.get(MachineInstOpcode); 291 292 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)); 293 return ResultReg; 294} 295 296unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode, 297 const TargetRegisterClass *RC, 298 unsigned Op0, bool Op0IsKill) { 299 unsigned ResultReg = createResultReg(RC); 300 const MCInstrDesc &II = TII.get(MachineInstOpcode); 301 302 if (II.getNumDefs() >= 1) 303 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 304 .addReg(Op0, Op0IsKill * RegState::Kill)); 305 else { 306 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 307 .addReg(Op0, Op0IsKill * RegState::Kill)); 308 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 309 TII.get(TargetOpcode::COPY), ResultReg) 310 .addReg(II.ImplicitDefs[0])); 311 } 312 return ResultReg; 313} 314 315unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode, 316 const TargetRegisterClass *RC, 317 unsigned Op0, bool Op0IsKill, 318 unsigned Op1, bool Op1IsKill) { 319 unsigned ResultReg = createResultReg(RC); 320 const MCInstrDesc &II = TII.get(MachineInstOpcode); 321 322 if (II.getNumDefs() >= 1) 323 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 324 .addReg(Op0, Op0IsKill * RegState::Kill) 325 .addReg(Op1, Op1IsKill * RegState::Kill)); 326 else { 327 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 328 .addReg(Op0, Op0IsKill * RegState::Kill) 329 .addReg(Op1, Op1IsKill * RegState::Kill)); 330 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 331 TII.get(TargetOpcode::COPY), ResultReg) 332 .addReg(II.ImplicitDefs[0])); 333 } 334 return ResultReg; 335} 336 337unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode, 338 const TargetRegisterClass *RC, 339 unsigned Op0, bool Op0IsKill, 340 unsigned Op1, bool Op1IsKill, 341 unsigned Op2, bool Op2IsKill) { 342 unsigned ResultReg = createResultReg(RC); 343 const MCInstrDesc &II = TII.get(MachineInstOpcode); 344 345 if (II.getNumDefs() >= 1) 346 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 347 .addReg(Op0, Op0IsKill * RegState::Kill) 348 .addReg(Op1, Op1IsKill * RegState::Kill) 349 .addReg(Op2, Op2IsKill * RegState::Kill)); 350 else { 351 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 352 .addReg(Op0, Op0IsKill * RegState::Kill) 353 .addReg(Op1, Op1IsKill * RegState::Kill) 354 .addReg(Op2, Op2IsKill * RegState::Kill)); 355 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 356 TII.get(TargetOpcode::COPY), ResultReg) 357 .addReg(II.ImplicitDefs[0])); 358 } 359 return 
ResultReg; 360} 361 362unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode, 363 const TargetRegisterClass *RC, 364 unsigned Op0, bool Op0IsKill, 365 uint64_t Imm) { 366 unsigned ResultReg = createResultReg(RC); 367 const MCInstrDesc &II = TII.get(MachineInstOpcode); 368 369 if (II.getNumDefs() >= 1) 370 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 371 .addReg(Op0, Op0IsKill * RegState::Kill) 372 .addImm(Imm)); 373 else { 374 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 375 .addReg(Op0, Op0IsKill * RegState::Kill) 376 .addImm(Imm)); 377 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 378 TII.get(TargetOpcode::COPY), ResultReg) 379 .addReg(II.ImplicitDefs[0])); 380 } 381 return ResultReg; 382} 383 384unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode, 385 const TargetRegisterClass *RC, 386 unsigned Op0, bool Op0IsKill, 387 const ConstantFP *FPImm) { 388 unsigned ResultReg = createResultReg(RC); 389 const MCInstrDesc &II = TII.get(MachineInstOpcode); 390 391 if (II.getNumDefs() >= 1) 392 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 393 .addReg(Op0, Op0IsKill * RegState::Kill) 394 .addFPImm(FPImm)); 395 else { 396 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 397 .addReg(Op0, Op0IsKill * RegState::Kill) 398 .addFPImm(FPImm)); 399 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 400 TII.get(TargetOpcode::COPY), ResultReg) 401 .addReg(II.ImplicitDefs[0])); 402 } 403 return ResultReg; 404} 405 406unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode, 407 const TargetRegisterClass *RC, 408 unsigned Op0, bool Op0IsKill, 409 unsigned Op1, bool Op1IsKill, 410 uint64_t Imm) { 411 unsigned ResultReg = createResultReg(RC); 412 const MCInstrDesc &II = TII.get(MachineInstOpcode); 413 414 if (II.getNumDefs() >= 1) 415 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 416 .addReg(Op0, Op0IsKill * RegState::Kill) 417 .addReg(Op1, Op1IsKill * RegState::Kill) 418 .addImm(Imm)); 419 else { 420 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 421 .addReg(Op0, Op0IsKill * RegState::Kill) 422 .addReg(Op1, Op1IsKill * RegState::Kill) 423 .addImm(Imm)); 424 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 425 TII.get(TargetOpcode::COPY), ResultReg) 426 .addReg(II.ImplicitDefs[0])); 427 } 428 return ResultReg; 429} 430 431unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode, 432 const TargetRegisterClass *RC, 433 uint64_t Imm) { 434 unsigned ResultReg = createResultReg(RC); 435 const MCInstrDesc &II = TII.get(MachineInstOpcode); 436 437 if (II.getNumDefs() >= 1) 438 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 439 .addImm(Imm)); 440 else { 441 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 442 .addImm(Imm)); 443 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 444 TII.get(TargetOpcode::COPY), ResultReg) 445 .addReg(II.ImplicitDefs[0])); 446 } 447 return ResultReg; 448} 449 450unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode, 451 const TargetRegisterClass *RC, 452 uint64_t Imm1, uint64_t Imm2) { 453 unsigned ResultReg = createResultReg(RC); 454 const MCInstrDesc &II = TII.get(MachineInstOpcode); 455 456 if (II.getNumDefs() >= 1) 457 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 458 .addImm(Imm1).addImm(Imm2)); 459 else { 460 AddOptionalDefs(BuildMI(*FuncInfo.MBB, 
FuncInfo.InsertPt, DL, II) 461 .addImm(Imm1).addImm(Imm2)); 462 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 463 TII.get(TargetOpcode::COPY), 464 ResultReg) 465 .addReg(II.ImplicitDefs[0])); 466 } 467 return ResultReg; 468} 469 470unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT, 471 unsigned Op0, bool Op0IsKill, 472 uint32_t Idx) { 473 unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT)); 474 assert(TargetRegisterInfo::isVirtualRegister(Op0) && 475 "Cannot yet extract from physregs"); 476 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 477 DL, TII.get(TargetOpcode::COPY), ResultReg) 478 .addReg(Op0, getKillRegState(Op0IsKill), Idx)); 479 return ResultReg; 480} 481 482// TODO: Don't worry about 64-bit now, but when this is fixed remove the 483// checks from the various callers. 484unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) { 485 if (VT == MVT::f64) return 0; 486 487 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT)); 488 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 489 TII.get(ARM::VMOVRS), MoveReg) 490 .addReg(SrcReg)); 491 return MoveReg; 492} 493 494unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) { 495 if (VT == MVT::i64) return 0; 496 497 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT)); 498 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 499 TII.get(ARM::VMOVSR), MoveReg) 500 .addReg(SrcReg)); 501 return MoveReg; 502} 503 504// For double width floating point we need to materialize two constants 505// (the high and the low) into integer registers then use a move to get 506// the combined constant into an FP reg. 507unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) { 508 const APFloat Val = CFP->getValueAPF(); 509 bool is64bit = VT == MVT::f64; 510 511 // This checks to see if we can use VFP3 instructions to materialize 512 // a constant, otherwise we have to go through the constant pool. 513 if (TLI.isFPImmLegal(Val, VT)) { 514 int Imm; 515 unsigned Opc; 516 if (is64bit) { 517 Imm = ARM_AM::getFP64Imm(Val); 518 Opc = ARM::FCONSTD; 519 } else { 520 Imm = ARM_AM::getFP32Imm(Val); 521 Opc = ARM::FCONSTS; 522 } 523 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 524 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 525 DestReg) 526 .addImm(Imm)); 527 return DestReg; 528 } 529 530 // Require VFP2 for loading fp constants. 531 if (!Subtarget->hasVFP2()) return false; 532 533 // MachineConstantPool wants an explicit alignment. 534 unsigned Align = TD.getPrefTypeAlignment(CFP->getType()); 535 if (Align == 0) { 536 // TODO: Figure out if this is correct. 537 Align = TD.getTypeAllocSize(CFP->getType()); 538 } 539 unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align); 540 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 541 unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS; 542 543 // The extra reg is for addrmode5. 544 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 545 DestReg) 546 .addConstantPoolIndex(Idx) 547 .addReg(0)); 548 return DestReg; 549} 550 551unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) { 552 553 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1) 554 return false; 555 556 // If we can do this in a single instruction without a constant pool entry 557 // do so now. 
558 const ConstantInt *CI = cast<ConstantInt>(C); 559 if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) { 560 unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16; 561 unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 562 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 563 TII.get(Opc), ImmReg) 564 .addImm(CI->getZExtValue())); 565 return ImmReg; 566 } 567 568 // Use MVN to emit negative constants. 569 if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) { 570 unsigned Imm = (unsigned)~(CI->getSExtValue()); 571 bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 572 (ARM_AM::getSOImmVal(Imm) != -1); 573 if (UseImm) { 574 unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi; 575 unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 576 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 577 TII.get(Opc), ImmReg) 578 .addImm(Imm)); 579 return ImmReg; 580 } 581 } 582 583 // Load from constant pool. For now 32-bit only. 584 if (VT != MVT::i32) 585 return false; 586 587 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 588 589 // MachineConstantPool wants an explicit alignment. 590 unsigned Align = TD.getPrefTypeAlignment(C->getType()); 591 if (Align == 0) { 592 // TODO: Figure out if this is correct. 593 Align = TD.getTypeAllocSize(C->getType()); 594 } 595 unsigned Idx = MCP.getConstantPoolIndex(C, Align); 596 597 if (isThumb2) 598 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 599 TII.get(ARM::t2LDRpci), DestReg) 600 .addConstantPoolIndex(Idx)); 601 else 602 // The extra immediate is for addrmode2. 603 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 604 TII.get(ARM::LDRcp), DestReg) 605 .addConstantPoolIndex(Idx) 606 .addImm(0)); 607 608 return DestReg; 609} 610 611unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) { 612 // For now 32-bit only. 613 if (VT != MVT::i32) return 0; 614 615 Reloc::Model RelocM = TM.getRelocationModel(); 616 617 // TODO: Need more magic for ARM PIC. 618 if (!isThumb2 && (RelocM == Reloc::PIC_)) return 0; 619 620 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 621 622 // Use movw+movt when possible, it avoids constant pool entries. 623 if (Subtarget->isTargetDarwin() && Subtarget->useMovt()) { 624 unsigned Opc; 625 switch (RelocM) { 626 case Reloc::PIC_: 627 Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel; 628 break; 629 case Reloc::DynamicNoPIC: 630 Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn; 631 break; 632 default: 633 Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm; 634 break; 635 } 636 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 637 DestReg).addGlobalAddress(GV)); 638 } else { 639 // MachineConstantPool wants an explicit alignment. 640 unsigned Align = TD.getPrefTypeAlignment(GV->getType()); 641 if (Align == 0) { 642 // TODO: Figure out if this is correct. 643 Align = TD.getTypeAllocSize(GV->getType()); 644 } 645 646 // Grab index. 647 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : 648 (Subtarget->isThumb() ? 4 : 8); 649 unsigned Id = AFI->createPICLabelUId(); 650 ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id, 651 ARMCP::CPValue, 652 PCAdj); 653 unsigned Idx = MCP.getConstantPoolIndex(CPV, Align); 654 655 // Load value. 656 MachineInstrBuilder MIB; 657 if (isThumb2) { 658 unsigned Opc = (RelocM!=Reloc::PIC_) ? 
ARM::t2LDRpci : ARM::t2LDRpci_pic; 659 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg) 660 .addConstantPoolIndex(Idx); 661 if (RelocM == Reloc::PIC_) 662 MIB.addImm(Id); 663 } else { 664 // The extra immediate is for addrmode2. 665 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp), 666 DestReg) 667 .addConstantPoolIndex(Idx) 668 .addImm(0); 669 } 670 AddOptionalDefs(MIB); 671 } 672 673 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) { 674 MachineInstrBuilder MIB; 675 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); 676 if (isThumb2) 677 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 678 TII.get(ARM::t2LDRi12), NewDestReg) 679 .addReg(DestReg) 680 .addImm(0); 681 else 682 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12), 683 NewDestReg) 684 .addReg(DestReg) 685 .addImm(0); 686 DestReg = NewDestReg; 687 AddOptionalDefs(MIB); 688 } 689 690 return DestReg; 691} 692 693unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) { 694 EVT VT = TLI.getValueType(C->getType(), true); 695 696 // Only handle simple types. 697 if (!VT.isSimple()) return 0; 698 699 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) 700 return ARMMaterializeFP(CFP, VT); 701 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) 702 return ARMMaterializeGV(GV, VT); 703 else if (isa<ConstantInt>(C)) 704 return ARMMaterializeInt(C, VT); 705 706 return 0; 707} 708 709// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF); 710 711unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) { 712 // Don't handle dynamic allocas. 713 if (!FuncInfo.StaticAllocaMap.count(AI)) return 0; 714 715 MVT VT; 716 if (!isLoadTypeLegal(AI->getType(), VT)) return false; 717 718 DenseMap<const AllocaInst*, int>::iterator SI = 719 FuncInfo.StaticAllocaMap.find(AI); 720 721 // This will get lowered later into the correct offsets and registers 722 // via rewriteXFrameIndex. 723 if (SI != FuncInfo.StaticAllocaMap.end()) { 724 TargetRegisterClass* RC = TLI.getRegClassFor(VT); 725 unsigned ResultReg = createResultReg(RC); 726 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri; 727 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 728 TII.get(Opc), ResultReg) 729 .addFrameIndex(SI->second) 730 .addImm(0)); 731 return ResultReg; 732 } 733 734 return 0; 735} 736 737bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) { 738 EVT evt = TLI.getValueType(Ty, true); 739 740 // Only handle simple types. 741 if (evt == MVT::Other || !evt.isSimple()) return false; 742 VT = evt.getSimpleVT(); 743 744 // Handle all legal types, i.e. a register that will directly hold this 745 // value. 746 return TLI.isTypeLegal(VT); 747} 748 749bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) { 750 if (isTypeLegal(Ty, VT)) return true; 751 752 // If this is a type than can be sign or zero-extended to a basic operation 753 // go ahead and accept it now. 754 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16) 755 return true; 756 757 return false; 758} 759 760// Computes the address to get to an object. 761bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) { 762 // Some boilerplate from the X86 FastISel. 763 const User *U = NULL; 764 unsigned Opcode = Instruction::UserOp1; 765 if (const Instruction *I = dyn_cast<Instruction>(Obj)) { 766 // Don't walk into other basic blocks unless the object is an alloca from 767 // another block, otherwise it may not have a virtual register assigned. 
768 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) || 769 FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) { 770 Opcode = I->getOpcode(); 771 U = I; 772 } 773 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) { 774 Opcode = C->getOpcode(); 775 U = C; 776 } 777 778 if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType())) 779 if (Ty->getAddressSpace() > 255) 780 // Fast instruction selection doesn't support the special 781 // address spaces. 782 return false; 783 784 switch (Opcode) { 785 default: 786 break; 787 case Instruction::BitCast: { 788 // Look through bitcasts. 789 return ARMComputeAddress(U->getOperand(0), Addr); 790 } 791 case Instruction::IntToPtr: { 792 // Look past no-op inttoptrs. 793 if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy()) 794 return ARMComputeAddress(U->getOperand(0), Addr); 795 break; 796 } 797 case Instruction::PtrToInt: { 798 // Look past no-op ptrtoints. 799 if (TLI.getValueType(U->getType()) == TLI.getPointerTy()) 800 return ARMComputeAddress(U->getOperand(0), Addr); 801 break; 802 } 803 case Instruction::GetElementPtr: { 804 Address SavedAddr = Addr; 805 int TmpOffset = Addr.Offset; 806 807 // Iterate through the GEP folding the constants into offsets where 808 // we can. 809 gep_type_iterator GTI = gep_type_begin(U); 810 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); 811 i != e; ++i, ++GTI) { 812 const Value *Op = *i; 813 if (StructType *STy = dyn_cast<StructType>(*GTI)) { 814 const StructLayout *SL = TD.getStructLayout(STy); 815 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue(); 816 TmpOffset += SL->getElementOffset(Idx); 817 } else { 818 uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType()); 819 for (;;) { 820 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) { 821 // Constant-offset addressing. 822 TmpOffset += CI->getSExtValue() * S; 823 break; 824 } 825 if (isa<AddOperator>(Op) && 826 (!isa<Instruction>(Op) || 827 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()] 828 == FuncInfo.MBB) && 829 isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) { 830 // An add (in the same block) with a constant operand. Fold the 831 // constant. 832 ConstantInt *CI = 833 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1)); 834 TmpOffset += CI->getSExtValue() * S; 835 // Iterate on the other operand. 836 Op = cast<AddOperator>(Op)->getOperand(0); 837 continue; 838 } 839 // Unsupported 840 goto unsupported_gep; 841 } 842 } 843 } 844 845 // Try to grab the base operand now. 846 Addr.Offset = TmpOffset; 847 if (ARMComputeAddress(U->getOperand(0), Addr)) return true; 848 849 // We failed, restore everything and try the other options. 850 Addr = SavedAddr; 851 852 unsupported_gep: 853 break; 854 } 855 case Instruction::Alloca: { 856 const AllocaInst *AI = cast<AllocaInst>(Obj); 857 DenseMap<const AllocaInst*, int>::iterator SI = 858 FuncInfo.StaticAllocaMap.find(AI); 859 if (SI != FuncInfo.StaticAllocaMap.end()) { 860 Addr.BaseType = Address::FrameIndexBase; 861 Addr.Base.FI = SI->second; 862 return true; 863 } 864 break; 865 } 866 } 867 868 // Try to get this in a register if nothing else has worked. 
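// Note: any constant offset folded in above remains in Addr.Offset; if the
// chosen load/store instruction cannot encode it, ARMSimplifyAddress later
// adds it into the base register instead.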
869 if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj); 870 return Addr.Base.Reg != 0; 871} 872 873void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) { 874 875 assert(VT.isSimple() && "Non-simple types are invalid here!"); 876 877 bool needsLowering = false; 878 switch (VT.getSimpleVT().SimpleTy) { 879 default: 880 assert(false && "Unhandled load/store type!"); 881 break; 882 case MVT::i1: 883 case MVT::i8: 884 case MVT::i16: 885 case MVT::i32: 886 if (!useAM3) { 887 // Integer loads/stores handle 12-bit offsets. 888 needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset); 889 // Handle negative offsets. 890 if (needsLowering && isThumb2) 891 needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 && 892 Addr.Offset > -256); 893 } else { 894 // ARM halfword load/stores and signed byte loads use +/-imm8 offsets. 895 needsLowering = (Addr.Offset > 255 || Addr.Offset < -255); 896 } 897 break; 898 case MVT::f32: 899 case MVT::f64: 900 // Floating point operands handle 8-bit offsets. 901 needsLowering = ((Addr.Offset & 0xff) != Addr.Offset); 902 break; 903 } 904 905 // If this is a stack pointer and the offset needs to be simplified then 906 // put the alloca address into a register, set the base type back to 907 // register and continue. This should almost never happen. 908 if (needsLowering && Addr.BaseType == Address::FrameIndexBase) { 909 TargetRegisterClass *RC = isThumb2 ? ARM::tGPRRegisterClass : 910 ARM::GPRRegisterClass; 911 unsigned ResultReg = createResultReg(RC); 912 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri; 913 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 914 TII.get(Opc), ResultReg) 915 .addFrameIndex(Addr.Base.FI) 916 .addImm(0)); 917 Addr.Base.Reg = ResultReg; 918 Addr.BaseType = Address::RegBase; 919 } 920 921 // Since the offset is too large for the load/store instruction 922 // get the reg+offset into a register. 923 if (needsLowering) { 924 Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg, 925 /*Op0IsKill*/false, Addr.Offset, MVT::i32); 926 Addr.Offset = 0; 927 } 928} 929 930void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr, 931 const MachineInstrBuilder &MIB, 932 unsigned Flags, bool useAM3) { 933 // addrmode5 output depends on the selection dag addressing dividing the 934 // offset by 4 that it then later multiplies. Do this here as well. 935 if (VT.getSimpleVT().SimpleTy == MVT::f32 || 936 VT.getSimpleVT().SimpleTy == MVT::f64) 937 Addr.Offset /= 4; 938 939 // Frame base works a bit differently. Handle it separately. 940 if (Addr.BaseType == Address::FrameIndexBase) { 941 int FI = Addr.Base.FI; 942 int Offset = Addr.Offset; 943 MachineMemOperand *MMO = 944 FuncInfo.MF->getMachineMemOperand( 945 MachinePointerInfo::getFixedStack(FI, Offset), 946 Flags, 947 MFI.getObjectSize(FI), 948 MFI.getObjectAlignment(FI)); 949 // Now add the rest of the operands. 950 MIB.addFrameIndex(FI); 951 952 // ARM halfword load/stores and signed byte loads need an additional 953 // operand. 954 if (useAM3) { 955 signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset; 956 MIB.addReg(0); 957 MIB.addImm(Imm); 958 } else { 959 MIB.addImm(Addr.Offset); 960 } 961 MIB.addMemOperand(MMO); 962 } else { 963 // Now add the rest of the operands. 964 MIB.addReg(Addr.Base.Reg); 965 966 // ARM halfword load/stores and signed byte loads need an additional 967 // operand. 968 if (useAM3) { 969 signed Imm = (Addr.Offset < 0) ? 
(0x100 | -Addr.Offset) : Addr.Offset; 970 MIB.addReg(0); 971 MIB.addImm(Imm); 972 } else { 973 MIB.addImm(Addr.Offset); 974 } 975 } 976 AddOptionalDefs(MIB); 977} 978 979bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr, 980 unsigned Alignment, bool isZExt, bool allocReg) { 981 assert(VT.isSimple() && "Non-simple types are invalid here!"); 982 unsigned Opc; 983 bool useAM3 = false; 984 bool needVMOV = false; 985 TargetRegisterClass *RC; 986 switch (VT.getSimpleVT().SimpleTy) { 987 // This is mostly going to be Neon/vector support. 988 default: return false; 989 case MVT::i1: 990 case MVT::i8: 991 if (isThumb2) { 992 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 993 Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8; 994 else 995 Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12; 996 } else { 997 if (isZExt) { 998 Opc = ARM::LDRBi12; 999 } else { 1000 Opc = ARM::LDRSB; 1001 useAM3 = true; 1002 } 1003 } 1004 RC = ARM::GPRRegisterClass; 1005 break; 1006 case MVT::i16: 1007 if (isThumb2) { 1008 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1009 Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8; 1010 else 1011 Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12; 1012 } else { 1013 Opc = isZExt ? ARM::LDRH : ARM::LDRSH; 1014 useAM3 = true; 1015 } 1016 RC = ARM::GPRRegisterClass; 1017 break; 1018 case MVT::i32: 1019 if (isThumb2) { 1020 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1021 Opc = ARM::t2LDRi8; 1022 else 1023 Opc = ARM::t2LDRi12; 1024 } else { 1025 Opc = ARM::LDRi12; 1026 } 1027 RC = ARM::GPRRegisterClass; 1028 break; 1029 case MVT::f32: 1030 if (!Subtarget->hasVFP2()) return false; 1031 // Unaligned loads need special handling. Floats require word-alignment. 1032 if (Alignment && Alignment < 4) { 1033 needVMOV = true; 1034 VT = MVT::i32; 1035 Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12; 1036 RC = ARM::GPRRegisterClass; 1037 } else { 1038 Opc = ARM::VLDRS; 1039 RC = TLI.getRegClassFor(VT); 1040 } 1041 break; 1042 case MVT::f64: 1043 if (!Subtarget->hasVFP2()) return false; 1044 // FIXME: Unaligned loads need special handling. Doublewords require 1045 // word-alignment. 1046 if (Alignment && Alignment < 4) 1047 return false; 1048 1049 Opc = ARM::VLDRD; 1050 RC = TLI.getRegClassFor(VT); 1051 break; 1052 } 1053 // Simplify this down to something we can handle. 1054 ARMSimplifyAddress(Addr, VT, useAM3); 1055 1056 // Create the base instruction, then add the operands. 1057 if (allocReg) 1058 ResultReg = createResultReg(RC); 1059 assert (ResultReg > 255 && "Expected an allocated virtual register."); 1060 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1061 TII.get(Opc), ResultReg); 1062 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3); 1063 1064 // If we had an unaligned load of a float we've converted it to an regular 1065 // load. Now we must move from the GRP to the FP register. 1066 if (needVMOV) { 1067 unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32)); 1068 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1069 TII.get(ARM::VMOVSR), MoveReg) 1070 .addReg(ResultReg)); 1071 ResultReg = MoveReg; 1072 } 1073 return true; 1074} 1075 1076bool ARMFastISel::SelectLoad(const Instruction *I) { 1077 // Atomic loads need special handling. 1078 if (cast<LoadInst>(I)->isAtomic()) 1079 return false; 1080 1081 // Verify we have a legal type before going any further. 
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ? ARM::tGPRRegisterClass :
                                                ARM::GPRRegisterClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      // Unaligned stores need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                TII.get(ARM::VMOVRS), MoveReg)
                        .addReg(SrcReg));
        SrcReg = MoveReg;
        VT = MVT::i32;
        StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
      } else {
        StrOpc = ARM::VSTRS;
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned stores need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4)
        return false;

      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
1198 Address Addr; 1199 if (!ARMComputeAddress(I->getOperand(1), Addr)) 1200 return false; 1201 1202 if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment())) 1203 return false; 1204 return true; 1205} 1206 1207static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) { 1208 switch (Pred) { 1209 // Needs two compares... 1210 case CmpInst::FCMP_ONE: 1211 case CmpInst::FCMP_UEQ: 1212 default: 1213 // AL is our "false" for now. The other two need more compares. 1214 return ARMCC::AL; 1215 case CmpInst::ICMP_EQ: 1216 case CmpInst::FCMP_OEQ: 1217 return ARMCC::EQ; 1218 case CmpInst::ICMP_SGT: 1219 case CmpInst::FCMP_OGT: 1220 return ARMCC::GT; 1221 case CmpInst::ICMP_SGE: 1222 case CmpInst::FCMP_OGE: 1223 return ARMCC::GE; 1224 case CmpInst::ICMP_UGT: 1225 case CmpInst::FCMP_UGT: 1226 return ARMCC::HI; 1227 case CmpInst::FCMP_OLT: 1228 return ARMCC::MI; 1229 case CmpInst::ICMP_ULE: 1230 case CmpInst::FCMP_OLE: 1231 return ARMCC::LS; 1232 case CmpInst::FCMP_ORD: 1233 return ARMCC::VC; 1234 case CmpInst::FCMP_UNO: 1235 return ARMCC::VS; 1236 case CmpInst::FCMP_UGE: 1237 return ARMCC::PL; 1238 case CmpInst::ICMP_SLT: 1239 case CmpInst::FCMP_ULT: 1240 return ARMCC::LT; 1241 case CmpInst::ICMP_SLE: 1242 case CmpInst::FCMP_ULE: 1243 return ARMCC::LE; 1244 case CmpInst::FCMP_UNE: 1245 case CmpInst::ICMP_NE: 1246 return ARMCC::NE; 1247 case CmpInst::ICMP_UGE: 1248 return ARMCC::HS; 1249 case CmpInst::ICMP_ULT: 1250 return ARMCC::LO; 1251 } 1252} 1253 1254bool ARMFastISel::SelectBranch(const Instruction *I) { 1255 const BranchInst *BI = cast<BranchInst>(I); 1256 MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)]; 1257 MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)]; 1258 1259 // Simple branch support. 1260 1261 // If we can, avoid recomputing the compare - redoing it could lead to wonky 1262 // behavior. 1263 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) { 1264 if (CI->hasOneUse() && (CI->getParent() == I->getParent())) { 1265 1266 // Get the compare predicate. 1267 // Try to take advantage of fallthrough opportunities. 1268 CmpInst::Predicate Predicate = CI->getPredicate(); 1269 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 1270 std::swap(TBB, FBB); 1271 Predicate = CmpInst::getInversePredicate(Predicate); 1272 } 1273 1274 ARMCC::CondCodes ARMPred = getComparePred(Predicate); 1275 1276 // We may not handle every CC for now. 1277 if (ARMPred == ARMCC::AL) return false; 1278 1279 // Emit the compare. 1280 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) 1281 return false; 1282 1283 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; 1284 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) 1285 .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR); 1286 FastEmitBranch(FBB, DL); 1287 FuncInfo.MBB->addSuccessor(TBB); 1288 return true; 1289 } 1290 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) { 1291 MVT SourceVT; 1292 if (TI->hasOneUse() && TI->getParent() == I->getParent() && 1293 (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) { 1294 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri; 1295 unsigned OpReg = getRegForValue(TI->getOperand(0)); 1296 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1297 TII.get(TstOpc)) 1298 .addReg(OpReg).addImm(1)); 1299 1300 unsigned CCMode = ARMCC::NE; 1301 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 1302 std::swap(TBB, FBB); 1303 CCMode = ARMCC::EQ; 1304 } 1305 1306 unsigned BrOpc = isThumb2 ? 
ARM::t2Bcc : ARM::Bcc; 1307 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) 1308 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR); 1309 1310 FastEmitBranch(FBB, DL); 1311 FuncInfo.MBB->addSuccessor(TBB); 1312 return true; 1313 } 1314 } else if (const ConstantInt *CI = 1315 dyn_cast<ConstantInt>(BI->getCondition())) { 1316 uint64_t Imm = CI->getZExtValue(); 1317 MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB; 1318 FastEmitBranch(Target, DL); 1319 return true; 1320 } 1321 1322 unsigned CmpReg = getRegForValue(BI->getCondition()); 1323 if (CmpReg == 0) return false; 1324 1325 // We've been divorced from our compare! Our block was split, and 1326 // now our compare lives in a predecessor block. We musn't 1327 // re-compare here, as the children of the compare aren't guaranteed 1328 // live across the block boundary (we *could* check for this). 1329 // Regardless, the compare has been done in the predecessor block, 1330 // and it left a value for us in a virtual register. Ergo, we test 1331 // the one-bit value left in the virtual register. 1332 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri; 1333 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc)) 1334 .addReg(CmpReg).addImm(1)); 1335 1336 unsigned CCMode = ARMCC::NE; 1337 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 1338 std::swap(TBB, FBB); 1339 CCMode = ARMCC::EQ; 1340 } 1341 1342 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; 1343 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) 1344 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR); 1345 FastEmitBranch(FBB, DL); 1346 FuncInfo.MBB->addSuccessor(TBB); 1347 return true; 1348} 1349 1350bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, 1351 bool isZExt) { 1352 Type *Ty = Src1Value->getType(); 1353 EVT SrcVT = TLI.getValueType(Ty, true); 1354 if (!SrcVT.isSimple()) return false; 1355 1356 bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy()); 1357 if (isFloat && !Subtarget->hasVFP2()) 1358 return false; 1359 1360 // Check to see if the 2nd operand is a constant that we can encode directly 1361 // in the compare. 1362 int Imm = 0; 1363 bool UseImm = false; 1364 bool isNegativeImm = false; 1365 // FIXME: At -O0 we don't have anything that canonicalizes operand order. 1366 // Thus, Src1Value may be a ConstantInt, but we're missing it. 1367 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) { 1368 if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 || 1369 SrcVT == MVT::i1) { 1370 const APInt &CIVal = ConstInt->getValue(); 1371 Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue(); 1372 if (Imm < 0) { 1373 isNegativeImm = true; 1374 Imm = -Imm; 1375 } 1376 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 1377 (ARM_AM::getSOImmVal(Imm) != -1); 1378 } 1379 } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) { 1380 if (SrcVT == MVT::f32 || SrcVT == MVT::f64) 1381 if (ConstFP->isZero() && !ConstFP->isNegative()) 1382 UseImm = true; 1383 } 1384 1385 unsigned CmpOpc; 1386 bool isICmp = true; 1387 bool needsExt = false; 1388 switch (SrcVT.getSimpleVT().SimpleTy) { 1389 default: return false; 1390 // TODO: Verify compares. 1391 case MVT::f32: 1392 isICmp = false; 1393 CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES; 1394 break; 1395 case MVT::f64: 1396 isICmp = false; 1397 CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED; 1398 break; 1399 case MVT::i1: 1400 case MVT::i8: 1401 case MVT::i16: 1402 needsExt = true; 1403 // Intentional fall-through. 
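  // (i1/i8/i16 operands fall through to the i32 compare opcodes; needsExt
  // makes the code further down widen them with ARMEmitIntExt before the
  // compare itself is emitted.)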
1404 case MVT::i32: 1405 if (isThumb2) { 1406 if (!UseImm) 1407 CmpOpc = ARM::t2CMPrr; 1408 else 1409 CmpOpc = isNegativeImm ? ARM::t2CMNzri : ARM::t2CMPri; 1410 } else { 1411 if (!UseImm) 1412 CmpOpc = ARM::CMPrr; 1413 else 1414 CmpOpc = isNegativeImm ? ARM::CMNzri : ARM::CMPri; 1415 } 1416 break; 1417 } 1418 1419 unsigned SrcReg1 = getRegForValue(Src1Value); 1420 if (SrcReg1 == 0) return false; 1421 1422 unsigned SrcReg2 = 0; 1423 if (!UseImm) { 1424 SrcReg2 = getRegForValue(Src2Value); 1425 if (SrcReg2 == 0) return false; 1426 } 1427 1428 // We have i1, i8, or i16, we need to either zero extend or sign extend. 1429 if (needsExt) { 1430 unsigned ResultReg; 1431 ResultReg = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt); 1432 if (ResultReg == 0) return false; 1433 SrcReg1 = ResultReg; 1434 if (!UseImm) { 1435 ResultReg = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt); 1436 if (ResultReg == 0) return false; 1437 SrcReg2 = ResultReg; 1438 } 1439 } 1440 1441 if (!UseImm) { 1442 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1443 TII.get(CmpOpc)) 1444 .addReg(SrcReg1).addReg(SrcReg2)); 1445 } else { 1446 MachineInstrBuilder MIB; 1447 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc)) 1448 .addReg(SrcReg1); 1449 1450 // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0. 1451 if (isICmp) 1452 MIB.addImm(Imm); 1453 AddOptionalDefs(MIB); 1454 } 1455 1456 // For floating point we need to move the result to a comparison register 1457 // that we can then use for branches. 1458 if (Ty->isFloatTy() || Ty->isDoubleTy()) 1459 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1460 TII.get(ARM::FMSTAT))); 1461 return true; 1462} 1463 1464bool ARMFastISel::SelectCmp(const Instruction *I) { 1465 const CmpInst *CI = cast<CmpInst>(I); 1466 Type *Ty = CI->getOperand(0)->getType(); 1467 1468 // Get the compare predicate. 1469 ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate()); 1470 1471 // We may not handle every CC for now. 1472 if (ARMPred == ARMCC::AL) return false; 1473 1474 // Emit the compare. 1475 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) 1476 return false; 1477 1478 // Now set a register based on the comparison. Explicitly set the predicates 1479 // here. 1480 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; 1481 TargetRegisterClass *RC = isThumb2 ? ARM::rGPRRegisterClass 1482 : ARM::GPRRegisterClass; 1483 unsigned DestReg = createResultReg(RC); 1484 Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0); 1485 unsigned ZeroReg = TargetMaterializeConstant(Zero); 1486 bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy()); 1487 unsigned CondReg = isFloat ? ARM::FPSCR : ARM::CPSR; 1488 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg) 1489 .addReg(ZeroReg).addImm(1) 1490 .addImm(ARMPred).addReg(CondReg); 1491 1492 UpdateValueMap(I, DestReg); 1493 return true; 1494} 1495 1496bool ARMFastISel::SelectFPExt(const Instruction *I) { 1497 // Make sure we have VFP and that we're extending float to double. 
1498 if (!Subtarget->hasVFP2()) return false; 1499 1500 Value *V = I->getOperand(0); 1501 if (!I->getType()->isDoubleTy() || 1502 !V->getType()->isFloatTy()) return false; 1503 1504 unsigned Op = getRegForValue(V); 1505 if (Op == 0) return false; 1506 1507 unsigned Result = createResultReg(ARM::DPRRegisterClass); 1508 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1509 TII.get(ARM::VCVTDS), Result) 1510 .addReg(Op)); 1511 UpdateValueMap(I, Result); 1512 return true; 1513} 1514 1515bool ARMFastISel::SelectFPTrunc(const Instruction *I) { 1516 // Make sure we have VFP and that we're truncating double to float. 1517 if (!Subtarget->hasVFP2()) return false; 1518 1519 Value *V = I->getOperand(0); 1520 if (!(I->getType()->isFloatTy() && 1521 V->getType()->isDoubleTy())) return false; 1522 1523 unsigned Op = getRegForValue(V); 1524 if (Op == 0) return false; 1525 1526 unsigned Result = createResultReg(ARM::SPRRegisterClass); 1527 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1528 TII.get(ARM::VCVTSD), Result) 1529 .addReg(Op)); 1530 UpdateValueMap(I, Result); 1531 return true; 1532} 1533 1534bool ARMFastISel::SelectSIToFP(const Instruction *I) { 1535 // Make sure we have VFP. 1536 if (!Subtarget->hasVFP2()) return false; 1537 1538 MVT DstVT; 1539 Type *Ty = I->getType(); 1540 if (!isTypeLegal(Ty, DstVT)) 1541 return false; 1542 1543 Value *Src = I->getOperand(0); 1544 EVT SrcVT = TLI.getValueType(Src->getType(), true); 1545 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 1546 return false; 1547 1548 unsigned SrcReg = getRegForValue(Src); 1549 if (SrcReg == 0) return false; 1550 1551 // Handle sign-extension. 1552 if (SrcVT == MVT::i16 || SrcVT == MVT::i8) { 1553 EVT DestVT = MVT::i32; 1554 unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, /*isZExt*/ false); 1555 if (ResultReg == 0) return false; 1556 SrcReg = ResultReg; 1557 } 1558 1559 // The conversion routine works on fp-reg to fp-reg and the operand above 1560 // was an integer, move it to the fp registers if possible. 1561 unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg); 1562 if (FP == 0) return false; 1563 1564 unsigned Opc; 1565 if (Ty->isFloatTy()) Opc = ARM::VSITOS; 1566 else if (Ty->isDoubleTy()) Opc = ARM::VSITOD; 1567 else return false; 1568 1569 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT)); 1570 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 1571 ResultReg) 1572 .addReg(FP)); 1573 UpdateValueMap(I, ResultReg); 1574 return true; 1575} 1576 1577bool ARMFastISel::SelectFPToSI(const Instruction *I) { 1578 // Make sure we have VFP. 1579 if (!Subtarget->hasVFP2()) return false; 1580 1581 MVT DstVT; 1582 Type *RetTy = I->getType(); 1583 if (!isTypeLegal(RetTy, DstVT)) 1584 return false; 1585 1586 unsigned Op = getRegForValue(I->getOperand(0)); 1587 if (Op == 0) return false; 1588 1589 unsigned Opc; 1590 Type *OpTy = I->getOperand(0)->getType(); 1591 if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS; 1592 else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD; 1593 else return false; 1594 1595 // f64->s32 or f32->s32 both need an intermediate f32 reg. 1596 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32)); 1597 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 1598 ResultReg) 1599 .addReg(Op)); 1600 1601 // This result needs to be in an integer register, but the conversion only 1602 // takes place in fp-regs. 
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert (VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));

  unsigned MovCCOpc;
  if (!UseImm) {
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    if (!isNegativeImm) {
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    } else {
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
    }
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
  else
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectSDiv(const Instruction *I) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SDIV_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SDIV_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SDIV_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SDIV_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectSRem(const Instruction *I) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SREM_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SREM_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SREM_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SREM_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          TM.Options.FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        MVT DestVT = VA.getLocVT();
        unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT,
                                           /*isZExt*/false);
        assert (ResultReg != 0 && "Failed to emit a sext");
        Arg = ResultReg;
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::AExt:
        // Intentional fall-through.  Handle AExt and ZExt.
      case CCValAssign::ZExt: {
        MVT DestVT = VA.getLocVT();
        unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT,
                                           /*isZExt*/true);
        assert (ResultReg != 0 && "Failed to emit a zext");
        Arg = ResultReg;
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64) return false;

      CCValAssign &NextVA = ArgLocs[++i];

      // TODO: Only handle register args for now.
      if (!(VA.isRegLoc() && NextVA.isRegLoc())) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
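      // For example, under AAPCS the first four 32-bit arguments are assigned
      // to r0-r3 above, and the fifth lands at [sp, #0]; VA.getLocMemOffset()
      // would be 0 for that argument here.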
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
    }
  }
  return true;
}

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 && "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
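    // (For a simple i32 return this is r0 under both APCS and AAPCS; anything
    // assigned a stack location is punted back to SelectionDAG.)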
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVVT = TLI.getValueType(RV->getType());
    EVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      bool isZExt = Outs[0].Flags.isZExt();
      unsigned ResultReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, isZExt);
      if (ResultReg == 0) return false;
      SrcReg = ResultReg;
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) {
  // iOS needs the r9 versions of the opcodes.
  bool isiOS = Subtarget->isTargetIOS();
  if (isThumb2) {
    return isiOS ? ARM::tBLr9 : ARM::tBL;
  } else {
    return isiOS ? ARM::BLr9 : ARM::BL;
  }
}

// A quick function that will emit a call for a named libcall with the vector
// of passed arguments for the Instruction in I. We can assume that we can
// emit a call for any libcall we can produce. This is an abridged version
// of the full call infrastructure since we won't need to worry about things
// like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLr9 for iOS, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(NULL);
  if (isThumb2)
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
          .addExternalSymbol(TLI.getLibcallName(Call));
  else
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
                         .addExternalSymbol(TLI.getLibcallName(Call)));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = 0) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // Only handle global variable Callees.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV)
    return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgRegs.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip the
    // last two arguments, which shouldn't be passed to the underlying function.
    if (IntrMemName && e-i <= 2)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLr9 for iOS, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(GV);
  if (isThumb2) {
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)));
    if (!IntrMemName)
      MIB.addGlobalAddress(GV, 0, 0);
    else
      MIB.addExternalSymbol(IntrMemName, 0);
  } else {
    if (!IntrMemName)
      // Explicitly adding the predicate here.
      MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                   TII.get(CallOpc))
                           .addGlobalAddress(GV, 0, 0));
    else
      MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                   TII.get(CallOpc))
                           .addExternalSymbol(IntrMemName, 0));
  }

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}

bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len) {
  // Make sure we don't bloat code by inlining very large memcpy's.
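  // A small copy is emitted as a handful of integer load/store pairs; e.g. a
  // 7-byte memcpy becomes an i32, an i16 and an i8 load/store pair.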
  if (!ARMIsMemCpySmall(Len))
    return false;

  // We don't care about alignment here since we just emit integer accesses.
  while (Len) {
    MVT VT;
    if (Len >= 4)
      VT = MVT::i32;
    else if (Len >= 2)
      VT = MVT::i16;
    else {
      assert(Len == 1);
      VT = MVT::i8;
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert (RV == true && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert (RV == true && "Should be able to handle this store.");

    unsigned Size = VT.getSizeInBits()/8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}

bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Disable inlining for memmove before calls to ComputeAddress.  Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpy's are common enough that we want to do them without a
      // call if possible.
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
          return true;
      }
    }

    if (!MTI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
      return false;

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile.
    if (MSI.isVolatile())
      return false;

    if (!MSI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MSI.getDestAddressSpace() > 255)
      return false;

    return SelectCall(&I, "memset");
  }
  }
  return false;
}

bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
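  // E.g. 'trunc i32 %x to i8' simply reuses %x's register as the i8 value;
  // consumers that need the high bits cleared must emit the extend themselves.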
  UpdateValueMap(I, SrcReg);
  return true;
}

unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;

  unsigned Opc;
  bool isBoolZext = false;
  if (!SrcVT.isSimple()) return 0;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
    else
      Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
    break;
  case MVT::i8:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
    else
      Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
    break;
  case MVT::i1:
    if (isZExt) {
      Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      isBoolZext = true;
      break;
    }
    return 0;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
        .addReg(SrcReg);
  if (isBoolZext)
    MIB.addImm(1);
  else
    MIB.addImm(0);
  AddOptionalDefs(MIB);
  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(SrcTy, true);
  DestVT = TLI.getValueType(DestTy, true);

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}
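// For instance, 'zext i8 %c to i32' becomes a single UXTB (or t2UXTB in Thumb2
// mode), while 'zext i1 %b to i32' is emitted as an AND with #1, matching the
// cases handled in ARMEmitIntExt above.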

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectSIToFP(I);
    case Instruction::FPToSI:
      return SelectFPToSI(I);
    case Instruction::FAdd:
      return SelectBinaryOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectSDiv(I);
    case Instruction::SRem:
      return SelectSRem(I);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    default: break;
  }
  return false;
}

/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                                const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  // ldrb r1, [r0]       ldrb r1, [r0]
  // uxtb r2, r1     =>
  // mov  r3, r2         mov  r3, r1
  bool isZExt = true;
  switch (MI->getOpcode()) {
    default: return false;
    case ARM::SXTH:
    case ARM::t2SXTH:
      isZExt = false;
      // Intentional fall-through to the 16-bit type check.
    case ARM::UXTH:
    case ARM::t2UXTH:
      if (VT != MVT::i16)
        return false;
      break;
    case ARM::SXTB:
    case ARM::t2SXTB:
      isZExt = false;
      // Intentional fall-through to the 8-bit type check.
    case ARM::UXTB:
    case ARM::t2UXTB:
      if (VT != MVT::i8)
        return false;
      break;
  }
  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
    return false;
  MI->eraseFromParent();
  return true;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-iOS.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // iOS only for now, and not on Thumb1-only subtargets.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only() &&
        !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}