ARMFastISel.cpp revision 2b3b335f2d2886bbffa005998972de689a9f3e21
1//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the ARM-specific support for the FastISel class. Some 11// of the target-specific code is generated by tablegen in the file 12// ARMGenFastISel.inc, which is #included here. 13// 14//===----------------------------------------------------------------------===// 15 16#include "ARM.h" 17#include "ARMBaseInstrInfo.h" 18#include "ARMCallingConv.h" 19#include "ARMTargetMachine.h" 20#include "ARMSubtarget.h" 21#include "ARMConstantPoolValue.h" 22#include "MCTargetDesc/ARMAddressingModes.h" 23#include "llvm/CallingConv.h" 24#include "llvm/DerivedTypes.h" 25#include "llvm/GlobalVariable.h" 26#include "llvm/Instructions.h" 27#include "llvm/IntrinsicInst.h" 28#include "llvm/Module.h" 29#include "llvm/Operator.h" 30#include "llvm/CodeGen/Analysis.h" 31#include "llvm/CodeGen/FastISel.h" 32#include "llvm/CodeGen/FunctionLoweringInfo.h" 33#include "llvm/CodeGen/MachineInstrBuilder.h" 34#include "llvm/CodeGen/MachineModuleInfo.h" 35#include "llvm/CodeGen/MachineConstantPool.h" 36#include "llvm/CodeGen/MachineFrameInfo.h" 37#include "llvm/CodeGen/MachineMemOperand.h" 38#include "llvm/CodeGen/MachineRegisterInfo.h" 39#include "llvm/Support/CallSite.h" 40#include "llvm/Support/CommandLine.h" 41#include "llvm/Support/ErrorHandling.h" 42#include "llvm/Support/GetElementPtrTypeIterator.h" 43#include "llvm/Target/TargetData.h" 44#include "llvm/Target/TargetInstrInfo.h" 45#include "llvm/Target/TargetLowering.h" 46#include "llvm/Target/TargetMachine.h" 47#include "llvm/Target/TargetOptions.h" 48using namespace llvm; 49 50extern cl::opt<bool> EnableARMLongCalls; 51 52namespace { 53 54 // All possible address modes, plus some. 55 typedef struct Address { 56 enum { 57 RegBase, 58 FrameIndexBase 59 } BaseType; 60 61 union { 62 unsigned Reg; 63 int FI; 64 } Base; 65 66 int Offset; 67 68 // Innocuous defaults for our address. 69 Address() 70 : BaseType(RegBase), Offset(0) { 71 Base.Reg = 0; 72 } 73 } Address; 74 75class ARMFastISel : public FastISel { 76 77 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can 78 /// make the right decision when generating code for different targets. 79 const ARMSubtarget *Subtarget; 80 const TargetMachine &TM; 81 const TargetInstrInfo &TII; 82 const TargetLowering &TLI; 83 ARMFunctionInfo *AFI; 84 85 // Convenience variables to avoid some queries. 86 bool isThumb2; 87 LLVMContext *Context; 88 89 public: 90 explicit ARMFastISel(FunctionLoweringInfo &funcInfo) 91 : FastISel(funcInfo), 92 TM(funcInfo.MF->getTarget()), 93 TII(*TM.getInstrInfo()), 94 TLI(*TM.getTargetLowering()) { 95 Subtarget = &TM.getSubtarget<ARMSubtarget>(); 96 AFI = funcInfo.MF->getInfo<ARMFunctionInfo>(); 97 isThumb2 = AFI->isThumbFunction(); 98 Context = &funcInfo.Fn->getContext(); 99 } 100 101 // Code from FastISel.cpp. 102 virtual unsigned FastEmitInst_(unsigned MachineInstOpcode, 103 const TargetRegisterClass *RC); 104 virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode, 105 const TargetRegisterClass *RC, 106 unsigned Op0, bool Op0IsKill); 107 virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode, 108 const TargetRegisterClass *RC, 109 unsigned Op0, bool Op0IsKill, 110 unsigned Op1, bool Op1IsKill); 111 virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode, 112 const TargetRegisterClass *RC, 113 unsigned Op0, bool Op0IsKill, 114 unsigned Op1, bool Op1IsKill, 115 unsigned Op2, bool Op2IsKill); 116 virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode, 117 const TargetRegisterClass *RC, 118 unsigned Op0, bool Op0IsKill, 119 uint64_t Imm); 120 virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode, 121 const TargetRegisterClass *RC, 122 unsigned Op0, bool Op0IsKill, 123 const ConstantFP *FPImm); 124 virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode, 125 const TargetRegisterClass *RC, 126 unsigned Op0, bool Op0IsKill, 127 unsigned Op1, bool Op1IsKill, 128 uint64_t Imm); 129 virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode, 130 const TargetRegisterClass *RC, 131 uint64_t Imm); 132 virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode, 133 const TargetRegisterClass *RC, 134 uint64_t Imm1, uint64_t Imm2); 135 136 virtual unsigned FastEmitInst_extractsubreg(MVT RetVT, 137 unsigned Op0, bool Op0IsKill, 138 uint32_t Idx); 139 140 // Backend specific FastISel code. 141 virtual bool TargetSelectInstruction(const Instruction *I); 142 virtual unsigned TargetMaterializeConstant(const Constant *C); 143 virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI); 144 virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo, 145 const LoadInst *LI); 146 147 #include "ARMGenFastISel.inc" 148 149 // Instruction selection routines. 150 private: 151 bool SelectLoad(const Instruction *I); 152 bool SelectStore(const Instruction *I); 153 bool SelectBranch(const Instruction *I); 154 bool SelectIndirectBr(const Instruction *I); 155 bool SelectCmp(const Instruction *I); 156 bool SelectFPExt(const Instruction *I); 157 bool SelectFPTrunc(const Instruction *I); 158 bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode); 159 bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode); 160 bool SelectIToFP(const Instruction *I, bool isSigned); 161 bool SelectFPToI(const Instruction *I, bool isSigned); 162 bool SelectDiv(const Instruction *I, bool isSigned); 163 bool SelectRem(const Instruction *I, bool isSigned); 164 bool SelectCall(const Instruction *I, const char *IntrMemName); 165 bool SelectIntrinsicCall(const IntrinsicInst &I); 166 bool SelectSelect(const Instruction *I); 167 bool SelectRet(const Instruction *I); 168 bool SelectTrunc(const Instruction *I); 169 bool SelectIntExt(const Instruction *I); 170 171 // Utility routines. 172 private: 173 bool isTypeLegal(Type *Ty, MVT &VT); 174 bool isLoadTypeLegal(Type *Ty, MVT &VT); 175 bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, 176 bool isZExt); 177 bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr, 178 unsigned Alignment = 0, bool isZExt = true, 179 bool allocReg = true); 180 181 bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr, 182 unsigned Alignment = 0); 183 bool ARMComputeAddress(const Value *Obj, Address &Addr); 184 void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3); 185 bool ARMIsMemCpySmall(uint64_t Len); 186 bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len); 187 unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt); 188 unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT); 189 unsigned ARMMaterializeInt(const Constant *C, EVT VT); 190 unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT); 191 unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg); 192 unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg); 193 unsigned ARMSelectCallOp(const GlobalValue *GV); 194 195 // Call handling routines. 196 private: 197 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return); 198 bool ProcessCallArgs(SmallVectorImpl<Value*> &Args, 199 SmallVectorImpl<unsigned> &ArgRegs, 200 SmallVectorImpl<MVT> &ArgVTs, 201 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags, 202 SmallVectorImpl<unsigned> &RegArgs, 203 CallingConv::ID CC, 204 unsigned &NumBytes); 205 bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs, 206 const Instruction *I, CallingConv::ID CC, 207 unsigned &NumBytes); 208 bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call); 209 210 // OptionalDef handling routines. 211 private: 212 bool isARMNEONPred(const MachineInstr *MI); 213 bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR); 214 const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB); 215 void AddLoadStoreOperands(EVT VT, Address &Addr, 216 const MachineInstrBuilder &MIB, 217 unsigned Flags, bool useAM3); 218}; 219 220} // end anonymous namespace 221 222#include "ARMGenCallingConv.inc" 223 224// DefinesOptionalPredicate - This is different from DefinesPredicate in that 225// we don't care about implicit defs here, just places we'll need to add a 226// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR. 227bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) { 228 if (!MI->hasOptionalDef()) 229 return false; 230 231 // Look to see if our OptionalDef is defining CPSR or CCR. 232 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 233 const MachineOperand &MO = MI->getOperand(i); 234 if (!MO.isReg() || !MO.isDef()) continue; 235 if (MO.getReg() == ARM::CPSR) 236 *CPSR = true; 237 } 238 return true; 239} 240 241bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) { 242 const MCInstrDesc &MCID = MI->getDesc(); 243 244 // If we're a thumb2 or not NEON function we were handled via isPredicable. 245 if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON || 246 AFI->isThumb2Function()) 247 return false; 248 249 for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) 250 if (MCID.OpInfo[i].isPredicate()) 251 return true; 252 253 return false; 254} 255 256// If the machine is predicable go ahead and add the predicate operands, if 257// it needs default CC operands add those. 258// TODO: If we want to support thumb1 then we'll need to deal with optional 259// CPSR defs that need to be added before the remaining operands. See s_cc_out 260// for descriptions why. 261const MachineInstrBuilder & 262ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) { 263 MachineInstr *MI = &*MIB; 264 265 // Do we use a predicate? or... 266 // Are we NEON in ARM mode and have a predicate operand? If so, I know 267 // we're not predicable but add it anyways. 268 if (TII.isPredicable(MI) || isARMNEONPred(MI)) 269 AddDefaultPred(MIB); 270 271 // Do we optionally set a predicate? Preds is size > 0 iff the predicate 272 // defines CPSR. All other OptionalDefines in ARM are the CCR register. 273 bool CPSR = false; 274 if (DefinesOptionalPredicate(MI, &CPSR)) { 275 if (CPSR) 276 AddDefaultT1CC(MIB); 277 else 278 AddDefaultCC(MIB); 279 } 280 return MIB; 281} 282 283unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode, 284 const TargetRegisterClass* RC) { 285 unsigned ResultReg = createResultReg(RC); 286 const MCInstrDesc &II = TII.get(MachineInstOpcode); 287 288 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)); 289 return ResultReg; 290} 291 292unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode, 293 const TargetRegisterClass *RC, 294 unsigned Op0, bool Op0IsKill) { 295 unsigned ResultReg = createResultReg(RC); 296 const MCInstrDesc &II = TII.get(MachineInstOpcode); 297 298 if (II.getNumDefs() >= 1) { 299 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 300 .addReg(Op0, Op0IsKill * RegState::Kill)); 301 } else { 302 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 303 .addReg(Op0, Op0IsKill * RegState::Kill)); 304 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 305 TII.get(TargetOpcode::COPY), ResultReg) 306 .addReg(II.ImplicitDefs[0])); 307 } 308 return ResultReg; 309} 310 311unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode, 312 const TargetRegisterClass *RC, 313 unsigned Op0, bool Op0IsKill, 314 unsigned Op1, bool Op1IsKill) { 315 unsigned ResultReg = createResultReg(RC); 316 const MCInstrDesc &II = TII.get(MachineInstOpcode); 317 318 if (II.getNumDefs() >= 1) { 319 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 320 .addReg(Op0, Op0IsKill * RegState::Kill) 321 .addReg(Op1, Op1IsKill * RegState::Kill)); 322 } else { 323 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 324 .addReg(Op0, Op0IsKill * RegState::Kill) 325 .addReg(Op1, Op1IsKill * RegState::Kill)); 326 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 327 TII.get(TargetOpcode::COPY), ResultReg) 328 .addReg(II.ImplicitDefs[0])); 329 } 330 return ResultReg; 331} 332 333unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode, 334 const TargetRegisterClass *RC, 335 unsigned Op0, bool Op0IsKill, 336 unsigned Op1, bool Op1IsKill, 337 unsigned Op2, bool Op2IsKill) { 338 unsigned ResultReg = createResultReg(RC); 339 const MCInstrDesc &II = TII.get(MachineInstOpcode); 340 341 if (II.getNumDefs() >= 1) { 342 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 343 .addReg(Op0, Op0IsKill * RegState::Kill) 344 .addReg(Op1, Op1IsKill * RegState::Kill) 345 .addReg(Op2, Op2IsKill * RegState::Kill)); 346 } else { 347 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 348 .addReg(Op0, Op0IsKill * RegState::Kill) 349 .addReg(Op1, Op1IsKill * RegState::Kill) 350 .addReg(Op2, Op2IsKill * RegState::Kill)); 351 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 352 TII.get(TargetOpcode::COPY), ResultReg) 353 .addReg(II.ImplicitDefs[0])); 354 } 355 return ResultReg; 356} 357 358unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode, 359 const TargetRegisterClass *RC, 360 unsigned Op0, bool Op0IsKill, 361 uint64_t Imm) { 362 unsigned ResultReg = createResultReg(RC); 363 const MCInstrDesc &II = TII.get(MachineInstOpcode); 364 365 if (II.getNumDefs() >= 1) { 366 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 367 .addReg(Op0, Op0IsKill * RegState::Kill) 368 .addImm(Imm)); 369 } else { 370 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 371 .addReg(Op0, Op0IsKill * RegState::Kill) 372 .addImm(Imm)); 373 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 374 TII.get(TargetOpcode::COPY), ResultReg) 375 .addReg(II.ImplicitDefs[0])); 376 } 377 return ResultReg; 378} 379 380unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode, 381 const TargetRegisterClass *RC, 382 unsigned Op0, bool Op0IsKill, 383 const ConstantFP *FPImm) { 384 unsigned ResultReg = createResultReg(RC); 385 const MCInstrDesc &II = TII.get(MachineInstOpcode); 386 387 if (II.getNumDefs() >= 1) { 388 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 389 .addReg(Op0, Op0IsKill * RegState::Kill) 390 .addFPImm(FPImm)); 391 } else { 392 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 393 .addReg(Op0, Op0IsKill * RegState::Kill) 394 .addFPImm(FPImm)); 395 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 396 TII.get(TargetOpcode::COPY), ResultReg) 397 .addReg(II.ImplicitDefs[0])); 398 } 399 return ResultReg; 400} 401 402unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode, 403 const TargetRegisterClass *RC, 404 unsigned Op0, bool Op0IsKill, 405 unsigned Op1, bool Op1IsKill, 406 uint64_t Imm) { 407 unsigned ResultReg = createResultReg(RC); 408 const MCInstrDesc &II = TII.get(MachineInstOpcode); 409 410 if (II.getNumDefs() >= 1) { 411 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 412 .addReg(Op0, Op0IsKill * RegState::Kill) 413 .addReg(Op1, Op1IsKill * RegState::Kill) 414 .addImm(Imm)); 415 } else { 416 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 417 .addReg(Op0, Op0IsKill * RegState::Kill) 418 .addReg(Op1, Op1IsKill * RegState::Kill) 419 .addImm(Imm)); 420 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 421 TII.get(TargetOpcode::COPY), ResultReg) 422 .addReg(II.ImplicitDefs[0])); 423 } 424 return ResultReg; 425} 426 427unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode, 428 const TargetRegisterClass *RC, 429 uint64_t Imm) { 430 unsigned ResultReg = createResultReg(RC); 431 const MCInstrDesc &II = TII.get(MachineInstOpcode); 432 433 if (II.getNumDefs() >= 1) { 434 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 435 .addImm(Imm)); 436 } else { 437 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 438 .addImm(Imm)); 439 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 440 TII.get(TargetOpcode::COPY), ResultReg) 441 .addReg(II.ImplicitDefs[0])); 442 } 443 return ResultReg; 444} 445 446unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode, 447 const TargetRegisterClass *RC, 448 uint64_t Imm1, uint64_t Imm2) { 449 unsigned ResultReg = createResultReg(RC); 450 const MCInstrDesc &II = TII.get(MachineInstOpcode); 451 452 if (II.getNumDefs() >= 1) { 453 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) 454 .addImm(Imm1).addImm(Imm2)); 455 } else { 456 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) 457 .addImm(Imm1).addImm(Imm2)); 458 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 459 TII.get(TargetOpcode::COPY), 460 ResultReg) 461 .addReg(II.ImplicitDefs[0])); 462 } 463 return ResultReg; 464} 465 466unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT, 467 unsigned Op0, bool Op0IsKill, 468 uint32_t Idx) { 469 unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT)); 470 assert(TargetRegisterInfo::isVirtualRegister(Op0) && 471 "Cannot yet extract from physregs"); 472 473 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, 474 DL, TII.get(TargetOpcode::COPY), ResultReg) 475 .addReg(Op0, getKillRegState(Op0IsKill), Idx)); 476 return ResultReg; 477} 478 479// TODO: Don't worry about 64-bit now, but when this is fixed remove the 480// checks from the various callers. 481unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) { 482 if (VT == MVT::f64) return 0; 483 484 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT)); 485 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 486 TII.get(ARM::VMOVSR), MoveReg) 487 .addReg(SrcReg)); 488 return MoveReg; 489} 490 491unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) { 492 if (VT == MVT::i64) return 0; 493 494 unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT)); 495 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 496 TII.get(ARM::VMOVRS), MoveReg) 497 .addReg(SrcReg)); 498 return MoveReg; 499} 500 501// For double width floating point we need to materialize two constants 502// (the high and the low) into integer registers then use a move to get 503// the combined constant into an FP reg. 504unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) { 505 const APFloat Val = CFP->getValueAPF(); 506 bool is64bit = VT == MVT::f64; 507 508 // This checks to see if we can use VFP3 instructions to materialize 509 // a constant, otherwise we have to go through the constant pool. 510 if (TLI.isFPImmLegal(Val, VT)) { 511 int Imm; 512 unsigned Opc; 513 if (is64bit) { 514 Imm = ARM_AM::getFP64Imm(Val); 515 Opc = ARM::FCONSTD; 516 } else { 517 Imm = ARM_AM::getFP32Imm(Val); 518 Opc = ARM::FCONSTS; 519 } 520 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 521 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 522 DestReg) 523 .addImm(Imm)); 524 return DestReg; 525 } 526 527 // Require VFP2 for loading fp constants. 528 if (!Subtarget->hasVFP2()) return false; 529 530 // MachineConstantPool wants an explicit alignment. 531 unsigned Align = TD.getPrefTypeAlignment(CFP->getType()); 532 if (Align == 0) { 533 // TODO: Figure out if this is correct. 534 Align = TD.getTypeAllocSize(CFP->getType()); 535 } 536 unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align); 537 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 538 unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS; 539 540 // The extra reg is for addrmode5. 541 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 542 DestReg) 543 .addConstantPoolIndex(Idx) 544 .addReg(0)); 545 return DestReg; 546} 547 548unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) { 549 550 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1) 551 return false; 552 553 // If we can do this in a single instruction without a constant pool entry 554 // do so now. 555 const ConstantInt *CI = cast<ConstantInt>(C); 556 if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) { 557 unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16; 558 unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 559 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 560 TII.get(Opc), ImmReg) 561 .addImm(CI->getZExtValue())); 562 return ImmReg; 563 } 564 565 // Use MVN to emit negative constants. 566 if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) { 567 unsigned Imm = (unsigned)~(CI->getSExtValue()); 568 bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 569 (ARM_AM::getSOImmVal(Imm) != -1); 570 if (UseImm) { 571 unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi; 572 unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 573 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 574 TII.get(Opc), ImmReg) 575 .addImm(Imm)); 576 return ImmReg; 577 } 578 } 579 580 // Load from constant pool. For now 32-bit only. 581 if (VT != MVT::i32) 582 return false; 583 584 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 585 586 // MachineConstantPool wants an explicit alignment. 587 unsigned Align = TD.getPrefTypeAlignment(C->getType()); 588 if (Align == 0) { 589 // TODO: Figure out if this is correct. 590 Align = TD.getTypeAllocSize(C->getType()); 591 } 592 unsigned Idx = MCP.getConstantPoolIndex(C, Align); 593 594 if (isThumb2) 595 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 596 TII.get(ARM::t2LDRpci), DestReg) 597 .addConstantPoolIndex(Idx)); 598 else 599 // The extra immediate is for addrmode2. 600 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 601 TII.get(ARM::LDRcp), DestReg) 602 .addConstantPoolIndex(Idx) 603 .addImm(0)); 604 605 return DestReg; 606} 607 608unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) { 609 // For now 32-bit only. 610 if (VT != MVT::i32) return 0; 611 612 Reloc::Model RelocM = TM.getRelocationModel(); 613 614 // TODO: Need more magic for ARM PIC. 615 if (!isThumb2 && (RelocM == Reloc::PIC_)) return 0; 616 617 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); 618 619 // Use movw+movt when possible, it avoids constant pool entries. 620 // Darwin targets don't support movt with Reloc::Static, see 621 // ARMTargetLowering::LowerGlobalAddressDarwin. Other targets only support 622 // static movt relocations. 623 if (Subtarget->useMovt() && 624 Subtarget->isTargetDarwin() == (RelocM != Reloc::Static)) { 625 unsigned Opc; 626 switch (RelocM) { 627 case Reloc::PIC_: 628 Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel; 629 break; 630 case Reloc::DynamicNoPIC: 631 Opc = isThumb2 ? ARM::t2MOV_ga_dyn : ARM::MOV_ga_dyn; 632 break; 633 default: 634 Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm; 635 break; 636 } 637 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 638 DestReg).addGlobalAddress(GV)); 639 } else { 640 // MachineConstantPool wants an explicit alignment. 641 unsigned Align = TD.getPrefTypeAlignment(GV->getType()); 642 if (Align == 0) { 643 // TODO: Figure out if this is correct. 644 Align = TD.getTypeAllocSize(GV->getType()); 645 } 646 647 // Grab index. 648 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : 649 (Subtarget->isThumb() ? 4 : 8); 650 unsigned Id = AFI->createPICLabelUId(); 651 ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id, 652 ARMCP::CPValue, 653 PCAdj); 654 unsigned Idx = MCP.getConstantPoolIndex(CPV, Align); 655 656 // Load value. 657 MachineInstrBuilder MIB; 658 if (isThumb2) { 659 unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic; 660 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg) 661 .addConstantPoolIndex(Idx); 662 if (RelocM == Reloc::PIC_) 663 MIB.addImm(Id); 664 } else { 665 // The extra immediate is for addrmode2. 666 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp), 667 DestReg) 668 .addConstantPoolIndex(Idx) 669 .addImm(0); 670 } 671 AddOptionalDefs(MIB); 672 } 673 674 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) { 675 MachineInstrBuilder MIB; 676 unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); 677 if (isThumb2) 678 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 679 TII.get(ARM::t2LDRi12), NewDestReg) 680 .addReg(DestReg) 681 .addImm(0); 682 else 683 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12), 684 NewDestReg) 685 .addReg(DestReg) 686 .addImm(0); 687 DestReg = NewDestReg; 688 AddOptionalDefs(MIB); 689 } 690 691 return DestReg; 692} 693 694unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) { 695 EVT VT = TLI.getValueType(C->getType(), true); 696 697 // Only handle simple types. 698 if (!VT.isSimple()) return 0; 699 700 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) 701 return ARMMaterializeFP(CFP, VT); 702 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) 703 return ARMMaterializeGV(GV, VT); 704 else if (isa<ConstantInt>(C)) 705 return ARMMaterializeInt(C, VT); 706 707 return 0; 708} 709 710// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF); 711 712unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) { 713 // Don't handle dynamic allocas. 714 if (!FuncInfo.StaticAllocaMap.count(AI)) return 0; 715 716 MVT VT; 717 if (!isLoadTypeLegal(AI->getType(), VT)) return 0; 718 719 DenseMap<const AllocaInst*, int>::iterator SI = 720 FuncInfo.StaticAllocaMap.find(AI); 721 722 // This will get lowered later into the correct offsets and registers 723 // via rewriteXFrameIndex. 724 if (SI != FuncInfo.StaticAllocaMap.end()) { 725 const TargetRegisterClass* RC = TLI.getRegClassFor(VT); 726 unsigned ResultReg = createResultReg(RC); 727 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri; 728 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 729 TII.get(Opc), ResultReg) 730 .addFrameIndex(SI->second) 731 .addImm(0)); 732 return ResultReg; 733 } 734 735 return 0; 736} 737 738bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) { 739 EVT evt = TLI.getValueType(Ty, true); 740 741 // Only handle simple types. 742 if (evt == MVT::Other || !evt.isSimple()) return false; 743 VT = evt.getSimpleVT(); 744 745 // Handle all legal types, i.e. a register that will directly hold this 746 // value. 747 return TLI.isTypeLegal(VT); 748} 749 750bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) { 751 if (isTypeLegal(Ty, VT)) return true; 752 753 // If this is a type than can be sign or zero-extended to a basic operation 754 // go ahead and accept it now. 755 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16) 756 return true; 757 758 return false; 759} 760 761// Computes the address to get to an object. 762bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) { 763 // Some boilerplate from the X86 FastISel. 764 const User *U = NULL; 765 unsigned Opcode = Instruction::UserOp1; 766 if (const Instruction *I = dyn_cast<Instruction>(Obj)) { 767 // Don't walk into other basic blocks unless the object is an alloca from 768 // another block, otherwise it may not have a virtual register assigned. 769 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) || 770 FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) { 771 Opcode = I->getOpcode(); 772 U = I; 773 } 774 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) { 775 Opcode = C->getOpcode(); 776 U = C; 777 } 778 779 if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType())) 780 if (Ty->getAddressSpace() > 255) 781 // Fast instruction selection doesn't support the special 782 // address spaces. 783 return false; 784 785 switch (Opcode) { 786 default: 787 break; 788 case Instruction::BitCast: { 789 // Look through bitcasts. 790 return ARMComputeAddress(U->getOperand(0), Addr); 791 } 792 case Instruction::IntToPtr: { 793 // Look past no-op inttoptrs. 794 if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy()) 795 return ARMComputeAddress(U->getOperand(0), Addr); 796 break; 797 } 798 case Instruction::PtrToInt: { 799 // Look past no-op ptrtoints. 800 if (TLI.getValueType(U->getType()) == TLI.getPointerTy()) 801 return ARMComputeAddress(U->getOperand(0), Addr); 802 break; 803 } 804 case Instruction::GetElementPtr: { 805 Address SavedAddr = Addr; 806 int TmpOffset = Addr.Offset; 807 808 // Iterate through the GEP folding the constants into offsets where 809 // we can. 810 gep_type_iterator GTI = gep_type_begin(U); 811 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); 812 i != e; ++i, ++GTI) { 813 const Value *Op = *i; 814 if (StructType *STy = dyn_cast<StructType>(*GTI)) { 815 const StructLayout *SL = TD.getStructLayout(STy); 816 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue(); 817 TmpOffset += SL->getElementOffset(Idx); 818 } else { 819 uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType()); 820 for (;;) { 821 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) { 822 // Constant-offset addressing. 823 TmpOffset += CI->getSExtValue() * S; 824 break; 825 } 826 if (isa<AddOperator>(Op) && 827 (!isa<Instruction>(Op) || 828 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()] 829 == FuncInfo.MBB) && 830 isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) { 831 // An add (in the same block) with a constant operand. Fold the 832 // constant. 833 ConstantInt *CI = 834 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1)); 835 TmpOffset += CI->getSExtValue() * S; 836 // Iterate on the other operand. 837 Op = cast<AddOperator>(Op)->getOperand(0); 838 continue; 839 } 840 // Unsupported 841 goto unsupported_gep; 842 } 843 } 844 } 845 846 // Try to grab the base operand now. 847 Addr.Offset = TmpOffset; 848 if (ARMComputeAddress(U->getOperand(0), Addr)) return true; 849 850 // We failed, restore everything and try the other options. 851 Addr = SavedAddr; 852 853 unsupported_gep: 854 break; 855 } 856 case Instruction::Alloca: { 857 const AllocaInst *AI = cast<AllocaInst>(Obj); 858 DenseMap<const AllocaInst*, int>::iterator SI = 859 FuncInfo.StaticAllocaMap.find(AI); 860 if (SI != FuncInfo.StaticAllocaMap.end()) { 861 Addr.BaseType = Address::FrameIndexBase; 862 Addr.Base.FI = SI->second; 863 return true; 864 } 865 break; 866 } 867 } 868 869 // Try to get this in a register if nothing else has worked. 870 if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj); 871 return Addr.Base.Reg != 0; 872} 873 874void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) { 875 876 assert(VT.isSimple() && "Non-simple types are invalid here!"); 877 878 bool needsLowering = false; 879 switch (VT.getSimpleVT().SimpleTy) { 880 default: llvm_unreachable("Unhandled load/store type!"); 881 case MVT::i1: 882 case MVT::i8: 883 case MVT::i16: 884 case MVT::i32: 885 if (!useAM3) { 886 // Integer loads/stores handle 12-bit offsets. 887 needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset); 888 // Handle negative offsets. 889 if (needsLowering && isThumb2) 890 needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 && 891 Addr.Offset > -256); 892 } else { 893 // ARM halfword load/stores and signed byte loads use +/-imm8 offsets. 894 needsLowering = (Addr.Offset > 255 || Addr.Offset < -255); 895 } 896 break; 897 case MVT::f32: 898 case MVT::f64: 899 // Floating point operands handle 8-bit offsets. 900 needsLowering = ((Addr.Offset & 0xff) != Addr.Offset); 901 break; 902 } 903 904 // If this is a stack pointer and the offset needs to be simplified then 905 // put the alloca address into a register, set the base type back to 906 // register and continue. This should almost never happen. 907 if (needsLowering && Addr.BaseType == Address::FrameIndexBase) { 908 const TargetRegisterClass *RC = isThumb2 ? 909 (const TargetRegisterClass*)&ARM::tGPRRegClass : 910 (const TargetRegisterClass*)&ARM::GPRRegClass; 911 unsigned ResultReg = createResultReg(RC); 912 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri; 913 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 914 TII.get(Opc), ResultReg) 915 .addFrameIndex(Addr.Base.FI) 916 .addImm(0)); 917 Addr.Base.Reg = ResultReg; 918 Addr.BaseType = Address::RegBase; 919 } 920 921 // Since the offset is too large for the load/store instruction 922 // get the reg+offset into a register. 923 if (needsLowering) { 924 Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg, 925 /*Op0IsKill*/false, Addr.Offset, MVT::i32); 926 Addr.Offset = 0; 927 } 928} 929 930void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr, 931 const MachineInstrBuilder &MIB, 932 unsigned Flags, bool useAM3) { 933 // addrmode5 output depends on the selection dag addressing dividing the 934 // offset by 4 that it then later multiplies. Do this here as well. 935 if (VT.getSimpleVT().SimpleTy == MVT::f32 || 936 VT.getSimpleVT().SimpleTy == MVT::f64) 937 Addr.Offset /= 4; 938 939 // Frame base works a bit differently. Handle it separately. 940 if (Addr.BaseType == Address::FrameIndexBase) { 941 int FI = Addr.Base.FI; 942 int Offset = Addr.Offset; 943 MachineMemOperand *MMO = 944 FuncInfo.MF->getMachineMemOperand( 945 MachinePointerInfo::getFixedStack(FI, Offset), 946 Flags, 947 MFI.getObjectSize(FI), 948 MFI.getObjectAlignment(FI)); 949 // Now add the rest of the operands. 950 MIB.addFrameIndex(FI); 951 952 // ARM halfword load/stores and signed byte loads need an additional 953 // operand. 954 if (useAM3) { 955 signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset; 956 MIB.addReg(0); 957 MIB.addImm(Imm); 958 } else { 959 MIB.addImm(Addr.Offset); 960 } 961 MIB.addMemOperand(MMO); 962 } else { 963 // Now add the rest of the operands. 964 MIB.addReg(Addr.Base.Reg); 965 966 // ARM halfword load/stores and signed byte loads need an additional 967 // operand. 968 if (useAM3) { 969 signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset; 970 MIB.addReg(0); 971 MIB.addImm(Imm); 972 } else { 973 MIB.addImm(Addr.Offset); 974 } 975 } 976 AddOptionalDefs(MIB); 977} 978 979bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr, 980 unsigned Alignment, bool isZExt, bool allocReg) { 981 assert(VT.isSimple() && "Non-simple types are invalid here!"); 982 unsigned Opc; 983 bool useAM3 = false; 984 bool needVMOV = false; 985 const TargetRegisterClass *RC; 986 switch (VT.getSimpleVT().SimpleTy) { 987 // This is mostly going to be Neon/vector support. 988 default: return false; 989 case MVT::i1: 990 case MVT::i8: 991 if (isThumb2) { 992 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 993 Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8; 994 else 995 Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12; 996 } else { 997 if (isZExt) { 998 Opc = ARM::LDRBi12; 999 } else { 1000 Opc = ARM::LDRSB; 1001 useAM3 = true; 1002 } 1003 } 1004 RC = &ARM::GPRRegClass; 1005 break; 1006 case MVT::i16: 1007 if (isThumb2) { 1008 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1009 Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8; 1010 else 1011 Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12; 1012 } else { 1013 Opc = isZExt ? ARM::LDRH : ARM::LDRSH; 1014 useAM3 = true; 1015 } 1016 RC = &ARM::GPRRegClass; 1017 break; 1018 case MVT::i32: 1019 if (isThumb2) { 1020 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1021 Opc = ARM::t2LDRi8; 1022 else 1023 Opc = ARM::t2LDRi12; 1024 } else { 1025 Opc = ARM::LDRi12; 1026 } 1027 RC = &ARM::GPRRegClass; 1028 break; 1029 case MVT::f32: 1030 if (!Subtarget->hasVFP2()) return false; 1031 // Unaligned loads need special handling. Floats require word-alignment. 1032 if (Alignment && Alignment < 4) { 1033 needVMOV = true; 1034 VT = MVT::i32; 1035 Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12; 1036 RC = &ARM::GPRRegClass; 1037 } else { 1038 Opc = ARM::VLDRS; 1039 RC = TLI.getRegClassFor(VT); 1040 } 1041 break; 1042 case MVT::f64: 1043 if (!Subtarget->hasVFP2()) return false; 1044 // FIXME: Unaligned loads need special handling. Doublewords require 1045 // word-alignment. 1046 if (Alignment && Alignment < 4) 1047 return false; 1048 1049 Opc = ARM::VLDRD; 1050 RC = TLI.getRegClassFor(VT); 1051 break; 1052 } 1053 // Simplify this down to something we can handle. 1054 ARMSimplifyAddress(Addr, VT, useAM3); 1055 1056 // Create the base instruction, then add the operands. 1057 if (allocReg) 1058 ResultReg = createResultReg(RC); 1059 assert (ResultReg > 255 && "Expected an allocated virtual register."); 1060 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1061 TII.get(Opc), ResultReg); 1062 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3); 1063 1064 // If we had an unaligned load of a float we've converted it to an regular 1065 // load. Now we must move from the GRP to the FP register. 1066 if (needVMOV) { 1067 unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32)); 1068 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1069 TII.get(ARM::VMOVSR), MoveReg) 1070 .addReg(ResultReg)); 1071 ResultReg = MoveReg; 1072 } 1073 return true; 1074} 1075 1076bool ARMFastISel::SelectLoad(const Instruction *I) { 1077 // Atomic loads need special handling. 1078 if (cast<LoadInst>(I)->isAtomic()) 1079 return false; 1080 1081 // Verify we have a legal type before going any further. 1082 MVT VT; 1083 if (!isLoadTypeLegal(I->getType(), VT)) 1084 return false; 1085 1086 // See if we can handle this address. 1087 Address Addr; 1088 if (!ARMComputeAddress(I->getOperand(0), Addr)) return false; 1089 1090 unsigned ResultReg; 1091 if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment())) 1092 return false; 1093 UpdateValueMap(I, ResultReg); 1094 return true; 1095} 1096 1097bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr, 1098 unsigned Alignment) { 1099 unsigned StrOpc; 1100 bool useAM3 = false; 1101 switch (VT.getSimpleVT().SimpleTy) { 1102 // This is mostly going to be Neon/vector support. 1103 default: return false; 1104 case MVT::i1: { 1105 unsigned Res = createResultReg(isThumb2 ? 1106 (const TargetRegisterClass*)&ARM::tGPRRegClass : 1107 (const TargetRegisterClass*)&ARM::GPRRegClass); 1108 unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri; 1109 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1110 TII.get(Opc), Res) 1111 .addReg(SrcReg).addImm(1)); 1112 SrcReg = Res; 1113 } // Fallthrough here. 1114 case MVT::i8: 1115 if (isThumb2) { 1116 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1117 StrOpc = ARM::t2STRBi8; 1118 else 1119 StrOpc = ARM::t2STRBi12; 1120 } else { 1121 StrOpc = ARM::STRBi12; 1122 } 1123 break; 1124 case MVT::i16: 1125 if (isThumb2) { 1126 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1127 StrOpc = ARM::t2STRHi8; 1128 else 1129 StrOpc = ARM::t2STRHi12; 1130 } else { 1131 StrOpc = ARM::STRH; 1132 useAM3 = true; 1133 } 1134 break; 1135 case MVT::i32: 1136 if (isThumb2) { 1137 if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) 1138 StrOpc = ARM::t2STRi8; 1139 else 1140 StrOpc = ARM::t2STRi12; 1141 } else { 1142 StrOpc = ARM::STRi12; 1143 } 1144 break; 1145 case MVT::f32: 1146 if (!Subtarget->hasVFP2()) return false; 1147 // Unaligned stores need special handling. Floats require word-alignment. 1148 if (Alignment && Alignment < 4) { 1149 unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 1150 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1151 TII.get(ARM::VMOVRS), MoveReg) 1152 .addReg(SrcReg)); 1153 SrcReg = MoveReg; 1154 VT = MVT::i32; 1155 StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12; 1156 } else { 1157 StrOpc = ARM::VSTRS; 1158 } 1159 break; 1160 case MVT::f64: 1161 if (!Subtarget->hasVFP2()) return false; 1162 // FIXME: Unaligned stores need special handling. Doublewords require 1163 // word-alignment. 1164 if (Alignment && Alignment < 4) 1165 return false; 1166 1167 StrOpc = ARM::VSTRD; 1168 break; 1169 } 1170 // Simplify this down to something we can handle. 1171 ARMSimplifyAddress(Addr, VT, useAM3); 1172 1173 // Create the base instruction, then add the operands. 1174 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1175 TII.get(StrOpc)) 1176 .addReg(SrcReg); 1177 AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3); 1178 return true; 1179} 1180 1181bool ARMFastISel::SelectStore(const Instruction *I) { 1182 Value *Op0 = I->getOperand(0); 1183 unsigned SrcReg = 0; 1184 1185 // Atomic stores need special handling. 1186 if (cast<StoreInst>(I)->isAtomic()) 1187 return false; 1188 1189 // Verify we have a legal type before going any further. 1190 MVT VT; 1191 if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT)) 1192 return false; 1193 1194 // Get the value to be stored into a register. 1195 SrcReg = getRegForValue(Op0); 1196 if (SrcReg == 0) return false; 1197 1198 // See if we can handle this address. 1199 Address Addr; 1200 if (!ARMComputeAddress(I->getOperand(1), Addr)) 1201 return false; 1202 1203 if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment())) 1204 return false; 1205 return true; 1206} 1207 1208static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) { 1209 switch (Pred) { 1210 // Needs two compares... 1211 case CmpInst::FCMP_ONE: 1212 case CmpInst::FCMP_UEQ: 1213 default: 1214 // AL is our "false" for now. The other two need more compares. 1215 return ARMCC::AL; 1216 case CmpInst::ICMP_EQ: 1217 case CmpInst::FCMP_OEQ: 1218 return ARMCC::EQ; 1219 case CmpInst::ICMP_SGT: 1220 case CmpInst::FCMP_OGT: 1221 return ARMCC::GT; 1222 case CmpInst::ICMP_SGE: 1223 case CmpInst::FCMP_OGE: 1224 return ARMCC::GE; 1225 case CmpInst::ICMP_UGT: 1226 case CmpInst::FCMP_UGT: 1227 return ARMCC::HI; 1228 case CmpInst::FCMP_OLT: 1229 return ARMCC::MI; 1230 case CmpInst::ICMP_ULE: 1231 case CmpInst::FCMP_OLE: 1232 return ARMCC::LS; 1233 case CmpInst::FCMP_ORD: 1234 return ARMCC::VC; 1235 case CmpInst::FCMP_UNO: 1236 return ARMCC::VS; 1237 case CmpInst::FCMP_UGE: 1238 return ARMCC::PL; 1239 case CmpInst::ICMP_SLT: 1240 case CmpInst::FCMP_ULT: 1241 return ARMCC::LT; 1242 case CmpInst::ICMP_SLE: 1243 case CmpInst::FCMP_ULE: 1244 return ARMCC::LE; 1245 case CmpInst::FCMP_UNE: 1246 case CmpInst::ICMP_NE: 1247 return ARMCC::NE; 1248 case CmpInst::ICMP_UGE: 1249 return ARMCC::HS; 1250 case CmpInst::ICMP_ULT: 1251 return ARMCC::LO; 1252 } 1253} 1254 1255bool ARMFastISel::SelectBranch(const Instruction *I) { 1256 const BranchInst *BI = cast<BranchInst>(I); 1257 MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)]; 1258 MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)]; 1259 1260 // Simple branch support. 1261 1262 // If we can, avoid recomputing the compare - redoing it could lead to wonky 1263 // behavior. 1264 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) { 1265 if (CI->hasOneUse() && (CI->getParent() == I->getParent())) { 1266 1267 // Get the compare predicate. 1268 // Try to take advantage of fallthrough opportunities. 1269 CmpInst::Predicate Predicate = CI->getPredicate(); 1270 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 1271 std::swap(TBB, FBB); 1272 Predicate = CmpInst::getInversePredicate(Predicate); 1273 } 1274 1275 ARMCC::CondCodes ARMPred = getComparePred(Predicate); 1276 1277 // We may not handle every CC for now. 1278 if (ARMPred == ARMCC::AL) return false; 1279 1280 // Emit the compare. 1281 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) 1282 return false; 1283 1284 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; 1285 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) 1286 .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR); 1287 FastEmitBranch(FBB, DL); 1288 FuncInfo.MBB->addSuccessor(TBB); 1289 return true; 1290 } 1291 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) { 1292 MVT SourceVT; 1293 if (TI->hasOneUse() && TI->getParent() == I->getParent() && 1294 (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) { 1295 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri; 1296 unsigned OpReg = getRegForValue(TI->getOperand(0)); 1297 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1298 TII.get(TstOpc)) 1299 .addReg(OpReg).addImm(1)); 1300 1301 unsigned CCMode = ARMCC::NE; 1302 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 1303 std::swap(TBB, FBB); 1304 CCMode = ARMCC::EQ; 1305 } 1306 1307 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; 1308 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) 1309 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR); 1310 1311 FastEmitBranch(FBB, DL); 1312 FuncInfo.MBB->addSuccessor(TBB); 1313 return true; 1314 } 1315 } else if (const ConstantInt *CI = 1316 dyn_cast<ConstantInt>(BI->getCondition())) { 1317 uint64_t Imm = CI->getZExtValue(); 1318 MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB; 1319 FastEmitBranch(Target, DL); 1320 return true; 1321 } 1322 1323 unsigned CmpReg = getRegForValue(BI->getCondition()); 1324 if (CmpReg == 0) return false; 1325 1326 // We've been divorced from our compare! Our block was split, and 1327 // now our compare lives in a predecessor block. We musn't 1328 // re-compare here, as the children of the compare aren't guaranteed 1329 // live across the block boundary (we *could* check for this). 1330 // Regardless, the compare has been done in the predecessor block, 1331 // and it left a value for us in a virtual register. Ergo, we test 1332 // the one-bit value left in the virtual register. 1333 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri; 1334 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc)) 1335 .addReg(CmpReg).addImm(1)); 1336 1337 unsigned CCMode = ARMCC::NE; 1338 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 1339 std::swap(TBB, FBB); 1340 CCMode = ARMCC::EQ; 1341 } 1342 1343 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; 1344 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc)) 1345 .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR); 1346 FastEmitBranch(FBB, DL); 1347 FuncInfo.MBB->addSuccessor(TBB); 1348 return true; 1349} 1350 1351bool ARMFastISel::SelectIndirectBr(const Instruction *I) { 1352 unsigned AddrReg = getRegForValue(I->getOperand(0)); 1353 if (AddrReg == 0) return false; 1354 1355 unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX; 1356 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc)) 1357 .addReg(AddrReg)); 1358 return true; 1359} 1360 1361bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, 1362 bool isZExt) { 1363 Type *Ty = Src1Value->getType(); 1364 EVT SrcVT = TLI.getValueType(Ty, true); 1365 if (!SrcVT.isSimple()) return false; 1366 1367 bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy()); 1368 if (isFloat && !Subtarget->hasVFP2()) 1369 return false; 1370 1371 // Check to see if the 2nd operand is a constant that we can encode directly 1372 // in the compare. 1373 int Imm = 0; 1374 bool UseImm = false; 1375 bool isNegativeImm = false; 1376 // FIXME: At -O0 we don't have anything that canonicalizes operand order. 1377 // Thus, Src1Value may be a ConstantInt, but we're missing it. 1378 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) { 1379 if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 || 1380 SrcVT == MVT::i1) { 1381 const APInt &CIVal = ConstInt->getValue(); 1382 Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue(); 1383 // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather 1384 // then a cmn, because there is no way to represent 2147483648 as a 1385 // signed 32-bit int. 1386 if (Imm < 0 && Imm != (int)0x80000000) { 1387 isNegativeImm = true; 1388 Imm = -Imm; 1389 } 1390 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 1391 (ARM_AM::getSOImmVal(Imm) != -1); 1392 } 1393 } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) { 1394 if (SrcVT == MVT::f32 || SrcVT == MVT::f64) 1395 if (ConstFP->isZero() && !ConstFP->isNegative()) 1396 UseImm = true; 1397 } 1398 1399 unsigned CmpOpc; 1400 bool isICmp = true; 1401 bool needsExt = false; 1402 switch (SrcVT.getSimpleVT().SimpleTy) { 1403 default: return false; 1404 // TODO: Verify compares. 1405 case MVT::f32: 1406 isICmp = false; 1407 CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES; 1408 break; 1409 case MVT::f64: 1410 isICmp = false; 1411 CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED; 1412 break; 1413 case MVT::i1: 1414 case MVT::i8: 1415 case MVT::i16: 1416 needsExt = true; 1417 // Intentional fall-through. 1418 case MVT::i32: 1419 if (isThumb2) { 1420 if (!UseImm) 1421 CmpOpc = ARM::t2CMPrr; 1422 else 1423 CmpOpc = isNegativeImm ? ARM::t2CMNzri : ARM::t2CMPri; 1424 } else { 1425 if (!UseImm) 1426 CmpOpc = ARM::CMPrr; 1427 else 1428 CmpOpc = isNegativeImm ? ARM::CMNzri : ARM::CMPri; 1429 } 1430 break; 1431 } 1432 1433 unsigned SrcReg1 = getRegForValue(Src1Value); 1434 if (SrcReg1 == 0) return false; 1435 1436 unsigned SrcReg2 = 0; 1437 if (!UseImm) { 1438 SrcReg2 = getRegForValue(Src2Value); 1439 if (SrcReg2 == 0) return false; 1440 } 1441 1442 // We have i1, i8, or i16, we need to either zero extend or sign extend. 1443 if (needsExt) { 1444 SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt); 1445 if (SrcReg1 == 0) return false; 1446 if (!UseImm) { 1447 SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt); 1448 if (SrcReg2 == 0) return false; 1449 } 1450 } 1451 1452 if (!UseImm) { 1453 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1454 TII.get(CmpOpc)) 1455 .addReg(SrcReg1).addReg(SrcReg2)); 1456 } else { 1457 MachineInstrBuilder MIB; 1458 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc)) 1459 .addReg(SrcReg1); 1460 1461 // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0. 1462 if (isICmp) 1463 MIB.addImm(Imm); 1464 AddOptionalDefs(MIB); 1465 } 1466 1467 // For floating point we need to move the result to a comparison register 1468 // that we can then use for branches. 1469 if (Ty->isFloatTy() || Ty->isDoubleTy()) 1470 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1471 TII.get(ARM::FMSTAT))); 1472 return true; 1473} 1474 1475bool ARMFastISel::SelectCmp(const Instruction *I) { 1476 const CmpInst *CI = cast<CmpInst>(I); 1477 1478 // Get the compare predicate. 1479 ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate()); 1480 1481 // We may not handle every CC for now. 1482 if (ARMPred == ARMCC::AL) return false; 1483 1484 // Emit the compare. 1485 if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) 1486 return false; 1487 1488 // Now set a register based on the comparison. Explicitly set the predicates 1489 // here. 1490 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; 1491 const TargetRegisterClass *RC = isThumb2 ? 1492 (const TargetRegisterClass*)&ARM::rGPRRegClass : 1493 (const TargetRegisterClass*)&ARM::GPRRegClass; 1494 unsigned DestReg = createResultReg(RC); 1495 Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0); 1496 unsigned ZeroReg = TargetMaterializeConstant(Zero); 1497 // ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR. 1498 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg) 1499 .addReg(ZeroReg).addImm(1) 1500 .addImm(ARMPred).addReg(ARM::CPSR); 1501 1502 UpdateValueMap(I, DestReg); 1503 return true; 1504} 1505 1506bool ARMFastISel::SelectFPExt(const Instruction *I) { 1507 // Make sure we have VFP and that we're extending float to double. 1508 if (!Subtarget->hasVFP2()) return false; 1509 1510 Value *V = I->getOperand(0); 1511 if (!I->getType()->isDoubleTy() || 1512 !V->getType()->isFloatTy()) return false; 1513 1514 unsigned Op = getRegForValue(V); 1515 if (Op == 0) return false; 1516 1517 unsigned Result = createResultReg(&ARM::DPRRegClass); 1518 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1519 TII.get(ARM::VCVTDS), Result) 1520 .addReg(Op)); 1521 UpdateValueMap(I, Result); 1522 return true; 1523} 1524 1525bool ARMFastISel::SelectFPTrunc(const Instruction *I) { 1526 // Make sure we have VFP and that we're truncating double to float. 1527 if (!Subtarget->hasVFP2()) return false; 1528 1529 Value *V = I->getOperand(0); 1530 if (!(I->getType()->isFloatTy() && 1531 V->getType()->isDoubleTy())) return false; 1532 1533 unsigned Op = getRegForValue(V); 1534 if (Op == 0) return false; 1535 1536 unsigned Result = createResultReg(&ARM::SPRRegClass); 1537 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1538 TII.get(ARM::VCVTSD), Result) 1539 .addReg(Op)); 1540 UpdateValueMap(I, Result); 1541 return true; 1542} 1543 1544bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) { 1545 // Make sure we have VFP. 1546 if (!Subtarget->hasVFP2()) return false; 1547 1548 MVT DstVT; 1549 Type *Ty = I->getType(); 1550 if (!isTypeLegal(Ty, DstVT)) 1551 return false; 1552 1553 Value *Src = I->getOperand(0); 1554 EVT SrcVT = TLI.getValueType(Src->getType(), true); 1555 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 1556 return false; 1557 1558 unsigned SrcReg = getRegForValue(Src); 1559 if (SrcReg == 0) return false; 1560 1561 // Handle sign-extension. 1562 if (SrcVT == MVT::i16 || SrcVT == MVT::i8) { 1563 EVT DestVT = MVT::i32; 1564 SrcReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, 1565 /*isZExt*/!isSigned); 1566 if (SrcReg == 0) return false; 1567 } 1568 1569 // The conversion routine works on fp-reg to fp-reg and the operand above 1570 // was an integer, move it to the fp registers if possible. 1571 unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg); 1572 if (FP == 0) return false; 1573 1574 unsigned Opc; 1575 if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS; 1576 else if (Ty->isDoubleTy()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD; 1577 else return false; 1578 1579 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT)); 1580 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 1581 ResultReg) 1582 .addReg(FP)); 1583 UpdateValueMap(I, ResultReg); 1584 return true; 1585} 1586 1587bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) { 1588 // Make sure we have VFP. 1589 if (!Subtarget->hasVFP2()) return false; 1590 1591 MVT DstVT; 1592 Type *RetTy = I->getType(); 1593 if (!isTypeLegal(RetTy, DstVT)) 1594 return false; 1595 1596 unsigned Op = getRegForValue(I->getOperand(0)); 1597 if (Op == 0) return false; 1598 1599 unsigned Opc; 1600 Type *OpTy = I->getOperand(0)->getType(); 1601 if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS; 1602 else if (OpTy->isDoubleTy()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD; 1603 else return false; 1604 1605 // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg. 1606 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32)); 1607 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), 1608 ResultReg) 1609 .addReg(Op)); 1610 1611 // This result needs to be in an integer register, but the conversion only 1612 // takes place in fp-regs. 1613 unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg); 1614 if (IntReg == 0) return false; 1615 1616 UpdateValueMap(I, IntReg); 1617 return true; 1618} 1619 1620bool ARMFastISel::SelectSelect(const Instruction *I) { 1621 MVT VT; 1622 if (!isTypeLegal(I->getType(), VT)) 1623 return false; 1624 1625 // Things need to be register sized for register moves. 1626 if (VT != MVT::i32) return false; 1627 const TargetRegisterClass *RC = TLI.getRegClassFor(VT); 1628 1629 unsigned CondReg = getRegForValue(I->getOperand(0)); 1630 if (CondReg == 0) return false; 1631 unsigned Op1Reg = getRegForValue(I->getOperand(1)); 1632 if (Op1Reg == 0) return false; 1633 1634 // Check to see if we can use an immediate in the conditional move. 1635 int Imm = 0; 1636 bool UseImm = false; 1637 bool isNegativeImm = false; 1638 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) { 1639 assert (VT == MVT::i32 && "Expecting an i32."); 1640 Imm = (int)ConstInt->getValue().getZExtValue(); 1641 if (Imm < 0) { 1642 isNegativeImm = true; 1643 Imm = ~Imm; 1644 } 1645 UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : 1646 (ARM_AM::getSOImmVal(Imm) != -1); 1647 } 1648 1649 unsigned Op2Reg = 0; 1650 if (!UseImm) { 1651 Op2Reg = getRegForValue(I->getOperand(2)); 1652 if (Op2Reg == 0) return false; 1653 } 1654 1655 unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri; 1656 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc)) 1657 .addReg(CondReg).addImm(0)); 1658 1659 unsigned MovCCOpc; 1660 if (!UseImm) { 1661 MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr; 1662 } else { 1663 if (!isNegativeImm) { 1664 MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi; 1665 } else { 1666 MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi; 1667 } 1668 } 1669 unsigned ResultReg = createResultReg(RC); 1670 if (!UseImm) 1671 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg) 1672 .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR); 1673 else 1674 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg) 1675 .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR); 1676 UpdateValueMap(I, ResultReg); 1677 return true; 1678} 1679 1680bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) { 1681 MVT VT; 1682 Type *Ty = I->getType(); 1683 if (!isTypeLegal(Ty, VT)) 1684 return false; 1685 1686 // If we have integer div support we should have selected this automagically. 1687 // In case we have a real miss go ahead and return false and we'll pick 1688 // it up later. 1689 if (Subtarget->hasDivide()) return false; 1690 1691 // Otherwise emit a libcall. 1692 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 1693 if (VT == MVT::i8) 1694 LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8; 1695 else if (VT == MVT::i16) 1696 LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16; 1697 else if (VT == MVT::i32) 1698 LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32; 1699 else if (VT == MVT::i64) 1700 LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64; 1701 else if (VT == MVT::i128) 1702 LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128; 1703 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!"); 1704 1705 return ARMEmitLibcall(I, LC); 1706} 1707 1708bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) { 1709 MVT VT; 1710 Type *Ty = I->getType(); 1711 if (!isTypeLegal(Ty, VT)) 1712 return false; 1713 1714 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 1715 if (VT == MVT::i8) 1716 LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8; 1717 else if (VT == MVT::i16) 1718 LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16; 1719 else if (VT == MVT::i32) 1720 LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32; 1721 else if (VT == MVT::i64) 1722 LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64; 1723 else if (VT == MVT::i128) 1724 LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128; 1725 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!"); 1726 1727 return ARMEmitLibcall(I, LC); 1728} 1729 1730bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) { 1731 EVT DestVT = TLI.getValueType(I->getType(), true); 1732 1733 // We can get here in the case when we have a binary operation on a non-legal 1734 // type and the target independent selector doesn't know how to handle it. 1735 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1) 1736 return false; 1737 1738 unsigned Opc; 1739 switch (ISDOpcode) { 1740 default: return false; 1741 case ISD::ADD: 1742 Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr; 1743 break; 1744 case ISD::OR: 1745 Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr; 1746 break; 1747 case ISD::SUB: 1748 Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr; 1749 break; 1750 } 1751 1752 unsigned SrcReg1 = getRegForValue(I->getOperand(0)); 1753 if (SrcReg1 == 0) return false; 1754 1755 // TODO: Often the 2nd operand is an immediate, which can be encoded directly 1756 // in the instruction, rather then materializing the value in a register. 1757 unsigned SrcReg2 = getRegForValue(I->getOperand(1)); 1758 if (SrcReg2 == 0) return false; 1759 1760 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 1761 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1762 TII.get(Opc), ResultReg) 1763 .addReg(SrcReg1).addReg(SrcReg2)); 1764 UpdateValueMap(I, ResultReg); 1765 return true; 1766} 1767 1768bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) { 1769 EVT VT = TLI.getValueType(I->getType(), true); 1770 1771 // We can get here in the case when we want to use NEON for our fp 1772 // operations, but can't figure out how to. Just use the vfp instructions 1773 // if we have them. 1774 // FIXME: It'd be nice to use NEON instructions. 1775 Type *Ty = I->getType(); 1776 bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy()); 1777 if (isFloat && !Subtarget->hasVFP2()) 1778 return false; 1779 1780 unsigned Opc; 1781 bool is64bit = VT == MVT::f64 || VT == MVT::i64; 1782 switch (ISDOpcode) { 1783 default: return false; 1784 case ISD::FADD: 1785 Opc = is64bit ? ARM::VADDD : ARM::VADDS; 1786 break; 1787 case ISD::FSUB: 1788 Opc = is64bit ? ARM::VSUBD : ARM::VSUBS; 1789 break; 1790 case ISD::FMUL: 1791 Opc = is64bit ? ARM::VMULD : ARM::VMULS; 1792 break; 1793 } 1794 unsigned Op1 = getRegForValue(I->getOperand(0)); 1795 if (Op1 == 0) return false; 1796 1797 unsigned Op2 = getRegForValue(I->getOperand(1)); 1798 if (Op2 == 0) return false; 1799 1800 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT)); 1801 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1802 TII.get(Opc), ResultReg) 1803 .addReg(Op1).addReg(Op2)); 1804 UpdateValueMap(I, ResultReg); 1805 return true; 1806} 1807 1808// Call Handling Code 1809 1810// This is largely taken directly from CCAssignFnForNode - we don't support 1811// varargs in FastISel so that part has been removed. 1812// TODO: We may not support all of this. 1813CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) { 1814 switch (CC) { 1815 default: 1816 llvm_unreachable("Unsupported calling convention"); 1817 case CallingConv::Fast: 1818 // Ignore fastcc. Silence compiler warnings. 1819 (void)RetFastCC_ARM_APCS; 1820 (void)FastCC_ARM_APCS; 1821 // Fallthrough 1822 case CallingConv::C: 1823 // Use target triple & subtarget features to do actual dispatch. 1824 if (Subtarget->isAAPCS_ABI()) { 1825 if (Subtarget->hasVFP2() && 1826 TM.Options.FloatABIType == FloatABI::Hard) 1827 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1828 else 1829 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1830 } else 1831 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS); 1832 case CallingConv::ARM_AAPCS_VFP: 1833 return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); 1834 case CallingConv::ARM_AAPCS: 1835 return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS); 1836 case CallingConv::ARM_APCS: 1837 return (Return ? RetCC_ARM_APCS: CC_ARM_APCS); 1838 } 1839} 1840 1841bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args, 1842 SmallVectorImpl<unsigned> &ArgRegs, 1843 SmallVectorImpl<MVT> &ArgVTs, 1844 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags, 1845 SmallVectorImpl<unsigned> &RegArgs, 1846 CallingConv::ID CC, 1847 unsigned &NumBytes) { 1848 SmallVector<CCValAssign, 16> ArgLocs; 1849 CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context); 1850 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false)); 1851 1852 // Check that we can handle all of the arguments. If we can't, then bail out 1853 // now before we add code to the MBB. 1854 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1855 CCValAssign &VA = ArgLocs[i]; 1856 MVT ArgVT = ArgVTs[VA.getValNo()]; 1857 1858 // We don't handle NEON/vector parameters yet. 1859 if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64) 1860 return false; 1861 1862 // Now copy/store arg to correct locations. 1863 if (VA.isRegLoc() && !VA.needsCustom()) { 1864 continue; 1865 } else if (VA.needsCustom()) { 1866 // TODO: We need custom lowering for vector (v2f64) args. 1867 if (VA.getLocVT() != MVT::f64 || 1868 // TODO: Only handle register args for now. 1869 !VA.isRegLoc() || !ArgLocs[++i].isRegLoc()) 1870 return false; 1871 } else { 1872 switch (static_cast<EVT>(ArgVT).getSimpleVT().SimpleTy) { 1873 default: 1874 return false; 1875 case MVT::i1: 1876 case MVT::i8: 1877 case MVT::i16: 1878 case MVT::i32: 1879 break; 1880 case MVT::f32: 1881 if (!Subtarget->hasVFP2()) 1882 return false; 1883 break; 1884 case MVT::f64: 1885 if (!Subtarget->hasVFP2()) 1886 return false; 1887 break; 1888 } 1889 } 1890 } 1891 1892 // At the point, we are able to handle the call's arguments in fast isel. 1893 1894 // Get a count of how many bytes are to be pushed on the stack. 1895 NumBytes = CCInfo.getNextStackOffset(); 1896 1897 // Issue CALLSEQ_START 1898 unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); 1899 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1900 TII.get(AdjStackDown)) 1901 .addImm(NumBytes)); 1902 1903 // Process the args. 1904 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1905 CCValAssign &VA = ArgLocs[i]; 1906 unsigned Arg = ArgRegs[VA.getValNo()]; 1907 MVT ArgVT = ArgVTs[VA.getValNo()]; 1908 1909 assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) && 1910 "We don't handle NEON/vector parameters yet."); 1911 1912 // Handle arg promotion, etc. 1913 switch (VA.getLocInfo()) { 1914 case CCValAssign::Full: break; 1915 case CCValAssign::SExt: { 1916 MVT DestVT = VA.getLocVT(); 1917 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false); 1918 assert (Arg != 0 && "Failed to emit a sext"); 1919 ArgVT = DestVT; 1920 break; 1921 } 1922 case CCValAssign::AExt: 1923 // Intentional fall-through. Handle AExt and ZExt. 1924 case CCValAssign::ZExt: { 1925 MVT DestVT = VA.getLocVT(); 1926 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true); 1927 assert (Arg != 0 && "Failed to emit a sext"); 1928 ArgVT = DestVT; 1929 break; 1930 } 1931 case CCValAssign::BCvt: { 1932 unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg, 1933 /*TODO: Kill=*/false); 1934 assert(BC != 0 && "Failed to emit a bitcast!"); 1935 Arg = BC; 1936 ArgVT = VA.getLocVT(); 1937 break; 1938 } 1939 default: llvm_unreachable("Unknown arg promotion!"); 1940 } 1941 1942 // Now copy/store arg to correct locations. 1943 if (VA.isRegLoc() && !VA.needsCustom()) { 1944 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 1945 VA.getLocReg()) 1946 .addReg(Arg); 1947 RegArgs.push_back(VA.getLocReg()); 1948 } else if (VA.needsCustom()) { 1949 // TODO: We need custom lowering for vector (v2f64) args. 1950 assert(VA.getLocVT() == MVT::f64 && 1951 "Custom lowering for v2f64 args not available"); 1952 1953 CCValAssign &NextVA = ArgLocs[++i]; 1954 1955 assert(VA.isRegLoc() && NextVA.isRegLoc() && 1956 "We only handle register args!"); 1957 1958 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1959 TII.get(ARM::VMOVRRD), VA.getLocReg()) 1960 .addReg(NextVA.getLocReg(), RegState::Define) 1961 .addReg(Arg)); 1962 RegArgs.push_back(VA.getLocReg()); 1963 RegArgs.push_back(NextVA.getLocReg()); 1964 } else { 1965 assert(VA.isMemLoc()); 1966 // Need to store on the stack. 1967 Address Addr; 1968 Addr.BaseType = Address::RegBase; 1969 Addr.Base.Reg = ARM::SP; 1970 Addr.Offset = VA.getLocMemOffset(); 1971 1972 bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet; 1973 assert(EmitRet && "Could not emit a store for argument!"); 1974 } 1975 } 1976 1977 return true; 1978} 1979 1980bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs, 1981 const Instruction *I, CallingConv::ID CC, 1982 unsigned &NumBytes) { 1983 // Issue CALLSEQ_END 1984 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); 1985 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 1986 TII.get(AdjStackUp)) 1987 .addImm(NumBytes).addImm(0)); 1988 1989 // Now the return value. 1990 if (RetVT != MVT::isVoid) { 1991 SmallVector<CCValAssign, 16> RVLocs; 1992 CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context); 1993 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true)); 1994 1995 // Copy all of the result registers out of their specified physreg. 1996 if (RVLocs.size() == 2 && RetVT == MVT::f64) { 1997 // For this move we copy into two registers and then move into the 1998 // double fp reg we want. 1999 EVT DestVT = RVLocs[0].getValVT(); 2000 const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT); 2001 unsigned ResultReg = createResultReg(DstRC); 2002 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2003 TII.get(ARM::VMOVDRR), ResultReg) 2004 .addReg(RVLocs[0].getLocReg()) 2005 .addReg(RVLocs[1].getLocReg())); 2006 2007 UsedRegs.push_back(RVLocs[0].getLocReg()); 2008 UsedRegs.push_back(RVLocs[1].getLocReg()); 2009 2010 // Finally update the result. 2011 UpdateValueMap(I, ResultReg); 2012 } else { 2013 assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!"); 2014 EVT CopyVT = RVLocs[0].getValVT(); 2015 2016 // Special handling for extended integers. 2017 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16) 2018 CopyVT = MVT::i32; 2019 2020 const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT); 2021 2022 unsigned ResultReg = createResultReg(DstRC); 2023 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 2024 ResultReg).addReg(RVLocs[0].getLocReg()); 2025 UsedRegs.push_back(RVLocs[0].getLocReg()); 2026 2027 // Finally update the result. 2028 UpdateValueMap(I, ResultReg); 2029 } 2030 } 2031 2032 return true; 2033} 2034 2035bool ARMFastISel::SelectRet(const Instruction *I) { 2036 const ReturnInst *Ret = cast<ReturnInst>(I); 2037 const Function &F = *I->getParent()->getParent(); 2038 2039 if (!FuncInfo.CanLowerReturn) 2040 return false; 2041 2042 if (F.isVarArg()) 2043 return false; 2044 2045 CallingConv::ID CC = F.getCallingConv(); 2046 if (Ret->getNumOperands() > 0) { 2047 SmallVector<ISD::OutputArg, 4> Outs; 2048 GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(), 2049 Outs, TLI); 2050 2051 // Analyze operands of the call, assigning locations to each operand. 2052 SmallVector<CCValAssign, 16> ValLocs; 2053 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,I->getContext()); 2054 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */)); 2055 2056 const Value *RV = Ret->getOperand(0); 2057 unsigned Reg = getRegForValue(RV); 2058 if (Reg == 0) 2059 return false; 2060 2061 // Only handle a single return value for now. 2062 if (ValLocs.size() != 1) 2063 return false; 2064 2065 CCValAssign &VA = ValLocs[0]; 2066 2067 // Don't bother handling odd stuff for now. 2068 if (VA.getLocInfo() != CCValAssign::Full) 2069 return false; 2070 // Only handle register returns for now. 2071 if (!VA.isRegLoc()) 2072 return false; 2073 2074 unsigned SrcReg = Reg + VA.getValNo(); 2075 EVT RVVT = TLI.getValueType(RV->getType()); 2076 EVT DestVT = VA.getValVT(); 2077 // Special handling for extended integers. 2078 if (RVVT != DestVT) { 2079 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16) 2080 return false; 2081 2082 assert(DestVT == MVT::i32 && "ARM should always ext to i32"); 2083 2084 // Perform extension if flagged as either zext or sext. Otherwise, do 2085 // nothing. 2086 if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) { 2087 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt()); 2088 if (SrcReg == 0) return false; 2089 } 2090 } 2091 2092 // Make the copy. 2093 unsigned DstReg = VA.getLocReg(); 2094 const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg); 2095 // Avoid a cross-class copy. This is very unlikely. 2096 if (!SrcRC->contains(DstReg)) 2097 return false; 2098 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), 2099 DstReg).addReg(SrcReg); 2100 2101 // Mark the register as live out of the function. 2102 MRI.addLiveOut(VA.getLocReg()); 2103 } 2104 2105 unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET; 2106 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2107 TII.get(RetOpc))); 2108 return true; 2109} 2110 2111unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) { 2112 return isThumb2 ? ARM::tBL : ARM::BL; 2113} 2114 2115// A quick function that will emit a call for a named libcall in F with the 2116// vector of passed arguments for the Instruction in I. We can assume that we 2117// can emit a call for any libcall we can produce. This is an abridged version 2118// of the full call infrastructure since we won't need to worry about things 2119// like computed function pointers or strange arguments at call sites. 2120// TODO: Try to unify this and the normal call bits for ARM, then try to unify 2121// with X86. 2122bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) { 2123 CallingConv::ID CC = TLI.getLibcallCallingConv(Call); 2124 2125 // Handle *simple* calls for now. 2126 Type *RetTy = I->getType(); 2127 MVT RetVT; 2128 if (RetTy->isVoidTy()) 2129 RetVT = MVT::isVoid; 2130 else if (!isTypeLegal(RetTy, RetVT)) 2131 return false; 2132 2133 // TODO: For now if we have long calls specified we don't handle the call. 2134 if (EnableARMLongCalls) return false; 2135 2136 // Can't handle non-double multi-reg retvals. 2137 if (RetVT != MVT::isVoid && RetVT != MVT::i32) { 2138 SmallVector<CCValAssign, 16> RVLocs; 2139 CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context); 2140 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true)); 2141 if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2142 return false; 2143 } 2144 2145 // Set up the argument vectors. 2146 SmallVector<Value*, 8> Args; 2147 SmallVector<unsigned, 8> ArgRegs; 2148 SmallVector<MVT, 8> ArgVTs; 2149 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2150 Args.reserve(I->getNumOperands()); 2151 ArgRegs.reserve(I->getNumOperands()); 2152 ArgVTs.reserve(I->getNumOperands()); 2153 ArgFlags.reserve(I->getNumOperands()); 2154 for (unsigned i = 0; i < I->getNumOperands(); ++i) { 2155 Value *Op = I->getOperand(i); 2156 unsigned Arg = getRegForValue(Op); 2157 if (Arg == 0) return false; 2158 2159 Type *ArgTy = Op->getType(); 2160 MVT ArgVT; 2161 if (!isTypeLegal(ArgTy, ArgVT)) return false; 2162 2163 ISD::ArgFlagsTy Flags; 2164 unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy); 2165 Flags.setOrigAlign(OriginalAlignment); 2166 2167 Args.push_back(Op); 2168 ArgRegs.push_back(Arg); 2169 ArgVTs.push_back(ArgVT); 2170 ArgFlags.push_back(Flags); 2171 } 2172 2173 // Handle the arguments now that we've gotten them. 2174 SmallVector<unsigned, 4> RegArgs; 2175 unsigned NumBytes; 2176 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes)) 2177 return false; 2178 2179 // Issue the call. 2180 MachineInstrBuilder MIB; 2181 unsigned CallOpc = ARMSelectCallOp(NULL); 2182 if (isThumb2) 2183 // Explicitly adding the predicate here. 2184 MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2185 TII.get(CallOpc))) 2186 .addExternalSymbol(TLI.getLibcallName(Call)); 2187 else 2188 // Explicitly adding the predicate here. 2189 MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2190 TII.get(CallOpc)) 2191 .addExternalSymbol(TLI.getLibcallName(Call))); 2192 2193 // Add implicit physical register uses to the call. 2194 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) 2195 MIB.addReg(RegArgs[i]); 2196 2197 // Add a register mask with the call-preserved registers. 2198 // Proper defs for return values will be added by setPhysRegsDeadExcept(). 2199 MIB.addRegMask(TRI.getCallPreservedMask(CC)); 2200 2201 // Finish off the call including any return values. 2202 SmallVector<unsigned, 4> UsedRegs; 2203 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false; 2204 2205 // Set all unused physreg defs as dead. 2206 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2207 2208 return true; 2209} 2210 2211bool ARMFastISel::SelectCall(const Instruction *I, 2212 const char *IntrMemName = 0) { 2213 const CallInst *CI = cast<CallInst>(I); 2214 const Value *Callee = CI->getCalledValue(); 2215 2216 // Can't handle inline asm. 2217 if (isa<InlineAsm>(Callee)) return false; 2218 2219 // Only handle global variable Callees. 2220 const GlobalValue *GV = dyn_cast<GlobalValue>(Callee); 2221 if (!GV) 2222 return false; 2223 2224 // Check the calling convention. 2225 ImmutableCallSite CS(CI); 2226 CallingConv::ID CC = CS.getCallingConv(); 2227 2228 // TODO: Avoid some calling conventions? 2229 2230 // Let SDISel handle vararg functions. 2231 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType()); 2232 FunctionType *FTy = cast<FunctionType>(PT->getElementType()); 2233 if (FTy->isVarArg()) 2234 return false; 2235 2236 // Handle *simple* calls for now. 2237 Type *RetTy = I->getType(); 2238 MVT RetVT; 2239 if (RetTy->isVoidTy()) 2240 RetVT = MVT::isVoid; 2241 else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 && 2242 RetVT != MVT::i8 && RetVT != MVT::i1) 2243 return false; 2244 2245 // TODO: For now if we have long calls specified we don't handle the call. 2246 if (EnableARMLongCalls) return false; 2247 2248 // Can't handle non-double multi-reg retvals. 2249 if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 && 2250 RetVT != MVT::i16 && RetVT != MVT::i32) { 2251 SmallVector<CCValAssign, 16> RVLocs; 2252 CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context); 2253 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true)); 2254 if (RVLocs.size() >= 2 && RetVT != MVT::f64) 2255 return false; 2256 } 2257 2258 // Set up the argument vectors. 2259 SmallVector<Value*, 8> Args; 2260 SmallVector<unsigned, 8> ArgRegs; 2261 SmallVector<MVT, 8> ArgVTs; 2262 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 2263 unsigned arg_size = CS.arg_size(); 2264 Args.reserve(arg_size); 2265 ArgRegs.reserve(arg_size); 2266 ArgVTs.reserve(arg_size); 2267 ArgFlags.reserve(arg_size); 2268 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); 2269 i != e; ++i) { 2270 // If we're lowering a memory intrinsic instead of a regular call, skip the 2271 // last two arguments, which shouldn't be passed to the underlying function. 2272 if (IntrMemName && e-i <= 2) 2273 break; 2274 2275 ISD::ArgFlagsTy Flags; 2276 unsigned AttrInd = i - CS.arg_begin() + 1; 2277 if (CS.paramHasAttr(AttrInd, Attribute::SExt)) 2278 Flags.setSExt(); 2279 if (CS.paramHasAttr(AttrInd, Attribute::ZExt)) 2280 Flags.setZExt(); 2281 2282 // FIXME: Only handle *easy* calls for now. 2283 if (CS.paramHasAttr(AttrInd, Attribute::InReg) || 2284 CS.paramHasAttr(AttrInd, Attribute::StructRet) || 2285 CS.paramHasAttr(AttrInd, Attribute::Nest) || 2286 CS.paramHasAttr(AttrInd, Attribute::ByVal)) 2287 return false; 2288 2289 Type *ArgTy = (*i)->getType(); 2290 MVT ArgVT; 2291 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 && 2292 ArgVT != MVT::i1) 2293 return false; 2294 2295 unsigned Arg = getRegForValue(*i); 2296 if (Arg == 0) 2297 return false; 2298 2299 unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy); 2300 Flags.setOrigAlign(OriginalAlignment); 2301 2302 Args.push_back(*i); 2303 ArgRegs.push_back(Arg); 2304 ArgVTs.push_back(ArgVT); 2305 ArgFlags.push_back(Flags); 2306 } 2307 2308 // Handle the arguments now that we've gotten them. 2309 SmallVector<unsigned, 4> RegArgs; 2310 unsigned NumBytes; 2311 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes)) 2312 return false; 2313 2314 // Issue the call. 2315 MachineInstrBuilder MIB; 2316 unsigned CallOpc = ARMSelectCallOp(GV); 2317 // Explicitly adding the predicate here. 2318 if(isThumb2) { 2319 // Explicitly adding the predicate here. 2320 MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2321 TII.get(CallOpc))); 2322 if (!IntrMemName) 2323 MIB.addGlobalAddress(GV, 0, 0); 2324 else 2325 MIB.addExternalSymbol(IntrMemName, 0); 2326 } else { 2327 if (!IntrMemName) 2328 // Explicitly adding the predicate here. 2329 MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2330 TII.get(CallOpc)) 2331 .addGlobalAddress(GV, 0, 0)); 2332 else 2333 MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 2334 TII.get(CallOpc)) 2335 .addExternalSymbol(IntrMemName, 0)); 2336 } 2337 2338 // Add implicit physical register uses to the call. 2339 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) 2340 MIB.addReg(RegArgs[i]); 2341 2342 // Add a register mask with the call-preserved registers. 2343 // Proper defs for return values will be added by setPhysRegsDeadExcept(). 2344 MIB.addRegMask(TRI.getCallPreservedMask(CC)); 2345 2346 // Finish off the call including any return values. 2347 SmallVector<unsigned, 4> UsedRegs; 2348 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false; 2349 2350 // Set all unused physreg defs as dead. 2351 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 2352 2353 return true; 2354} 2355 2356bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) { 2357 return Len <= 16; 2358} 2359 2360bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, 2361 uint64_t Len) { 2362 // Make sure we don't bloat code by inlining very large memcpy's. 2363 if (!ARMIsMemCpySmall(Len)) 2364 return false; 2365 2366 // We don't care about alignment here since we just emit integer accesses. 2367 while (Len) { 2368 MVT VT; 2369 if (Len >= 4) 2370 VT = MVT::i32; 2371 else if (Len >= 2) 2372 VT = MVT::i16; 2373 else { 2374 assert(Len == 1); 2375 VT = MVT::i8; 2376 } 2377 2378 bool RV; 2379 unsigned ResultReg; 2380 RV = ARMEmitLoad(VT, ResultReg, Src); 2381 assert (RV == true && "Should be able to handle this load."); 2382 RV = ARMEmitStore(VT, ResultReg, Dest); 2383 assert (RV == true && "Should be able to handle this store."); 2384 (void)RV; 2385 2386 unsigned Size = VT.getSizeInBits()/8; 2387 Len -= Size; 2388 Dest.Offset += Size; 2389 Src.Offset += Size; 2390 } 2391 2392 return true; 2393} 2394 2395bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) { 2396 // FIXME: Handle more intrinsics. 2397 switch (I.getIntrinsicID()) { 2398 default: return false; 2399 case Intrinsic::memcpy: 2400 case Intrinsic::memmove: { 2401 const MemTransferInst &MTI = cast<MemTransferInst>(I); 2402 // Don't handle volatile. 2403 if (MTI.isVolatile()) 2404 return false; 2405 2406 // Disable inlining for memmove before calls to ComputeAddress. Otherwise, 2407 // we would emit dead code because we don't currently handle memmoves. 2408 bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy); 2409 if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) { 2410 // Small memcpy's are common enough that we want to do them without a call 2411 // if possible. 2412 uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue(); 2413 if (ARMIsMemCpySmall(Len)) { 2414 Address Dest, Src; 2415 if (!ARMComputeAddress(MTI.getRawDest(), Dest) || 2416 !ARMComputeAddress(MTI.getRawSource(), Src)) 2417 return false; 2418 if (ARMTryEmitSmallMemCpy(Dest, Src, Len)) 2419 return true; 2420 } 2421 } 2422 2423 if (!MTI.getLength()->getType()->isIntegerTy(32)) 2424 return false; 2425 2426 if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255) 2427 return false; 2428 2429 const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove"; 2430 return SelectCall(&I, IntrMemName); 2431 } 2432 case Intrinsic::memset: { 2433 const MemSetInst &MSI = cast<MemSetInst>(I); 2434 // Don't handle volatile. 2435 if (MSI.isVolatile()) 2436 return false; 2437 2438 if (!MSI.getLength()->getType()->isIntegerTy(32)) 2439 return false; 2440 2441 if (MSI.getDestAddressSpace() > 255) 2442 return false; 2443 2444 return SelectCall(&I, "memset"); 2445 } 2446 } 2447} 2448 2449bool ARMFastISel::SelectTrunc(const Instruction *I) { 2450 // The high bits for a type smaller than the register size are assumed to be 2451 // undefined. 2452 Value *Op = I->getOperand(0); 2453 2454 EVT SrcVT, DestVT; 2455 SrcVT = TLI.getValueType(Op->getType(), true); 2456 DestVT = TLI.getValueType(I->getType(), true); 2457 2458 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8) 2459 return false; 2460 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1) 2461 return false; 2462 2463 unsigned SrcReg = getRegForValue(Op); 2464 if (!SrcReg) return false; 2465 2466 // Because the high bits are undefined, a truncate doesn't generate 2467 // any code. 2468 UpdateValueMap(I, SrcReg); 2469 return true; 2470} 2471 2472unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, 2473 bool isZExt) { 2474 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8) 2475 return 0; 2476 2477 unsigned Opc; 2478 bool isBoolZext = false; 2479 if (!SrcVT.isSimple()) return 0; 2480 switch (SrcVT.getSimpleVT().SimpleTy) { 2481 default: return 0; 2482 case MVT::i16: 2483 if (!Subtarget->hasV6Ops()) return 0; 2484 if (isZExt) 2485 Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH; 2486 else 2487 Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH; 2488 break; 2489 case MVT::i8: 2490 if (!Subtarget->hasV6Ops()) return 0; 2491 if (isZExt) 2492 Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB; 2493 else 2494 Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB; 2495 break; 2496 case MVT::i1: 2497 if (isZExt) { 2498 Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri; 2499 isBoolZext = true; 2500 break; 2501 } 2502 return 0; 2503 } 2504 2505 unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32)); 2506 MachineInstrBuilder MIB; 2507 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg) 2508 .addReg(SrcReg); 2509 if (isBoolZext) 2510 MIB.addImm(1); 2511 else 2512 MIB.addImm(0); 2513 AddOptionalDefs(MIB); 2514 return ResultReg; 2515} 2516 2517bool ARMFastISel::SelectIntExt(const Instruction *I) { 2518 // On ARM, in general, integer casts don't involve legal types; this code 2519 // handles promotable integers. 2520 Type *DestTy = I->getType(); 2521 Value *Src = I->getOperand(0); 2522 Type *SrcTy = Src->getType(); 2523 2524 EVT SrcVT, DestVT; 2525 SrcVT = TLI.getValueType(SrcTy, true); 2526 DestVT = TLI.getValueType(DestTy, true); 2527 2528 bool isZExt = isa<ZExtInst>(I); 2529 unsigned SrcReg = getRegForValue(Src); 2530 if (!SrcReg) return false; 2531 2532 unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt); 2533 if (ResultReg == 0) return false; 2534 UpdateValueMap(I, ResultReg); 2535 return true; 2536} 2537 2538// TODO: SoftFP support. 2539bool ARMFastISel::TargetSelectInstruction(const Instruction *I) { 2540 2541 switch (I->getOpcode()) { 2542 case Instruction::Load: 2543 return SelectLoad(I); 2544 case Instruction::Store: 2545 return SelectStore(I); 2546 case Instruction::Br: 2547 return SelectBranch(I); 2548 case Instruction::IndirectBr: 2549 return SelectIndirectBr(I); 2550 case Instruction::ICmp: 2551 case Instruction::FCmp: 2552 return SelectCmp(I); 2553 case Instruction::FPExt: 2554 return SelectFPExt(I); 2555 case Instruction::FPTrunc: 2556 return SelectFPTrunc(I); 2557 case Instruction::SIToFP: 2558 return SelectIToFP(I, /*isSigned*/ true); 2559 case Instruction::UIToFP: 2560 return SelectIToFP(I, /*isSigned*/ false); 2561 case Instruction::FPToSI: 2562 return SelectFPToI(I, /*isSigned*/ true); 2563 case Instruction::FPToUI: 2564 return SelectFPToI(I, /*isSigned*/ false); 2565 case Instruction::Add: 2566 return SelectBinaryIntOp(I, ISD::ADD); 2567 case Instruction::Or: 2568 return SelectBinaryIntOp(I, ISD::OR); 2569 case Instruction::Sub: 2570 return SelectBinaryIntOp(I, ISD::SUB); 2571 case Instruction::FAdd: 2572 return SelectBinaryFPOp(I, ISD::FADD); 2573 case Instruction::FSub: 2574 return SelectBinaryFPOp(I, ISD::FSUB); 2575 case Instruction::FMul: 2576 return SelectBinaryFPOp(I, ISD::FMUL); 2577 case Instruction::SDiv: 2578 return SelectDiv(I, /*isSigned*/ true); 2579 case Instruction::UDiv: 2580 return SelectDiv(I, /*isSigned*/ false); 2581 case Instruction::SRem: 2582 return SelectRem(I, /*isSigned*/ true); 2583 case Instruction::URem: 2584 return SelectRem(I, /*isSigned*/ false); 2585 case Instruction::Call: 2586 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 2587 return SelectIntrinsicCall(*II); 2588 return SelectCall(I); 2589 case Instruction::Select: 2590 return SelectSelect(I); 2591 case Instruction::Ret: 2592 return SelectRet(I); 2593 case Instruction::Trunc: 2594 return SelectTrunc(I); 2595 case Instruction::ZExt: 2596 case Instruction::SExt: 2597 return SelectIntExt(I); 2598 default: break; 2599 } 2600 return false; 2601} 2602 2603/// TryToFoldLoad - The specified machine instr operand is a vreg, and that 2604/// vreg is being provided by the specified load instruction. If possible, 2605/// try to fold the load as an operand to the instruction, returning true if 2606/// successful. 2607bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo, 2608 const LoadInst *LI) { 2609 // Verify we have a legal type before going any further. 2610 MVT VT; 2611 if (!isLoadTypeLegal(LI->getType(), VT)) 2612 return false; 2613 2614 // Combine load followed by zero- or sign-extend. 2615 // ldrb r1, [r0] ldrb r1, [r0] 2616 // uxtb r2, r1 => 2617 // mov r3, r2 mov r3, r1 2618 bool isZExt = true; 2619 switch(MI->getOpcode()) { 2620 default: return false; 2621 case ARM::SXTH: 2622 case ARM::t2SXTH: 2623 isZExt = false; 2624 case ARM::UXTH: 2625 case ARM::t2UXTH: 2626 if (VT != MVT::i16) 2627 return false; 2628 break; 2629 case ARM::SXTB: 2630 case ARM::t2SXTB: 2631 isZExt = false; 2632 case ARM::UXTB: 2633 case ARM::t2UXTB: 2634 if (VT != MVT::i8) 2635 return false; 2636 break; 2637 } 2638 // See if we can handle this address. 2639 Address Addr; 2640 if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false; 2641 2642 unsigned ResultReg = MI->getOperand(0).getReg(); 2643 if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false)) 2644 return false; 2645 MI->eraseFromParent(); 2646 return true; 2647} 2648 2649namespace llvm { 2650 FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) { 2651 // Completely untested on non-iOS. 2652 const TargetMachine &TM = funcInfo.MF->getTarget(); 2653 2654 // Darwin and thumb1 only for now. 2655 const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>(); 2656 if (Subtarget->isTargetIOS() && !Subtarget->isThumb1Only()) 2657 return new ARMFastISel(funcInfo); 2658 return 0; 2659 } 2660} 2661