ARMFastISel.cpp revision 909cb4f2f2d227ea01852cb318c80a79c46bc9bf
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                   cl::desc("Turn off experimental ARM fast-isel support"),
                   cl::init(false), cl::Hidden);

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0) {
       Base.Reg = 0;
     }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
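    // These overrides mirror the target-independent emitters in FastISel.cpp;
    // the ARM versions additionally run every emitted instruction through
    // AddOptionalDefs so predicate and optional CC-def operands get added.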
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      unsigned Op2, bool Op2IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm1, uint64_t Imm2);

    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                               const LoadInst *LI);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectSIToFP(const Instruction *I);
    bool SelectFPToSI(const Instruction *I);
    bool SelectSDiv(const Instruction *I);
    bool SelectSRem(const Instruction *I);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);

    // Utility routines.
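    // Address computation, type-legality checks, and constant/global
    // materialization helpers shared by the selection routines above.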
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr, bool isZExt,
                     bool allocReg);

    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
    bool ARMIsMemXferSmall(uint64_t Len);
    bool ARMTryEmitSmallMemXfer(Address Dest, Address Src, uint64_t Len,
                                bool isMemCpy);
    unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(const GlobalValue *GV);

    // Call handling routines.
  private:
    bool FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                        unsigned &ResultReg);
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If we're a thumb2 or not NEON function we were handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
       AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine is predicable go ahead and add the predicate operands, if
// it needs default CC operands add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyways.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return false;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  // Load from constant pool.  For now 32-bit only.
  if (VT != MVT::i32)
    return false;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: Need more magic for ARM PIC.
  if (!isThumb2 && (RelocM == Reloc::PIC_)) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(GV->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(GV->getType());
  }

  // Grab index.
  unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
  unsigned Id = AFI->createPICLabelUId();
  ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                              ARMCP::CPValue,
                                                              PCAdj);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  // Load value.
  MachineInstrBuilder MIB;
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb2) {
    unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx);
    if (RelocM == Reloc::PIC_)
      MIB.addImm(Id);
  } else {
    // The extra immediate is for addrmode2.
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                  DestReg)
          .addConstantPoolIndex(Idx)
          .addImm(0);
  }
  AddOptionalDefs(MIB);

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return false;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    break;
    case Instruction::BitCast: {
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    }
    case Instruction::IntToPtr: {
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::PtrToInt: {
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = TD.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (isa<AddOperator>(Op) &&
                (!isa<Instruction>(Op) ||
                 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
                 == FuncInfo.MBB) &&
                isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
              // An add (in the same block) with a constant operand. Fold the
              // constant.
              ConstantInt *CI =
              cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Materialize the global variable's address into a reg which can
  // then be used later to load the variable.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
    if (Tmp == 0) return false;

    Addr.Base.Reg = Tmp;
    return true;
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
    default:
      assert(false && "Unhandled load/store type!");
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    TargetRegisterClass *RC = isThumb2 ? ARM::tGPRRegisterClass :
                              ARM::GPRRegisterClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset),
        Flags,
        MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ?
                   (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                              bool isZExt = true, bool allocReg = true) {
  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  bool useAM3 = false;
  TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        if (isZExt) {
          Opc = ARM::LDRBi12;
        } else {
          Opc = ARM::LDRSB;
          useAM3 = true;
        }
      }
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::i16:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::i32:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::f32:
      Opc = ARM::VLDRS;
      RC = TLI.getRegClassFor(VT);
      break;
    case MVT::f64:
      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert (ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr)) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ? ARM::tGPRRegisterClass :
                                     ARM::GPRRegisterClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
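    // The i1 case above masked the source down to its low bit; it now falls
    // through so the i8 case picks the byte store opcode for it.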
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRS;
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg, getKillRegState(true));
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr)) return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
      .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
      .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ?
                                FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
  .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcVT = TLI.getValueType(Ty, true);
  if (!SrcVT.isSimple()) return false;

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      if (Imm < 0) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
      break;
    case MVT::f64:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
    // Intentional fall-through.
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNzri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ?
                   ARM::CMNzri : ARM::CMPri;
      }
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    unsigned ResultReg;
    ResultReg = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (ResultReg == 0) return false;
    SrcReg1 = ResultReg;
    if (!UseImm) {
      ResultReg = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (ResultReg == 0) return false;
      SrcReg2 = ResultReg;
    }
  }

  if (!UseImm) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(CmpOpc))
                    .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
      .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);
  Type *Ty = CI->getOperand(0)->getType();

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  TargetRegisterClass *RC = isThumb2 ? ARM::rGPRRegisterClass
                                     : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  unsigned CondReg = isFloat ? ARM::FPSCR : ARM::CPSR;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
          .addReg(ZeroReg).addImm(1)
          .addImm(ARMPred).addReg(CondReg);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectSIToFP(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(Src->getType(), true);
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    EVT DestVT = MVT::i32;
    unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, /*isZExt*/ false);
    if (ResultReg == 0) return false;
    SrcReg = ResultReg;
  }

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = ARM::VSITOS;
  else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToSI(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
  else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
  else return false;

  // f64->s32 or f32->s32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
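  // A negative constant is emitted with MVNcc below, so what we test against
  // the SO-immediate encodings is the bitwise complement of the value.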
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert (VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));

  unsigned MovCCOpc;
  if (!UseImm) {
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    if (!isNegativeImm) {
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    } else {
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
    }
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
    .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
  else
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
    .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectSDiv(const Instruction *I) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SDIV_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SDIV_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SDIV_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SDIV_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectSRem(const Instruction *I) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SREM_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SREM_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SREM_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SREM_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

bool ARMFastISel::FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src,
                                 EVT SrcVT, unsigned &ResultReg) {
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                           Src, /*TODO: Kill=*/false);

  if (RR != 0) {
    ResultReg = RR;
    return true;
  } else
    return false;
}

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
  }
}

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
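  // For each assigned location: promote the value as the calling convention
  // requires (sext/zext/bitcast), then either copy it into its physical
  // register or store it to the outgoing-argument area on the stack.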
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        EVT DestVT = VA.getLocVT();
        unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT,
                                           /*isZExt*/false);
        assert(ResultReg != 0 && "Failed to emit a sext");
        Arg = ResultReg;
        break;
      }
      case CCValAssign::AExt:
        // Intentional fall-through. Handle AExt and ZExt.
      case CCValAssign::ZExt: {
        EVT DestVT = VA.getLocVT();
        unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT,
                                           /*isZExt*/true);
        assert(ResultReg != 0 && "Failed to emit a zext");
        Arg = ResultReg;
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64) return false;

      CCValAssign &NextVA = ArgLocs[++i];

      // TODO: Only handle register args for now.
      if (!(VA.isRegLoc() && NextVA.isRegLoc())) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
    }
  }
  return true;
}

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
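      // Illustrative (added note): under a soft-float calling convention an
      // f64 result comes back split across two GPRs (typically r0/r1); the
      // VMOVDRR below reassembles those halves into a single D register.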
      EVT DestVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 && "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs, I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVVT = TLI.getValueType(RV->getType());
    EVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      bool isZExt = Outs[0].Flags.isZExt();
      unsigned ResultReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, isZExt);
      if (ResultReg == 0) return false;
      SrcReg = ResultReg;
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);
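    // Example (added, illustrative): for 'define zeroext i8 @f()' returning
    // %v, the block above first widens %v with UXTB (via ARMEmitIntExt), and
    // the COPY then moves the extended value into R0 ahead of the return.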
    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) {

  // Darwin needs the r9 versions of the opcodes.
  bool isDarwin = Subtarget->isTargetDarwin();
  if (isThumb2) {
    return isDarwin ? ARM::tBLr9 : ARM::tBL;
  } else {
    return isDarwin ? ARM::BLr9 : ARM::BL;
  }
}

// A quick function that will emit a call for a named libcall with the
// arguments of the instruction I. We can assume that we can emit a call for
// any libcall we can produce. This is an abridged version of the full call
// infrastructure since we won't need to worry about things like computed
// function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLr9 for darwin, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(NULL);
  if (isThumb2)
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
          .addExternalSymbol(TLI.getLibcallName(Call));
  else
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
          .addExternalSymbol(TLI.getLibcallName(Call)));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
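  // Illustrative (added note): for an i32 sdiv on a subtarget without
  // hardware divide, the sequence emitted by this point looks roughly like
  //   ADJCALLSTACKDOWN 0
  //   %R0 = COPY <numerator>; %R1 = COPY <denominator>
  //   BL <libcall>   ; name from TLI.getLibcallName(Call), e.g. __divsi3
  //                  ; (or an __aeabi_* variant, depending on the runtime)
  // and FinishCall below emits ADJCALLSTACKUP plus the result copy.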
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = 0) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // Only handle global variable Callees.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV)
    return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgRegs.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip
    // the last two arguments, which shouldn't be passed to the underlying
    // function.
    if (IntrMemName && e-i <= 2)
      break;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;
    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLr9 for darwin, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(GV);
  if (isThumb2) {
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)));
    if (!IntrMemName)
      MIB.addGlobalAddress(GV, 0, 0);
    else
      MIB.addExternalSymbol(IntrMemName, 0);
  } else {
    if (!IntrMemName)
      // Explicitly adding the predicate here.
      MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                   TII.get(CallOpc))
            .addGlobalAddress(GV, 0, 0));
    else
      // Explicitly adding the predicate here.
      MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                   TII.get(CallOpc))
            .addExternalSymbol(IntrMemName, 0));
  }

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::ARMIsMemXferSmall(uint64_t Len) {
  return Len <= 16;
}

bool ARMFastISel::ARMTryEmitSmallMemXfer(Address Dest, Address Src,
                                         uint64_t Len, bool isMemCpy) {
  // FIXME: Memmoves require a little more care because their source and
  // destination may overlap.
  if (!isMemCpy)
    return false;

  // Make sure we don't bloat code by inlining very large memcpys.
  if (!ARMIsMemXferSmall(Len))
    return false;

  // We don't care about alignment here since we just emit integer accesses.
  while (Len) {
    MVT VT;
    if (Len >= 4)
      VT = MVT::i32;
    else if (Len >= 2)
      VT = MVT::i16;
    else {
      assert(Len == 1);
      VT = MVT::i8;
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV && "Should be able to handle this store.");
    (void)RV;

    unsigned Size = VT.getSizeInBits()/8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}

bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Disable inlining for memmove before calls to ComputeAddress. Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpys/memmoves are common enough that we want to do them
      // without a call if possible.
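      // Example (added, illustrative): a constant 7-byte memcpy is emitted
      // inline by ARMTryEmitSmallMemXfer below as an i32 load/store pair,
      // then an i16 pair, then a final i8 pair, rather than a libcall.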
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemXferSmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        if (ARMTryEmitSmallMemXfer(Dest, Src, Len, isMemCpy))
          return true;
      }
    }

    if (!MTI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
      return false;

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile.
    if (MSI.isVolatile())
      return false;

    if (!MSI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MSI.getDestAddressSpace() > 255)
      return false;

    return SelectCall(&I, "memset");
  }
  }
  return false;
}

bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  UpdateValueMap(I, SrcReg);
  return true;
}

unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;

  unsigned Opc;
  bool isBoolZext = false;
  if (!SrcVT.isSimple()) return 0;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
    else
      Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
    break;
  case MVT::i8:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
    else
      Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
    break;
  case MVT::i1:
    if (isZExt) {
      Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      isBoolZext = true;
      break;
    }
    return 0;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
        .addReg(SrcReg);
  if (isBoolZext)
    MIB.addImm(1);
  else
    MIB.addImm(0);
  AddOptionalDefs(MIB);
  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
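  // Illustrative examples (added note): 'zext i8 %x to i32' becomes
  // UXTB/t2UXTB, 'sext i16 %x to i32' becomes SXTH/t2SXTH, and
  // 'zext i1 %x to i32' becomes an AND with #1 (see ARMEmitIntExt above).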
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(SrcTy, true);
  DestVT = TLI.getValueType(DestTy, true);

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

// TODO: SoftFP support.
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {

  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectSIToFP(I);
    case Instruction::FPToSI:
      return SelectFPToSI(I);
    case Instruction::FAdd:
      return SelectBinaryOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectSDiv(I);
    case Instruction::SRem:
      return SelectSRem(I);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    default: break;
  }
  return false;
}

/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                                const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  //   ldrb r1, [r0]       ldrb r1, [r0]
  //   uxtb r2, r1    =>
  //   mov  r3, r2         mov  r3, r1
  bool isZExt = true;
  switch(MI->getOpcode()) {
    default: return false;
    case ARM::SXTH:
    case ARM::t2SXTH:
      isZExt = false;
      // Intentional fall-through to the i16 check below.
    case ARM::UXTH:
    case ARM::t2UXTH:
      if (VT != MVT::i16)
        return false;
      break;
    case ARM::SXTB:
    case ARM::t2SXTB:
      isZExt = false;
      // Intentional fall-through to the i8 check below.
    case ARM::UXTB:
    case ARM::t2UXTB:
      if (VT != MVT::i8)
        return false;
      break;
  }
  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, isZExt, false))
    return false;
  MI->eraseFromParent();
  return true;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-darwin.
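    // Usage note (added, illustrative): the common FastISel machinery is
    // expected to reach this factory through the ARM target's FastISel hook,
    // so returning 0 here simply falls back to SelectionDAG instruction
    // selection for the function.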
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // Darwin only for now; Thumb1 is explicitly unsupported.
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() &&
        !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}