ARMFastISel.cpp revision 8a9bce978fa4ca60d3a0ba42a1d44c41463a3c33
//===-- ARMFastISel.cpp - ARM FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMCallingConv.h"
#include "ARMRegisterInfo.h"
#include "ARMTargetMachine.h"
#include "ARMSubtarget.h"
#include "ARMConstantPoolValue.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static cl::opt<bool>
DisableARMFastISel("disable-arm-fast-isel",
                   cl::desc("Turn off experimental ARM fast-isel support"),
                   cl::init(false), cl::Hidden);

extern cl::opt<bool> EnableARMLongCalls;

namespace {

  // All possible address modes, plus some.
  typedef struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset;

    // Innocuous defaults for our address.
    Address()
     : BaseType(RegBase), Offset(0) {
       Base.Reg = 0;
     }
  } Address;

class ARMFastISel : public FastISel {

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo)
    : FastISel(funcInfo),
      TM(funcInfo.MF->getTarget()),
      TII(*TM.getInstrInfo()),
      TLI(*TM.getTargetLowering()) {
      Subtarget = &TM.getSubtarget<ARMSubtarget>();
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

    // Code from FastISel.cpp.
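    // These overrides mirror the FastISel base versions, but route every
    // instruction they create through AddOptionalDefs below so the default
    // predicate and CC operands get appended.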
    virtual unsigned FastEmitInst_(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC);
    virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill);
    virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill);
    virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      unsigned Op2, bool Op2IsKill);
    virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
    virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     const ConstantFP *FPImm);
    virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill,
                                      uint64_t Imm);
    virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    uint64_t Imm);
    virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm1, uint64_t Imm2);

    virtual unsigned FastEmitInst_extractsubreg(MVT RetVT,
                                                unsigned Op0, bool Op0IsKill,
                                                uint32_t Idx);

    // Backend specific FastISel code.
    virtual bool TargetSelectInstruction(const Instruction *I);
    virtual unsigned TargetMaterializeConstant(const Constant *C);
    virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                               const LoadInst *LI);

  #include "ARMGenFastISel.inc"

    // Instruction selection routines.
  private:
    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectSIToFP(const Instruction *I);
    bool SelectFPToSI(const Instruction *I);
    bool SelectSDiv(const Instruction *I);
    bool SelectSRem(const Instruction *I);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);

    // Utility routines.
  private:
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                     unsigned Alignment = 0, bool isZExt = true,
                     bool allocReg = true);

    bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                      unsigned Alignment = 0);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len);
    unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
    unsigned ARMMaterializeInt(const Constant *C, EVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
    unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(const GlobalValue *GV);

    // Call handling routines.
  private:
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool Return);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<unsigned> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes);
    bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.
  private:
    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(EVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              unsigned Flags, bool useAM3);
};

} // end anonymous namespace

#include "ARMGenCallingConv.inc"

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If we're a thumb2 or not NEON function we were handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
       AFI->isThumb2Function())
    return false;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.OpInfo[i].isPredicate())
      return true;

  return false;
}

// If the machine is predicable go ahead and add the predicate operands, if
// it needs default CC operands add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? or...
  // Are we NEON in ARM mode and have a predicate operand? If so, I know
  // we're not predicable but add it anyways.
  if (TII.isPredicable(MI) || isARMNEONPred(MI))
    AddDefaultPred(MIB);

  // Do we optionally set a predicate?  Preds is size > 0 iff the predicate
  // defines CPSR. All other OptionalDefines in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR)) {
    if (CPSR)
      AddDefaultT1CC(MIB);
    else
      AddDefaultCC(MIB);
  }
  return MIB;
}

unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                    const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addReg(Op2, Op2IsKill * RegState::Kill));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addFPImm(FPImm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill,
                                       uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addReg(Op0, Op0IsKill * RegState::Kill)
                    .addReg(Op1, Op1IsKill * RegState::Kill)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY), ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
                    .addImm(Imm1).addImm(Imm2));
  else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
                    .addImm(Imm1).addImm(Imm2));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(TargetOpcode::COPY),
                            ResultReg)
                    .addReg(II.ImplicitDefs[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                                 unsigned Op0, bool Op0IsKill,
                                                 uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                          DL, TII.get(TargetOpcode::COPY), ResultReg)
                  .addReg(Op0, getKillRegState(Op0IsKill), Idx));
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  // VMOVSR moves a core register into an S-register; the opcodes were
  // swapped between the two helpers here.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                            DestReg)
                    .addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2()) return false;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(CFP->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          DestReg)
                  .addConstantPoolIndex(Idx)
                  .addReg(0));
  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {

  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return false;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
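  // (On v6t2 and later this is a single MOVW; any unsigned 16-bit value fits
  // in one instruction.)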
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  // Load from constant pool. For now 32-bit only.
  if (VT != MVT::i32)
    return false;

  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(C->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(C->getType());
  }
  unsigned Idx = MCP.getConstantPoolIndex(C, Align);

  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::t2LDRpci), DestReg)
                    .addConstantPoolIndex(Idx));
  else
    // The extra immediate is for addrmode2.
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::LDRcp), DestReg)
                    .addConstantPoolIndex(Idx)
                    .addImm(0));

  return DestReg;
}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32) return 0;

  Reloc::Model RelocM = TM.getRelocationModel();

  // TODO: Need more magic for ARM PIC.
  if (!isThumb2 && (RelocM == Reloc::PIC_)) return 0;

  // MachineConstantPool wants an explicit alignment.
  unsigned Align = TD.getPrefTypeAlignment(GV->getType());
  if (Align == 0) {
    // TODO: Figure out if this is correct.
    Align = TD.getTypeAllocSize(GV->getType());
  }

  // Grab index.
  unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8);
  unsigned Id = AFI->createPICLabelUId();
  ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                              ARMCP::CPValue,
                                                              PCAdj);
  unsigned Idx = MCP.getConstantPoolIndex(CPV, Align);

  // Load value.
  MachineInstrBuilder MIB;
  unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb2) {
    unsigned Opc = (RelocM != Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx);
    if (RelocM == Reloc::PIC_)
      MIB.addImm(Id);
  } else {
    // The extra immediate is for addrmode2.
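    // (A zero here should encode "add, offset 0, no shift" in the addrmode2
    // immediate field.)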
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
                  DestReg)
          .addConstantPoolIndex(Idx)
          .addImm(0);
  }
  AddOptionalDefs(MIB);

  if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) {
    unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
                    NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT VT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!VT.isSimple()) return 0;

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return false;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(SI->second)
                    .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
      break;
    case Instruction::BitCast: {
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    }
    case Instruction::IntToPtr: {
      // Look past no-op inttoptrs.
      if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::PtrToInt: {
      // Look past no-op ptrtoints.
      if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    }
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
          const StructLayout *SL = TD.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
          for (;;) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (isa<AddOperator>(Op) &&
                (!isa<Instruction>(Op) ||
                 FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
                 == FuncInfo.MBB) &&
                isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
              // An add (in the same block) with a constant operand. Fold the
              // constant.
              ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

    unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Materialize the global variable's address into a reg which can
  // then be used later to load the variable.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    unsigned Tmp = ARMMaterializeGV(GV, TLI.getValueType(Obj->getType()));
    if (Tmp == 0) return false;

    Addr.Base.Reg = Tmp;
    return true;
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {

  assert(VT.isSimple() && "Non-simple types are invalid here!");

  bool needsLowering = false;
  switch (VT.getSimpleVT().SimpleTy) {
    default:
      assert(false && "Unhandled load/store type!");
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If this is a stack pointer and the offset needs to be simplified then
  // put the alloca address into a register, set the base type back to
  // register and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    TargetRegisterClass *RC = isThumb2 ? ARM::tGPRRegisterClass :
                                         ARM::GPRRegisterClass;
    unsigned ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, *FuncInfo.InsertPt, DL,
                            TII.get(Opc), ResultReg)
                    .addFrameIndex(Addr.Base.FI)
                    .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction
  // get the reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = FastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 /*Op0IsKill*/false, Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       unsigned Flags, bool useAM3) {
  // addrmode5 output depends on the selection dag addressing dividing the
  // offset by 4 that it then later multiplies. Do this here as well.
  if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
      VT.getSimpleVT().SimpleTy == MVT::f64)
    Addr.Offset /= 4;

  // Frame base works a bit differently. Handle it separately.
  if (Addr.BaseType == Address::FrameIndexBase) {
    int FI = Addr.Base.FI;
    int Offset = Addr.Offset;
    MachineMemOperand *MMO =
      FuncInfo.MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(FI, Offset),
        Flags,
        MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    // Now add the rest of the operands.
    MIB.addFrameIndex(FI);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
    MIB.addMemOperand(MMO);
  } else {
    // Now add the rest of the operands.
    MIB.addReg(Addr.Base.Reg);

    // ARM halfword load/stores and signed byte loads need an additional
    // operand.
    if (useAM3) {
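      // As above: the low eight bits hold the offset magnitude, and bit 8 of
      // the addrmode3 immediate marks the offset as negative (subtract).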
      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
      MIB.addReg(0);
      MIB.addImm(Imm);
    } else {
      MIB.addImm(Addr.Offset);
    }
  }
  AddOptionalDefs(MIB);
}

bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
                              unsigned Alignment, bool isZExt, bool allocReg) {
  assert(VT.isSimple() && "Non-simple types are invalid here!");
  unsigned Opc;
  bool useAM3 = false;
  bool needVMOV = false;
  TargetRegisterClass *RC;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1:
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
        else
          Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
      } else {
        if (isZExt) {
          Opc = ARM::LDRBi12;
        } else {
          Opc = ARM::LDRSB;
          useAM3 = true;
        }
      }
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::i16:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
        else
          Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
      } else {
        Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
        useAM3 = true;
      }
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::i32:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          Opc = ARM::t2LDRi8;
        else
          Opc = ARM::t2LDRi12;
      } else {
        Opc = ARM::LDRi12;
      }
      RC = ARM::GPRRegisterClass;
      break;
    case MVT::f32:
      // Unaligned loads need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        needVMOV = true;
        VT = MVT::i32;
        Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
        RC = ARM::GPRRegisterClass;
      } else {
        Opc = ARM::VLDRS;
        RC = TLI.getRegClassFor(VT);
      }
      break;
    case MVT::f64:
      if (Alignment && Alignment < 4) {
        // FIXME: Unaligned loads need special handling. Doublewords require
        // word-alignment.
        return false;
      }
      Opc = ARM::VLDRD;
      RC = TLI.getRegClassFor(VT);
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  if (allocReg)
    ResultReg = createResultReg(RC);
  assert(ResultReg > 255 && "Expected an allocated virtual register.");
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(Opc), ResultReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);

  // If we had an unaligned load of a float we've converted it to a regular
  // load. Now we must move from the GPR to the FP register.
  if (needVMOV) {
    unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::VMOVSR), MoveReg)
                    .addReg(ResultReg));
    ResultReg = MoveReg;
  }
  return true;
}

bool ARMFastISel::SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

  unsigned ResultReg;
  if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
                               unsigned Alignment) {
  unsigned StrOpc;
  bool useAM3 = false;
  switch (VT.getSimpleVT().SimpleTy) {
    // This is mostly going to be Neon/vector support.
    default: return false;
    case MVT::i1: {
      unsigned Res = createResultReg(isThumb2 ? ARM::tGPRRegisterClass :
                                                ARM::GPRRegisterClass);
      unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(Opc), Res)
                      .addReg(SrcReg).addImm(1));
      SrcReg = Res;
    } // Fallthrough here.
    case MVT::i8:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRBi8;
        else
          StrOpc = ARM::t2STRBi12;
      } else {
        StrOpc = ARM::STRBi12;
      }
      break;
    case MVT::i16:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRHi8;
        else
          StrOpc = ARM::t2STRHi12;
      } else {
        StrOpc = ARM::STRH;
        useAM3 = true;
      }
      break;
    case MVT::i32:
      if (isThumb2) {
        if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
          StrOpc = ARM::t2STRi8;
        else
          StrOpc = ARM::t2STRi12;
      } else {
        StrOpc = ARM::STRi12;
      }
      break;
    case MVT::f32:
      if (!Subtarget->hasVFP2()) return false;
      StrOpc = ARM::VSTRS;
      // Unaligned stores need special handling. Floats require word-alignment.
      if (Alignment && Alignment < 4) {
        unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
        AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                TII.get(ARM::VMOVRS), MoveReg)
                        .addReg(SrcReg));
        SrcReg = MoveReg;
        VT = MVT::i32;
        StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
      }
      break;
    case MVT::f64:
      if (!Subtarget->hasVFP2()) return false;
      // FIXME: Unaligned stores need special handling. Doublewords require
      // word-alignment.
      if (Alignment && Alignment < 4) {
        return false;
      }
      StrOpc = ARM::VSTRD;
      break;
  }
  // Simplify this down to something we can handle.
  ARMSimplifyAddress(Addr, VT, useAM3);

  // Create the base instruction, then add the operands.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(StrOpc))
                            .addReg(SrcReg);
  AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
  return true;
}

bool ARMFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0) return false;

  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(I->getOperand(1), Addr))
    return false;

  if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
    // Needs two compares...
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_UEQ:
    default:
      // AL is our "false" for now. The other two need more compares.
      return ARMCC::AL;
    case CmpInst::ICMP_EQ:
    case CmpInst::FCMP_OEQ:
      return ARMCC::EQ;
    case CmpInst::ICMP_SGT:
    case CmpInst::FCMP_OGT:
      return ARMCC::GT;
    case CmpInst::ICMP_SGE:
    case CmpInst::FCMP_OGE:
      return ARMCC::GE;
    case CmpInst::ICMP_UGT:
    case CmpInst::FCMP_UGT:
      return ARMCC::HI;
    case CmpInst::FCMP_OLT:
      return ARMCC::MI;
    case CmpInst::ICMP_ULE:
    case CmpInst::FCMP_OLE:
      return ARMCC::LS;
    case CmpInst::FCMP_ORD:
      return ARMCC::VC;
    case CmpInst::FCMP_UNO:
      return ARMCC::VS;
    case CmpInst::FCMP_UGE:
      return ARMCC::PL;
    case CmpInst::ICMP_SLT:
    case CmpInst::FCMP_ULT:
      return ARMCC::LT;
    case CmpInst::ICMP_SLE:
    case CmpInst::FCMP_ULE:
      return ARMCC::LE;
    case CmpInst::FCMP_UNE:
    case CmpInst::ICMP_NE:
      return ARMCC::NE;
    case CmpInst::ICMP_UGE:
      return ARMCC::HS;
    case CmpInst::ICMP_ULT:
      return ARMCC::LO;
  }
}

bool ARMFastISel::SelectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Simple branch support.

  // If we can, avoid recomputing the compare - redoing it could lead to wonky
  // behavior.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

      // Get the compare predicate.
      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      ARMCC::CondCodes ARMPred = getComparePred(Predicate);

      // We may not handle every CC for now.
      if (ARMPred == ARMCC::AL) return false;

      // Emit the compare.
      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
        return false;

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
    MVT SourceVT;
    if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
        (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
      unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
      unsigned OpReg = getRegForValue(TI->getOperand(0));
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(TstOpc))
                      .addReg(OpReg).addImm(1));

      unsigned CCMode = ARMCC::NE;
      if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
        std::swap(TBB, FBB);
        CCMode = ARMCC::EQ;
      }

      unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
        .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);

      FastEmitBranch(FBB, DL);
      FuncInfo.MBB->addSuccessor(TBB);
      return true;
    }
  } else if (const ConstantInt *CI =
             dyn_cast<ConstantInt>(BI->getCondition())) {
    uint64_t Imm = CI->getZExtValue();
    MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
    FastEmitBranch(Target, DL);
    return true;
  }

  unsigned CmpReg = getRegForValue(BI->getCondition());
  if (CmpReg == 0) return false;

  // We've been divorced from our compare!  Our block was split, and
  // now our compare lives in a predecessor block.  We mustn't
  // re-compare here, as the children of the compare aren't guaranteed
  // live across the block boundary (we *could* check for this).
  // Regardless, the compare has been done in the predecessor block,
  // and it left a value for us in a virtual register.  Ergo, we test
  // the one-bit value left in the virtual register.
  unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
                  .addReg(CmpReg).addImm(1));

  unsigned CCMode = ARMCC::NE;
  if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
    std::swap(TBB, FBB);
    CCMode = ARMCC::EQ;
  }

  unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
    .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
  FastEmitBranch(FBB, DL);
  FuncInfo.MBB->addSuccessor(TBB);
  return true;
}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {
  Type *Ty = Src1Value->getType();
  EVT SrcVT = TLI.getValueType(Ty, true);
  if (!SrcVT.isSimple()) return false;

  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  // Check to see if the 2nd operand is a constant that we can encode directly
  // in the compare.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  // FIXME: At -O0 we don't have anything that canonicalizes operand order.
  // Thus, Src1Value may be a ConstantInt, but we're missing it.
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
    if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
        SrcVT == MVT::i1) {
      const APInt &CIVal = ConstInt->getValue();
      Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
      if (Imm < 0) {
        isNegativeImm = true;
        Imm = -Imm;
      }
      UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
        (ARM_AM::getSOImmVal(Imm) != -1);
    }
  } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
    if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
      if (ConstFP->isZero() && !ConstFP->isNegative())
        UseImm = true;
  }

  unsigned CmpOpc;
  bool isICmp = true;
  bool needsExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
    default: return false;
    // TODO: Verify compares.
    case MVT::f32:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
      break;
    case MVT::f64:
      isICmp = false;
      CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
      break;
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      needsExt = true;
    // Intentional fall-through.
    case MVT::i32:
      if (isThumb2) {
        if (!UseImm)
          CmpOpc = ARM::t2CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::t2CMNzri : ARM::t2CMPri;
      } else {
        if (!UseImm)
          CmpOpc = ARM::CMPrr;
        else
          CmpOpc = isNegativeImm ? ARM::CMNzri : ARM::CMPri;
      }
      break;
  }

  unsigned SrcReg1 = getRegForValue(Src1Value);
  if (SrcReg1 == 0) return false;

  unsigned SrcReg2 = 0;
  if (!UseImm) {
    SrcReg2 = getRegForValue(Src2Value);
    if (SrcReg2 == 0) return false;
  }

  // We have i1, i8, or i16, we need to either zero extend or sign extend.
  if (needsExt) {
    unsigned ResultReg;
    ResultReg = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
    if (ResultReg == 0) return false;
    SrcReg1 = ResultReg;
    if (!UseImm) {
      ResultReg = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
      if (ResultReg == 0) return false;
      SrcReg2 = ResultReg;
    }
  }

  if (!UseImm) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(CmpOpc))
                    .addReg(SrcReg1).addReg(SrcReg2));
  } else {
    MachineInstrBuilder MIB;
    MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
      .addReg(SrcReg1);

    // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
    if (isICmp)
      MIB.addImm(Imm);
    AddOptionalDefs(MIB);
  }

  // For floating point we need to move the result to a comparison register
  // that we can then use for branches.
  if (Ty->isFloatTy() || Ty->isDoubleTy())
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                            TII.get(ARM::FMSTAT)));
  return true;
}

bool ARMFastISel::SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);
  Type *Ty = CI->getOperand(0)->getType();

  // Get the compare predicate.
  ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());

  // We may not handle every CC for now.
  if (ARMPred == ARMCC::AL) return false;

  // Emit the compare.
  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
    return false;

  // Now set a register based on the comparison. Explicitly set the predicates
  // here.
  unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
  TargetRegisterClass *RC = isThumb2 ? ARM::rGPRRegisterClass
                                     : ARM::GPRRegisterClass;
  unsigned DestReg = createResultReg(RC);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
  unsigned ZeroReg = TargetMaterializeConstant(Zero);
  bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
  unsigned CondReg = isFloat ? ARM::FPSCR : ARM::CPSR;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
    .addReg(ZeroReg).addImm(1)
    .addImm(ARMPred).addReg(CondReg);

  UpdateValueMap(I, DestReg);
  return true;
}

bool ARMFastISel::SelectFPExt(const Instruction *I) {
  // Make sure we have VFP and that we're extending float to double.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!I->getType()->isDoubleTy() ||
      !V->getType()->isFloatTy()) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::DPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTDS), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
  // Make sure we have VFP and that we're truncating double to float.
  if (!Subtarget->hasVFP2()) return false;

  Value *V = I->getOperand(0);
  if (!(I->getType()->isFloatTy() &&
        V->getType()->isDoubleTy())) return false;

  unsigned Op = getRegForValue(V);
  if (Op == 0) return false;

  unsigned Result = createResultReg(ARM::SPRRegisterClass);
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(ARM::VCVTSD), Result)
                  .addReg(Op));
  UpdateValueMap(I, Result);
  return true;
}

bool ARMFastISel::SelectSIToFP(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, DstVT))
    return false;

  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(Src->getType(), true);
  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0) return false;

  // Handle sign-extension.
  if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
    EVT DestVT = MVT::i32;
    unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT,
                                       /*isZExt*/ false);
    if (ResultReg == 0) return false;
    SrcReg = ResultReg;
  }

  // The conversion routine works on fp-reg to fp-reg and the operand above
  // was an integer, move it to the fp registers if possible.
  unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
  if (FP == 0) return false;

  unsigned Opc;
  if (Ty->isFloatTy()) Opc = ARM::VSITOS;
  else if (Ty->isDoubleTy()) Opc = ARM::VSITOD;
  else return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(FP));
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectFPToSI(const Instruction *I) {
  // Make sure we have VFP.
  if (!Subtarget->hasVFP2()) return false;

  MVT DstVT;
  Type *RetTy = I->getType();
  if (!isTypeLegal(RetTy, DstVT))
    return false;

  unsigned Op = getRegForValue(I->getOperand(0));
  if (Op == 0) return false;

  unsigned Opc;
  Type *OpTy = I->getOperand(0)->getType();
  if (OpTy->isFloatTy()) Opc = ARM::VTOSIZS;
  else if (OpTy->isDoubleTy()) Opc = ARM::VTOSIZD;
  else return false;

  // f64->s32 or f32->s32 both need an intermediate f32 reg.
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
                          ResultReg)
                  .addReg(Op));

  // This result needs to be in an integer register, but the conversion only
  // takes place in fp-regs.
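  // ARMMoveToIntReg emits a VMOVRS to copy the value out of the S-register.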
  unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
  if (IntReg == 0) return false;

  UpdateValueMap(I, IntReg);
  return true;
}

bool ARMFastISel::SelectSelect(const Instruction *I) {
  MVT VT;
  if (!isTypeLegal(I->getType(), VT))
    return false;

  // Things need to be register sized for register moves.
  if (VT != MVT::i32) return false;
  const TargetRegisterClass *RC = TLI.getRegClassFor(VT);

  unsigned CondReg = getRegForValue(I->getOperand(0));
  if (CondReg == 0) return false;
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (Op1Reg == 0) return false;

  // Check to see if we can use an immediate in the conditional move.
  int Imm = 0;
  bool UseImm = false;
  bool isNegativeImm = false;
  if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
    assert(VT == MVT::i32 && "Expecting an i32.");
    Imm = (int)ConstInt->getValue().getZExtValue();
    if (Imm < 0) {
      isNegativeImm = true;
      Imm = ~Imm;
    }
    UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
  }

  unsigned Op2Reg = 0;
  if (!UseImm) {
    Op2Reg = getRegForValue(I->getOperand(2));
    if (Op2Reg == 0) return false;
  }

  unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
                  .addReg(CondReg).addImm(0));

  unsigned MovCCOpc;
  if (!UseImm) {
    MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
  } else {
    if (!isNegativeImm) {
      MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
    } else {
      MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
    }
  }
  unsigned ResultReg = createResultReg(RC);
  if (!UseImm)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
  else
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
      .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool ARMFastISel::SelectSDiv(const Instruction *I) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  // If we have integer div support we should have selected this automagically.
  // In case we have a real miss go ahead and return false and we'll pick
  // it up later.
  if (Subtarget->hasDivide()) return false;

  // Otherwise emit a libcall.
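  // (The concrete routine, e.g. __divsi3 or an AEABI equivalent, should come
  // from the RTLIB call-name table.)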
  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SDIV_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SDIV_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SDIV_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SDIV_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SDIV_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectSRem(const Instruction *I) {
  MVT VT;
  Type *Ty = I->getType();
  if (!isTypeLegal(Ty, VT))
    return false;

  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
  if (VT == MVT::i8)
    LC = RTLIB::SREM_I8;
  else if (VT == MVT::i16)
    LC = RTLIB::SREM_I16;
  else if (VT == MVT::i32)
    LC = RTLIB::SREM_I32;
  else if (VT == MVT::i64)
    LC = RTLIB::SREM_I64;
  else if (VT == MVT::i128)
    LC = RTLIB::SREM_I128;
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

  return ARMEmitLibcall(I, LC);
}

bool ARMFastISel::SelectBinaryOp(const Instruction *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(I->getType(), true);

  // We can get here in the case when we want to use NEON for our fp
  // operations, but can't figure out how to. Just use the vfp instructions
  // if we have them.
  // FIXME: It'd be nice to use NEON instructions.
  Type *Ty = I->getType();
  bool isFloat = (Ty->isDoubleTy() || Ty->isFloatTy());
  if (isFloat && !Subtarget->hasVFP2())
    return false;

  unsigned Opc;
  bool is64bit = VT == MVT::f64 || VT == MVT::i64;
  switch (ISDOpcode) {
    default: return false;
    case ISD::FADD:
      Opc = is64bit ? ARM::VADDD : ARM::VADDS;
      break;
    case ISD::FSUB:
      Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
      break;
    case ISD::FMUL:
      Opc = is64bit ? ARM::VMULD : ARM::VMULS;
      break;
  }
  unsigned Op1 = getRegForValue(I->getOperand(0));
  if (Op1 == 0) return false;

  unsigned Op2 = getRegForValue(I->getOperand(1));
  if (Op2 == 0) return false;

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(Opc), ResultReg)
                  .addReg(Op1).addReg(Op2));
  UpdateValueMap(I, ResultReg);
  return true;
}

// Call Handling Code

// This is largely taken directly from CCAssignFnForNode - we don't support
// varargs in FastISel so that part has been removed.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, bool Return) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    // Ignore fastcc. Silence compiler warnings.
    (void)RetFastCC_ARM_APCS;
    (void)FastCC_ARM_APCS;
    // Fallthrough
  case CallingConv::C:
    // Use target triple & subtarget features to do actual dispatch.
    if (Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() &&
          TM.Options.FloatABIType == FloatABI::Hard)
        return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
      else
        return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
    } else
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::ARM_AAPCS:
bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<unsigned> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC, false));

  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackDown))
                  .addImm(NumBytes));

  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    unsigned Arg = ArgRegs[VA.getValNo()];
    MVT ArgVT = ArgVTs[VA.getValNo()];

    // We don't handle NEON/vector parameters yet.
    if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
      return false;

    // Handle arg promotion, etc.
    switch (VA.getLocInfo()) {
      case CCValAssign::Full: break;
      case CCValAssign::SExt: {
        MVT DestVT = VA.getLocVT();
        unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT,
                                           /*isZExt*/false);
        assert(ResultReg != 0 && "Failed to emit a sext");
        Arg = ResultReg;
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::AExt:
        // Intentional fall-through.  Handle AExt and ZExt.
      case CCValAssign::ZExt: {
        MVT DestVT = VA.getLocVT();
        unsigned ResultReg = ARMEmitIntExt(ArgVT, Arg, DestVT,
                                           /*isZExt*/true);
        assert(ResultReg != 0 && "Failed to emit a zext");
        Arg = ResultReg;
        ArgVT = DestVT;
        break;
      }
      case CCValAssign::BCvt: {
        unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
                                 /*TODO: Kill=*/false);
        assert(BC != 0 && "Failed to emit a bitcast!");
        Arg = BC;
        ArgVT = VA.getLocVT();
        break;
      }
      default: llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              VA.getLocReg())
        .addReg(Arg);
      RegArgs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      // TODO: We need custom lowering for vector (v2f64) args.
      if (VA.getLocVT() != MVT::f64) return false;

      CCValAssign &NextVA = ArgLocs[++i];

      // TODO: Only handle register args for now.
      if (!(VA.isRegLoc() && NextVA.isRegLoc())) return false;

      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVRRD), VA.getLocReg())
                      .addReg(NextVA.getLocReg(), RegState::Define)
                      .addReg(Arg));
      RegArgs.push_back(VA.getLocReg());
      RegArgs.push_back(NextVA.getLocReg());
    } else {
      assert(VA.isMemLoc());
      // Need to store on the stack.
      Address Addr;
      Addr.BaseType = Address::RegBase;
      Addr.Base.Reg = ARM::SP;
      Addr.Offset = VA.getLocMemOffset();

      if (!ARMEmitStore(ArgVT, Arg, Addr)) return false;
    }
  }
  return true;
}

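// For example (an assumed soft-float scenario): a call returning f64 yields
// two RVLocs (r0 and r1) in FinishCall below, which reassembles the double
// with a single VMOVDRR before updating the value map.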
bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes) {
  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(AdjStackUp))
                  .addImm(NumBytes).addImm(0));

  // Now the return value.
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true));

    // Copy all of the result registers out of their specified physreg.
    if (RVLocs.size() == 2 && RetVT == MVT::f64) {
      // For this move we copy into two registers and then move into the
      // double fp reg we want.
      EVT DestVT = RVLocs[0].getValVT();
      TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
      unsigned ResultReg = createResultReg(DstRC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                              TII.get(ARM::VMOVDRR), ResultReg)
                      .addReg(RVLocs[0].getLocReg())
                      .addReg(RVLocs[1].getLocReg()));

      UsedRegs.push_back(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[1].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    } else {
      assert(RVLocs.size() == 1 &&
             "Can't handle non-double multi-reg retvals!");
      EVT CopyVT = RVLocs[0].getValVT();

      // Special handling for extended integers.
      if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
        CopyVT = MVT::i32;

      TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

      unsigned ResultReg = createResultReg(DstRC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(RVLocs[0].getLocReg());
      UsedRegs.push_back(RVLocs[0].getLocReg());

      // Finally update the result.
      UpdateValueMap(I, ResultReg);
    }
  }

  return true;
}

bool ARMFastISel::SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();

  if (!FuncInfo.CanLowerReturn)
    return false;

  if (F.isVarArg())
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
                   I->getContext());
    CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */));

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT RVVT = TLI.getValueType(RV->getType());
    EVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
        return false;

      assert(DestVT == MVT::i32 && "ARM should always ext to i32");

      bool isZExt = Outs[0].Flags.isZExt();
      unsigned ResultReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, isZExt);
      if (ResultReg == 0) return false;
      SrcReg = ResultReg;
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Mark the register as live out of the function.
    MRI.addLiveOut(VA.getLocReg());
  }

  unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                          TII.get(RetOpc)));
  return true;
}

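// For example (illustrative): "ret i16 %x" from a signext function should
// become an SXTH/t2SXTH of the source into a fresh vreg, a COPY of that vreg
// into r0, and a BX_RET/tBX_RET.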
unsigned ARMFastISel::ARMSelectCallOp(const GlobalValue *GV) {
  // Darwin needs the r9 versions of the opcodes.
  bool isDarwin = Subtarget->isTargetDarwin();
  if (isThumb2) {
    return isDarwin ? ARM::tBLr9 : ARM::tBL;
  } else {
    return isDarwin ? ARM::BLr9 : ARM::BL;
  }
}

// A quick function that will emit a call for a named libcall in F with the
// vector of passed arguments for the Instruction in I. We can assume that we
// can emit a call for any libcall we can produce. This is an abridged version
// of the full call infrastructure since we won't need to worry about things
// like computed function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
  CallingConv::ID CC = TLI.getLibcallCallingConv(Call);

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT))
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
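  // Sketch of what follows (using sdiv as an assumed example): each operand
  // of I gets a vreg here, and ProcessCallArgs later copies those vregs into
  // r0 and r1 as the libcall's calling convention dictates.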
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(I->getNumOperands());
  ArgRegs.reserve(I->getNumOperands());
  ArgVTs.reserve(I->getNumOperands());
  ArgFlags.reserve(I->getNumOperands());
  for (unsigned i = 0; i < I->getNumOperands(); ++i) {
    Value *Op = I->getOperand(i);
    unsigned Arg = getRegForValue(Op);
    if (Arg == 0) return false;

    Type *ArgTy = Op->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT)) return false;

    ISD::ArgFlagsTy Flags;
    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(Op);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLr9 for darwin, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(NULL);
  if (isThumb2)
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)))
          .addExternalSymbol(TLI.getLibcallName(Call));
  else
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc))
                         .addExternalSymbol(TLI.getLibcallName(Call)));

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

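// SelectCall below handles direct calls only; anything it rejects (inline
// asm, indirect calls, varargs, sret/byval/nest arguments, illegal types)
// makes FastISel return false so the SelectionDAG path lowers it instead.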
bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = 0) {
  const CallInst *CI = cast<CallInst>(I);
  const Value *Callee = CI->getCalledValue();

  // Can't handle inline asm.
  if (isa<InlineAsm>(Callee)) return false;

  // Only handle global variable Callees.
  const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
  if (!GV)
    return false;

  // Check the calling convention.
  ImmutableCallSite CS(CI);
  CallingConv::ID CC = CS.getCallingConv();

  // TODO: Avoid some calling conventions?

  // Let SDISel handle vararg functions.
  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  if (FTy->isVarArg())
    return false;

  // Handle *simple* calls for now.
  Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8 && RetVT != MVT::i1)
    return false;

  // TODO: For now if we have long calls specified we don't handle the call.
  if (EnableARMLongCalls) return false;

  // Set up the argument vectors.
  SmallVector<Value*, 8> Args;
  SmallVector<unsigned, 8> ArgRegs;
  SmallVector<MVT, 8> ArgVTs;
  SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
  Args.reserve(CS.arg_size());
  ArgRegs.reserve(CS.arg_size());
  ArgVTs.reserve(CS.arg_size());
  ArgFlags.reserve(CS.arg_size());
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    // If we're lowering a memory intrinsic instead of a regular call, skip the
    // last two arguments, which shouldn't be passed to the underlying function.
    if (IntrMemName && e - i <= 2)
      break;

    ISD::ArgFlagsTy Flags;
    unsigned AttrInd = i - CS.arg_begin() + 1;
    if (CS.paramHasAttr(AttrInd, Attribute::SExt))
      Flags.setSExt();
    if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
      Flags.setZExt();

    // FIXME: Only handle *easy* calls for now.
    if (CS.paramHasAttr(AttrInd, Attribute::InReg) ||
        CS.paramHasAttr(AttrInd, Attribute::StructRet) ||
        CS.paramHasAttr(AttrInd, Attribute::Nest) ||
        CS.paramHasAttr(AttrInd, Attribute::ByVal))
      return false;

    Type *ArgTy = (*i)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
        ArgVT != MVT::i1)
      return false;

    unsigned Arg = getRegForValue(*i);
    if (Arg == 0)
      return false;

    unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
    Flags.setOrigAlign(OriginalAlignment);

    Args.push_back(*i);
    ArgRegs.push_back(Arg);
    ArgVTs.push_back(ArgVT);
    ArgFlags.push_back(Flags);
  }

  // Handle the arguments now that we've gotten them.
  SmallVector<unsigned, 4> RegArgs;
  unsigned NumBytes;
  if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes))
    return false;

  // Issue the call, BLr9 for darwin, BL otherwise.
  // TODO: Turn this into the table of arm call ops.
  MachineInstrBuilder MIB;
  unsigned CallOpc = ARMSelectCallOp(GV);
  if (isThumb2) {
    // Explicitly adding the predicate here.
    MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                 TII.get(CallOpc)));
    if (!IntrMemName)
      MIB.addGlobalAddress(GV, 0, 0);
    else
      MIB.addExternalSymbol(IntrMemName, 0);
  } else {
    if (!IntrMemName)
      // Explicitly adding the predicate here.
      MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                   TII.get(CallOpc))
                           .addGlobalAddress(GV, 0, 0));
    else
      MIB = AddDefaultPred(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                   TII.get(CallOpc))
                           .addExternalSymbol(IntrMemName, 0));
  }

  // Add implicit physical register uses to the call.
  for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
    MIB.addReg(RegArgs[i]);

  // Finish off the call including any return values.
  SmallVector<unsigned, 4> UsedRegs;
  if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) return false;

  // Set all unused physreg defs as dead.
  static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

  return true;
}

bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
  return Len <= 16;
}

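// Worked example for the loop below: a 7-byte copy is unrolled into an i32
// load/store pair, then an i16 pair, then an i8 pair, bumping the source and
// destination offsets by each access size.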
bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src,
                                        uint64_t Len) {
  // Make sure we don't bloat code by inlining very large memcpy's.
  if (!ARMIsMemCpySmall(Len))
    return false;

  // We don't care about alignment here since we just emit integer accesses.
  while (Len) {
    MVT VT;
    if (Len >= 4)
      VT = MVT::i32;
    else if (Len >= 2)
      VT = MVT::i16;
    else {
      assert(Len == 1);
      VT = MVT::i8;
    }

    bool RV;
    unsigned ResultReg;
    RV = ARMEmitLoad(VT, ResultReg, Src);
    assert(RV && "Should be able to handle this load.");
    RV = ARMEmitStore(VT, ResultReg, Dest);
    assert(RV && "Should be able to handle this store.");
    (void)RV; // Silence unused-variable warnings when asserts are disabled.

    unsigned Size = VT.getSizeInBits()/8;
    Len -= Size;
    Dest.Offset += Size;
    Src.Offset += Size;
  }

  return true;
}

bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
  // FIXME: Handle more intrinsics.
  switch (I.getIntrinsicID()) {
  default: return false;
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const MemTransferInst &MTI = cast<MemTransferInst>(I);
    // Don't handle volatile.
    if (MTI.isVolatile())
      return false;

    // Disable inlining for memmove before calls to ComputeAddress.  Otherwise,
    // we would emit dead code because we don't currently handle memmoves.
    bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
    if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
      // Small memcpy's are common enough that we want to do them without a
      // call if possible.
      uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
      if (ARMIsMemCpySmall(Len)) {
        Address Dest, Src;
        if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
            !ARMComputeAddress(MTI.getRawSource(), Src))
          return false;
        if (ARMTryEmitSmallMemCpy(Dest, Src, Len))
          return true;
      }
    }

    if (!MTI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
      return false;

    const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
    return SelectCall(&I, IntrMemName);
  }
  case Intrinsic::memset: {
    const MemSetInst &MSI = cast<MemSetInst>(I);
    // Don't handle volatile.
    if (MSI.isVolatile())
      return false;

    if (!MSI.getLength()->getType()->isIntegerTy(32))
      return false;

    if (MSI.getDestAddressSpace() > 255)
      return false;

    return SelectCall(&I, "memset");
  }
  }
  return false;
}

bool ARMFastISel::SelectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(Op->getType(), true);
  DestVT = TLI.getValueType(I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg) return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  UpdateValueMap(I, SrcReg);
  return true;
}

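// For example (illustrative): zext i8 -> i32 becomes a single UXTB/t2UXTB,
// sext i16 -> i32 an SXTH/t2SXTH, and zext i1 -> i32 an AND with 1. An i1
// sign-extend is not handled here and returns 0 to force a fallback.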
unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
                                    bool isZExt) {
  if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
    return 0;

  unsigned Opc;
  bool isBoolZext = false;
  if (!SrcVT.isSimple()) return 0;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i16:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTH : ARM::UXTH;
    else
      Opc = isThumb2 ? ARM::t2SXTH : ARM::SXTH;
    break;
  case MVT::i8:
    if (!Subtarget->hasV6Ops()) return 0;
    if (isZExt)
      Opc = isThumb2 ? ARM::t2UXTB : ARM::UXTB;
    else
      Opc = isThumb2 ? ARM::t2SXTB : ARM::SXTB;
    break;
  case MVT::i1:
    if (isZExt) {
      Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
      isBoolZext = true;
      break;
    }
    return 0;
  }

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::i32));
  MachineInstrBuilder MIB;
  MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
        .addReg(SrcReg);
  if (isBoolZext)
    MIB.addImm(1);
  else
    MIB.addImm(0);
  AddOptionalDefs(MIB);
  return ResultReg;
}

bool ARMFastISel::SelectIntExt(const Instruction *I) {
  // On ARM, in general, integer casts don't involve legal types; this code
  // handles promotable integers.
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(SrcTy, true);
  DestVT = TLI.getValueType(DestTy, true);

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg) return false;

  unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
  if (ResultReg == 0) return false;
  UpdateValueMap(I, ResultReg);
  return true;
}

// TODO: SoftFP support.
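// Note: every Select* routine returns false for anything it cannot handle;
// TargetSelectInstruction propagates that, and the FastISel driver then
// retries the instruction on the SelectionDAG path.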
bool ARMFastISel::TargetSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
    case Instruction::Load:
      return SelectLoad(I);
    case Instruction::Store:
      return SelectStore(I);
    case Instruction::Br:
      return SelectBranch(I);
    case Instruction::ICmp:
    case Instruction::FCmp:
      return SelectCmp(I);
    case Instruction::FPExt:
      return SelectFPExt(I);
    case Instruction::FPTrunc:
      return SelectFPTrunc(I);
    case Instruction::SIToFP:
      return SelectSIToFP(I);
    case Instruction::FPToSI:
      return SelectFPToSI(I);
    case Instruction::FAdd:
      return SelectBinaryOp(I, ISD::FADD);
    case Instruction::FSub:
      return SelectBinaryOp(I, ISD::FSUB);
    case Instruction::FMul:
      return SelectBinaryOp(I, ISD::FMUL);
    case Instruction::SDiv:
      return SelectSDiv(I);
    case Instruction::SRem:
      return SelectSRem(I);
    case Instruction::Call:
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
        return SelectIntrinsicCall(*II);
      return SelectCall(I);
    case Instruction::Select:
      return SelectSelect(I);
    case Instruction::Ret:
      return SelectRet(I);
    case Instruction::Trunc:
      return SelectTrunc(I);
    case Instruction::ZExt:
    case Instruction::SExt:
      return SelectIntExt(I);
    default: break;
  }
  return false;
}

/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction.  If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                                const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  //   ldrb r1, [r0]       ldrb r1, [r0]
  //   uxtb r2, r1     =>
  //   mov  r3, r2         mov  r3, r1
  bool isZExt = true;
  switch (MI->getOpcode()) {
    default: return false;
    case ARM::SXTH:
    case ARM::t2SXTH:
      isZExt = false;
      // Intentional fall-through to the i16 width check.
    case ARM::UXTH:
    case ARM::t2UXTH:
      if (VT != MVT::i16)
        return false;
      break;
    case ARM::SXTB:
    case ARM::t2SXTB:
      isZExt = false;
      // Intentional fall-through to the i8 width check.
    case ARM::UXTB:
    case ARM::t2UXTB:
      if (VT != MVT::i8)
        return false;
      break;
  }
  // See if we can handle this address.
  Address Addr;
  if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlignment(), isZExt, false))
    return false;
  MI->eraseFromParent();
  return true;
}

namespace llvm {
  llvm::FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo) {
    // Completely untested on non-darwin.
    const TargetMachine &TM = funcInfo.MF->getTarget();

    // Darwin only for now, and only for subtargets that aren't Thumb1-only
    // (i.e. ARM or Thumb2 mode).
    const ARMSubtarget *Subtarget = &TM.getSubtarget<ARMSubtarget>();
    if (Subtarget->isTargetDarwin() && !Subtarget->isThumb1Only() &&
        !DisableARMFastISel)
      return new ARMFastISel(funcInfo);
    return 0;
  }
}