AArch64FastISel.cpp revision dce4a407a24b04eebc6a376f8e62b41aaa7b071f
1//===-- AArch6464FastISel.cpp - AArch64 FastISel implementation -----------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the AArch64-specific support for the FastISel class. Some 11// of the target-specific code is generated by tablegen in the file 12// AArch64GenFastISel.inc, which is #included here. 13// 14//===----------------------------------------------------------------------===// 15 16#include "AArch64.h" 17#include "AArch64TargetMachine.h" 18#include "AArch64Subtarget.h" 19#include "MCTargetDesc/AArch64AddressingModes.h" 20#include "llvm/CodeGen/CallingConvLower.h" 21#include "llvm/CodeGen/FastISel.h" 22#include "llvm/CodeGen/FunctionLoweringInfo.h" 23#include "llvm/CodeGen/MachineConstantPool.h" 24#include "llvm/CodeGen/MachineFrameInfo.h" 25#include "llvm/CodeGen/MachineInstrBuilder.h" 26#include "llvm/CodeGen/MachineRegisterInfo.h" 27#include "llvm/IR/CallingConv.h" 28#include "llvm/IR/DataLayout.h" 29#include "llvm/IR/DerivedTypes.h" 30#include "llvm/IR/Function.h" 31#include "llvm/IR/GetElementPtrTypeIterator.h" 32#include "llvm/IR/GlobalAlias.h" 33#include "llvm/IR/GlobalVariable.h" 34#include "llvm/IR/Instructions.h" 35#include "llvm/IR/IntrinsicInst.h" 36#include "llvm/IR/Operator.h" 37#include "llvm/Support/CommandLine.h" 38using namespace llvm; 39 40namespace { 41 42class AArch64FastISel : public FastISel { 43 44 class Address { 45 public: 46 typedef enum { 47 RegBase, 48 FrameIndexBase 49 } BaseKind; 50 51 private: 52 BaseKind Kind; 53 union { 54 unsigned Reg; 55 int FI; 56 } Base; 57 int64_t Offset; 58 59 public: 60 Address() : Kind(RegBase), Offset(0) { Base.Reg = 0; } 61 void setKind(BaseKind K) { Kind = K; } 62 BaseKind getKind() const { return Kind; } 63 bool isRegBase() const { return Kind == RegBase; } 64 bool isFIBase() const { return Kind == FrameIndexBase; } 65 void setReg(unsigned Reg) { 66 assert(isRegBase() && "Invalid base register access!"); 67 Base.Reg = Reg; 68 } 69 unsigned getReg() const { 70 assert(isRegBase() && "Invalid base register access!"); 71 return Base.Reg; 72 } 73 void setFI(unsigned FI) { 74 assert(isFIBase() && "Invalid base frame index access!"); 75 Base.FI = FI; 76 } 77 unsigned getFI() const { 78 assert(isFIBase() && "Invalid base frame index access!"); 79 return Base.FI; 80 } 81 void setOffset(int64_t O) { Offset = O; } 82 int64_t getOffset() { return Offset; } 83 84 bool isValid() { return isFIBase() || (isRegBase() && getReg() != 0); } 85 }; 86 87 /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can 88 /// make the right decision when generating code for different targets. 89 const AArch64Subtarget *Subtarget; 90 LLVMContext *Context; 91 92private: 93 // Selection routines. 94 bool SelectLoad(const Instruction *I); 95 bool SelectStore(const Instruction *I); 96 bool SelectBranch(const Instruction *I); 97 bool SelectIndirectBr(const Instruction *I); 98 bool SelectCmp(const Instruction *I); 99 bool SelectSelect(const Instruction *I); 100 bool SelectFPExt(const Instruction *I); 101 bool SelectFPTrunc(const Instruction *I); 102 bool SelectFPToInt(const Instruction *I, bool Signed); 103 bool SelectIntToFP(const Instruction *I, bool Signed); 104 bool SelectRem(const Instruction *I, unsigned ISDOpcode); 105 bool SelectCall(const Instruction *I, const char *IntrMemName); 106 bool SelectIntrinsicCall(const IntrinsicInst &I); 107 bool SelectRet(const Instruction *I); 108 bool SelectTrunc(const Instruction *I); 109 bool SelectIntExt(const Instruction *I); 110 bool SelectMul(const Instruction *I); 111 112 // Utility helper routines. 113 bool isTypeLegal(Type *Ty, MVT &VT); 114 bool isLoadStoreTypeLegal(Type *Ty, MVT &VT); 115 bool ComputeAddress(const Value *Obj, Address &Addr); 116 bool SimplifyAddress(Address &Addr, MVT VT, int64_t ScaleFactor, 117 bool UseUnscaled); 118 void AddLoadStoreOperands(Address &Addr, const MachineInstrBuilder &MIB, 119 unsigned Flags, bool UseUnscaled); 120 bool IsMemCpySmall(uint64_t Len, unsigned Alignment); 121 bool TryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len, 122 unsigned Alignment); 123 // Emit functions. 124 bool EmitCmp(Value *Src1Value, Value *Src2Value, bool isZExt); 125 bool EmitLoad(MVT VT, unsigned &ResultReg, Address Addr, 126 bool UseUnscaled = false); 127 bool EmitStore(MVT VT, unsigned SrcReg, Address Addr, 128 bool UseUnscaled = false); 129 unsigned EmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt); 130 unsigned Emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt); 131 132 unsigned AArch64MaterializeFP(const ConstantFP *CFP, MVT VT); 133 unsigned AArch64MaterializeGV(const GlobalValue *GV); 134 135 // Call handling routines. 136private: 137 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const; 138 bool ProcessCallArgs(SmallVectorImpl<Value *> &Args, 139 SmallVectorImpl<unsigned> &ArgRegs, 140 SmallVectorImpl<MVT> &ArgVTs, 141 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags, 142 SmallVectorImpl<unsigned> &RegArgs, CallingConv::ID CC, 143 unsigned &NumBytes); 144 bool FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs, 145 const Instruction *I, CallingConv::ID CC, unsigned &NumBytes); 146 147public: 148 // Backend specific FastISel code. 149 unsigned TargetMaterializeAlloca(const AllocaInst *AI) override; 150 unsigned TargetMaterializeConstant(const Constant *C) override; 151 152 explicit AArch64FastISel(FunctionLoweringInfo &funcInfo, 153 const TargetLibraryInfo *libInfo) 154 : FastISel(funcInfo, libInfo) { 155 Subtarget = &TM.getSubtarget<AArch64Subtarget>(); 156 Context = &funcInfo.Fn->getContext(); 157 } 158 159 bool TargetSelectInstruction(const Instruction *I) override; 160 161#include "AArch64GenFastISel.inc" 162}; 163 164} // end anonymous namespace 165 166#include "AArch64GenCallingConv.inc" 167 168CCAssignFn *AArch64FastISel::CCAssignFnForCall(CallingConv::ID CC) const { 169 if (CC == CallingConv::WebKit_JS) 170 return CC_AArch64_WebKit_JS; 171 return Subtarget->isTargetDarwin() ? CC_AArch64_DarwinPCS : CC_AArch64_AAPCS; 172} 173 174unsigned AArch64FastISel::TargetMaterializeAlloca(const AllocaInst *AI) { 175 assert(TLI.getValueType(AI->getType(), true) == MVT::i64 && 176 "Alloca should always return a pointer."); 177 178 // Don't handle dynamic allocas. 179 if (!FuncInfo.StaticAllocaMap.count(AI)) 180 return 0; 181 182 DenseMap<const AllocaInst *, int>::iterator SI = 183 FuncInfo.StaticAllocaMap.find(AI); 184 185 if (SI != FuncInfo.StaticAllocaMap.end()) { 186 unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass); 187 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri), 188 ResultReg) 189 .addFrameIndex(SI->second) 190 .addImm(0) 191 .addImm(0); 192 return ResultReg; 193 } 194 195 return 0; 196} 197 198unsigned AArch64FastISel::AArch64MaterializeFP(const ConstantFP *CFP, MVT VT) { 199 if (VT != MVT::f32 && VT != MVT::f64) 200 return 0; 201 202 const APFloat Val = CFP->getValueAPF(); 203 bool is64bit = (VT == MVT::f64); 204 205 // This checks to see if we can use FMOV instructions to materialize 206 // a constant, otherwise we have to materialize via the constant pool. 207 if (TLI.isFPImmLegal(Val, VT)) { 208 int Imm; 209 unsigned Opc; 210 if (is64bit) { 211 Imm = AArch64_AM::getFP64Imm(Val); 212 Opc = AArch64::FMOVDi; 213 } else { 214 Imm = AArch64_AM::getFP32Imm(Val); 215 Opc = AArch64::FMOVSi; 216 } 217 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT)); 218 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) 219 .addImm(Imm); 220 return ResultReg; 221 } 222 223 // Materialize via constant pool. MachineConstantPool wants an explicit 224 // alignment. 225 unsigned Align = DL.getPrefTypeAlignment(CFP->getType()); 226 if (Align == 0) 227 Align = DL.getTypeAllocSize(CFP->getType()); 228 229 unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align); 230 unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass); 231 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP), 232 ADRPReg).addConstantPoolIndex(Idx, 0, AArch64II::MO_PAGE); 233 234 unsigned Opc = is64bit ? AArch64::LDRDui : AArch64::LDRSui; 235 unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT)); 236 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) 237 .addReg(ADRPReg) 238 .addConstantPoolIndex(Idx, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC); 239 return ResultReg; 240} 241 242unsigned AArch64FastISel::AArch64MaterializeGV(const GlobalValue *GV) { 243 // We can't handle thread-local variables quickly yet. Unfortunately we have 244 // to peer through any aliases to find out if that rule applies. 245 const GlobalValue *TLSGV = GV; 246 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV)) 247 TLSGV = GA->getAliasee(); 248 249 // MachO still uses GOT for large code-model accesses, but ELF requires 250 // movz/movk sequences, which FastISel doesn't handle yet. 251 if (TM.getCodeModel() != CodeModel::Small && !Subtarget->isTargetMachO()) 252 return 0; 253 254 if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(TLSGV)) 255 if (GVar->isThreadLocal()) 256 return 0; 257 258 unsigned char OpFlags = Subtarget->ClassifyGlobalReference(GV, TM); 259 260 EVT DestEVT = TLI.getValueType(GV->getType(), true); 261 if (!DestEVT.isSimple()) 262 return 0; 263 264 unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass); 265 unsigned ResultReg; 266 267 if (OpFlags & AArch64II::MO_GOT) { 268 // ADRP + LDRX 269 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP), 270 ADRPReg) 271 .addGlobalAddress(GV, 0, AArch64II::MO_GOT | AArch64II::MO_PAGE); 272 273 ResultReg = createResultReg(&AArch64::GPR64RegClass); 274 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::LDRXui), 275 ResultReg) 276 .addReg(ADRPReg) 277 .addGlobalAddress(GV, 0, AArch64II::MO_GOT | AArch64II::MO_PAGEOFF | 278 AArch64II::MO_NC); 279 } else { 280 // ADRP + ADDX 281 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP), 282 ADRPReg).addGlobalAddress(GV, 0, AArch64II::MO_PAGE); 283 284 ResultReg = createResultReg(&AArch64::GPR64spRegClass); 285 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri), 286 ResultReg) 287 .addReg(ADRPReg) 288 .addGlobalAddress(GV, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC) 289 .addImm(0); 290 } 291 return ResultReg; 292} 293 294unsigned AArch64FastISel::TargetMaterializeConstant(const Constant *C) { 295 EVT CEVT = TLI.getValueType(C->getType(), true); 296 297 // Only handle simple types. 298 if (!CEVT.isSimple()) 299 return 0; 300 MVT VT = CEVT.getSimpleVT(); 301 302 // FIXME: Handle ConstantInt. 303 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) 304 return AArch64MaterializeFP(CFP, VT); 305 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) 306 return AArch64MaterializeGV(GV); 307 308 return 0; 309} 310 311// Computes the address to get to an object. 312bool AArch64FastISel::ComputeAddress(const Value *Obj, Address &Addr) { 313 const User *U = nullptr; 314 unsigned Opcode = Instruction::UserOp1; 315 if (const Instruction *I = dyn_cast<Instruction>(Obj)) { 316 // Don't walk into other basic blocks unless the object is an alloca from 317 // another block, otherwise it may not have a virtual register assigned. 318 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) || 319 FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) { 320 Opcode = I->getOpcode(); 321 U = I; 322 } 323 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) { 324 Opcode = C->getOpcode(); 325 U = C; 326 } 327 328 if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType())) 329 if (Ty->getAddressSpace() > 255) 330 // Fast instruction selection doesn't support the special 331 // address spaces. 332 return false; 333 334 switch (Opcode) { 335 default: 336 break; 337 case Instruction::BitCast: { 338 // Look through bitcasts. 339 return ComputeAddress(U->getOperand(0), Addr); 340 } 341 case Instruction::IntToPtr: { 342 // Look past no-op inttoptrs. 343 if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy()) 344 return ComputeAddress(U->getOperand(0), Addr); 345 break; 346 } 347 case Instruction::PtrToInt: { 348 // Look past no-op ptrtoints. 349 if (TLI.getValueType(U->getType()) == TLI.getPointerTy()) 350 return ComputeAddress(U->getOperand(0), Addr); 351 break; 352 } 353 case Instruction::GetElementPtr: { 354 Address SavedAddr = Addr; 355 uint64_t TmpOffset = Addr.getOffset(); 356 357 // Iterate through the GEP folding the constants into offsets where 358 // we can. 359 gep_type_iterator GTI = gep_type_begin(U); 360 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; 361 ++i, ++GTI) { 362 const Value *Op = *i; 363 if (StructType *STy = dyn_cast<StructType>(*GTI)) { 364 const StructLayout *SL = DL.getStructLayout(STy); 365 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue(); 366 TmpOffset += SL->getElementOffset(Idx); 367 } else { 368 uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType()); 369 for (;;) { 370 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) { 371 // Constant-offset addressing. 372 TmpOffset += CI->getSExtValue() * S; 373 break; 374 } 375 if (canFoldAddIntoGEP(U, Op)) { 376 // A compatible add with a constant operand. Fold the constant. 377 ConstantInt *CI = 378 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1)); 379 TmpOffset += CI->getSExtValue() * S; 380 // Iterate on the other operand. 381 Op = cast<AddOperator>(Op)->getOperand(0); 382 continue; 383 } 384 // Unsupported 385 goto unsupported_gep; 386 } 387 } 388 } 389 390 // Try to grab the base operand now. 391 Addr.setOffset(TmpOffset); 392 if (ComputeAddress(U->getOperand(0), Addr)) 393 return true; 394 395 // We failed, restore everything and try the other options. 396 Addr = SavedAddr; 397 398 unsupported_gep: 399 break; 400 } 401 case Instruction::Alloca: { 402 const AllocaInst *AI = cast<AllocaInst>(Obj); 403 DenseMap<const AllocaInst *, int>::iterator SI = 404 FuncInfo.StaticAllocaMap.find(AI); 405 if (SI != FuncInfo.StaticAllocaMap.end()) { 406 Addr.setKind(Address::FrameIndexBase); 407 Addr.setFI(SI->second); 408 return true; 409 } 410 break; 411 } 412 } 413 414 // Try to get this in a register if nothing else has worked. 415 if (!Addr.isValid()) 416 Addr.setReg(getRegForValue(Obj)); 417 return Addr.isValid(); 418} 419 420bool AArch64FastISel::isTypeLegal(Type *Ty, MVT &VT) { 421 EVT evt = TLI.getValueType(Ty, true); 422 423 // Only handle simple types. 424 if (evt == MVT::Other || !evt.isSimple()) 425 return false; 426 VT = evt.getSimpleVT(); 427 428 // This is a legal type, but it's not something we handle in fast-isel. 429 if (VT == MVT::f128) 430 return false; 431 432 // Handle all other legal types, i.e. a register that will directly hold this 433 // value. 434 return TLI.isTypeLegal(VT); 435} 436 437bool AArch64FastISel::isLoadStoreTypeLegal(Type *Ty, MVT &VT) { 438 if (isTypeLegal(Ty, VT)) 439 return true; 440 441 // If this is a type than can be sign or zero-extended to a basic operation 442 // go ahead and accept it now. For stores, this reflects truncation. 443 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16) 444 return true; 445 446 return false; 447} 448 449bool AArch64FastISel::SimplifyAddress(Address &Addr, MVT VT, 450 int64_t ScaleFactor, bool UseUnscaled) { 451 bool needsLowering = false; 452 int64_t Offset = Addr.getOffset(); 453 switch (VT.SimpleTy) { 454 default: 455 return false; 456 case MVT::i1: 457 case MVT::i8: 458 case MVT::i16: 459 case MVT::i32: 460 case MVT::i64: 461 case MVT::f32: 462 case MVT::f64: 463 if (!UseUnscaled) 464 // Using scaled, 12-bit, unsigned immediate offsets. 465 needsLowering = ((Offset & 0xfff) != Offset); 466 else 467 // Using unscaled, 9-bit, signed immediate offsets. 468 needsLowering = (Offset > 256 || Offset < -256); 469 break; 470 } 471 472 // FIXME: If this is a stack pointer and the offset needs to be simplified 473 // then put the alloca address into a register, set the base type back to 474 // register and continue. This should almost never happen. 475 if (needsLowering && Addr.getKind() == Address::FrameIndexBase) { 476 return false; 477 } 478 479 // Since the offset is too large for the load/store instruction get the 480 // reg+offset into a register. 481 if (needsLowering) { 482 uint64_t UnscaledOffset = Addr.getOffset() * ScaleFactor; 483 unsigned ResultReg = FastEmit_ri_(MVT::i64, ISD::ADD, Addr.getReg(), false, 484 UnscaledOffset, MVT::i64); 485 if (ResultReg == 0) 486 return false; 487 Addr.setReg(ResultReg); 488 Addr.setOffset(0); 489 } 490 return true; 491} 492 493void AArch64FastISel::AddLoadStoreOperands(Address &Addr, 494 const MachineInstrBuilder &MIB, 495 unsigned Flags, bool UseUnscaled) { 496 int64_t Offset = Addr.getOffset(); 497 // Frame base works a bit differently. Handle it separately. 498 if (Addr.getKind() == Address::FrameIndexBase) { 499 int FI = Addr.getFI(); 500 // FIXME: We shouldn't be using getObjectSize/getObjectAlignment. The size 501 // and alignment should be based on the VT. 502 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand( 503 MachinePointerInfo::getFixedStack(FI, Offset), Flags, 504 MFI.getObjectSize(FI), MFI.getObjectAlignment(FI)); 505 // Now add the rest of the operands. 506 MIB.addFrameIndex(FI).addImm(Offset).addMemOperand(MMO); 507 } else { 508 // Now add the rest of the operands. 509 MIB.addReg(Addr.getReg()); 510 MIB.addImm(Offset); 511 } 512} 513 514bool AArch64FastISel::EmitLoad(MVT VT, unsigned &ResultReg, Address Addr, 515 bool UseUnscaled) { 516 // Negative offsets require unscaled, 9-bit, signed immediate offsets. 517 // Otherwise, we try using scaled, 12-bit, unsigned immediate offsets. 518 if (!UseUnscaled && Addr.getOffset() < 0) 519 UseUnscaled = true; 520 521 unsigned Opc; 522 const TargetRegisterClass *RC; 523 bool VTIsi1 = false; 524 int64_t ScaleFactor = 0; 525 switch (VT.SimpleTy) { 526 default: 527 return false; 528 case MVT::i1: 529 VTIsi1 = true; 530 // Intentional fall-through. 531 case MVT::i8: 532 Opc = UseUnscaled ? AArch64::LDURBBi : AArch64::LDRBBui; 533 RC = &AArch64::GPR32RegClass; 534 ScaleFactor = 1; 535 break; 536 case MVT::i16: 537 Opc = UseUnscaled ? AArch64::LDURHHi : AArch64::LDRHHui; 538 RC = &AArch64::GPR32RegClass; 539 ScaleFactor = 2; 540 break; 541 case MVT::i32: 542 Opc = UseUnscaled ? AArch64::LDURWi : AArch64::LDRWui; 543 RC = &AArch64::GPR32RegClass; 544 ScaleFactor = 4; 545 break; 546 case MVT::i64: 547 Opc = UseUnscaled ? AArch64::LDURXi : AArch64::LDRXui; 548 RC = &AArch64::GPR64RegClass; 549 ScaleFactor = 8; 550 break; 551 case MVT::f32: 552 Opc = UseUnscaled ? AArch64::LDURSi : AArch64::LDRSui; 553 RC = TLI.getRegClassFor(VT); 554 ScaleFactor = 4; 555 break; 556 case MVT::f64: 557 Opc = UseUnscaled ? AArch64::LDURDi : AArch64::LDRDui; 558 RC = TLI.getRegClassFor(VT); 559 ScaleFactor = 8; 560 break; 561 } 562 // Scale the offset. 563 if (!UseUnscaled) { 564 int64_t Offset = Addr.getOffset(); 565 if (Offset & (ScaleFactor - 1)) 566 // Retry using an unscaled, 9-bit, signed immediate offset. 567 return EmitLoad(VT, ResultReg, Addr, /*UseUnscaled*/ true); 568 569 Addr.setOffset(Offset / ScaleFactor); 570 } 571 572 // Simplify this down to something we can handle. 573 if (!SimplifyAddress(Addr, VT, UseUnscaled ? 1 : ScaleFactor, UseUnscaled)) 574 return false; 575 576 // Create the base instruction, then add the operands. 577 ResultReg = createResultReg(RC); 578 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 579 TII.get(Opc), ResultReg); 580 AddLoadStoreOperands(Addr, MIB, MachineMemOperand::MOLoad, UseUnscaled); 581 582 // Loading an i1 requires special handling. 583 if (VTIsi1) { 584 MRI.constrainRegClass(ResultReg, &AArch64::GPR32RegClass); 585 unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass); 586 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri), 587 ANDReg) 588 .addReg(ResultReg) 589 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32)); 590 ResultReg = ANDReg; 591 } 592 return true; 593} 594 595bool AArch64FastISel::SelectLoad(const Instruction *I) { 596 MVT VT; 597 // Verify we have a legal type before going any further. Currently, we handle 598 // simple types that will directly fit in a register (i32/f32/i64/f64) or 599 // those that can be sign or zero-extended to a basic operation (i1/i8/i16). 600 if (!isLoadStoreTypeLegal(I->getType(), VT) || cast<LoadInst>(I)->isAtomic()) 601 return false; 602 603 // See if we can handle this address. 604 Address Addr; 605 if (!ComputeAddress(I->getOperand(0), Addr)) 606 return false; 607 608 unsigned ResultReg; 609 if (!EmitLoad(VT, ResultReg, Addr)) 610 return false; 611 612 UpdateValueMap(I, ResultReg); 613 return true; 614} 615 616bool AArch64FastISel::EmitStore(MVT VT, unsigned SrcReg, Address Addr, 617 bool UseUnscaled) { 618 // Negative offsets require unscaled, 9-bit, signed immediate offsets. 619 // Otherwise, we try using scaled, 12-bit, unsigned immediate offsets. 620 if (!UseUnscaled && Addr.getOffset() < 0) 621 UseUnscaled = true; 622 623 unsigned StrOpc; 624 bool VTIsi1 = false; 625 int64_t ScaleFactor = 0; 626 // Using scaled, 12-bit, unsigned immediate offsets. 627 switch (VT.SimpleTy) { 628 default: 629 return false; 630 case MVT::i1: 631 VTIsi1 = true; 632 case MVT::i8: 633 StrOpc = UseUnscaled ? AArch64::STURBBi : AArch64::STRBBui; 634 ScaleFactor = 1; 635 break; 636 case MVT::i16: 637 StrOpc = UseUnscaled ? AArch64::STURHHi : AArch64::STRHHui; 638 ScaleFactor = 2; 639 break; 640 case MVT::i32: 641 StrOpc = UseUnscaled ? AArch64::STURWi : AArch64::STRWui; 642 ScaleFactor = 4; 643 break; 644 case MVT::i64: 645 StrOpc = UseUnscaled ? AArch64::STURXi : AArch64::STRXui; 646 ScaleFactor = 8; 647 break; 648 case MVT::f32: 649 StrOpc = UseUnscaled ? AArch64::STURSi : AArch64::STRSui; 650 ScaleFactor = 4; 651 break; 652 case MVT::f64: 653 StrOpc = UseUnscaled ? AArch64::STURDi : AArch64::STRDui; 654 ScaleFactor = 8; 655 break; 656 } 657 // Scale the offset. 658 if (!UseUnscaled) { 659 int64_t Offset = Addr.getOffset(); 660 if (Offset & (ScaleFactor - 1)) 661 // Retry using an unscaled, 9-bit, signed immediate offset. 662 return EmitStore(VT, SrcReg, Addr, /*UseUnscaled*/ true); 663 664 Addr.setOffset(Offset / ScaleFactor); 665 } 666 667 // Simplify this down to something we can handle. 668 if (!SimplifyAddress(Addr, VT, UseUnscaled ? 1 : ScaleFactor, UseUnscaled)) 669 return false; 670 671 // Storing an i1 requires special handling. 672 if (VTIsi1) { 673 MRI.constrainRegClass(SrcReg, &AArch64::GPR32RegClass); 674 unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass); 675 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri), 676 ANDReg) 677 .addReg(SrcReg) 678 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32)); 679 SrcReg = ANDReg; 680 } 681 // Create the base instruction, then add the operands. 682 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 683 TII.get(StrOpc)).addReg(SrcReg); 684 AddLoadStoreOperands(Addr, MIB, MachineMemOperand::MOStore, UseUnscaled); 685 return true; 686} 687 688bool AArch64FastISel::SelectStore(const Instruction *I) { 689 MVT VT; 690 Value *Op0 = I->getOperand(0); 691 // Verify we have a legal type before going any further. Currently, we handle 692 // simple types that will directly fit in a register (i32/f32/i64/f64) or 693 // those that can be sign or zero-extended to a basic operation (i1/i8/i16). 694 if (!isLoadStoreTypeLegal(Op0->getType(), VT) || 695 cast<StoreInst>(I)->isAtomic()) 696 return false; 697 698 // Get the value to be stored into a register. 699 unsigned SrcReg = getRegForValue(Op0); 700 if (SrcReg == 0) 701 return false; 702 703 // See if we can handle this address. 704 Address Addr; 705 if (!ComputeAddress(I->getOperand(1), Addr)) 706 return false; 707 708 if (!EmitStore(VT, SrcReg, Addr)) 709 return false; 710 return true; 711} 712 713static AArch64CC::CondCode getCompareCC(CmpInst::Predicate Pred) { 714 switch (Pred) { 715 case CmpInst::FCMP_ONE: 716 case CmpInst::FCMP_UEQ: 717 default: 718 // AL is our "false" for now. The other two need more compares. 719 return AArch64CC::AL; 720 case CmpInst::ICMP_EQ: 721 case CmpInst::FCMP_OEQ: 722 return AArch64CC::EQ; 723 case CmpInst::ICMP_SGT: 724 case CmpInst::FCMP_OGT: 725 return AArch64CC::GT; 726 case CmpInst::ICMP_SGE: 727 case CmpInst::FCMP_OGE: 728 return AArch64CC::GE; 729 case CmpInst::ICMP_UGT: 730 case CmpInst::FCMP_UGT: 731 return AArch64CC::HI; 732 case CmpInst::FCMP_OLT: 733 return AArch64CC::MI; 734 case CmpInst::ICMP_ULE: 735 case CmpInst::FCMP_OLE: 736 return AArch64CC::LS; 737 case CmpInst::FCMP_ORD: 738 return AArch64CC::VC; 739 case CmpInst::FCMP_UNO: 740 return AArch64CC::VS; 741 case CmpInst::FCMP_UGE: 742 return AArch64CC::PL; 743 case CmpInst::ICMP_SLT: 744 case CmpInst::FCMP_ULT: 745 return AArch64CC::LT; 746 case CmpInst::ICMP_SLE: 747 case CmpInst::FCMP_ULE: 748 return AArch64CC::LE; 749 case CmpInst::FCMP_UNE: 750 case CmpInst::ICMP_NE: 751 return AArch64CC::NE; 752 case CmpInst::ICMP_UGE: 753 return AArch64CC::HS; 754 case CmpInst::ICMP_ULT: 755 return AArch64CC::LO; 756 } 757} 758 759bool AArch64FastISel::SelectBranch(const Instruction *I) { 760 const BranchInst *BI = cast<BranchInst>(I); 761 MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)]; 762 MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)]; 763 764 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) { 765 if (CI->hasOneUse() && (CI->getParent() == I->getParent())) { 766 // We may not handle every CC for now. 767 AArch64CC::CondCode CC = getCompareCC(CI->getPredicate()); 768 if (CC == AArch64CC::AL) 769 return false; 770 771 // Emit the cmp. 772 if (!EmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) 773 return false; 774 775 // Emit the branch. 776 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc)) 777 .addImm(CC) 778 .addMBB(TBB); 779 FuncInfo.MBB->addSuccessor(TBB); 780 781 FastEmitBranch(FBB, DbgLoc); 782 return true; 783 } 784 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) { 785 MVT SrcVT; 786 if (TI->hasOneUse() && TI->getParent() == I->getParent() && 787 (isLoadStoreTypeLegal(TI->getOperand(0)->getType(), SrcVT))) { 788 unsigned CondReg = getRegForValue(TI->getOperand(0)); 789 if (CondReg == 0) 790 return false; 791 792 // Issue an extract_subreg to get the lower 32-bits. 793 if (SrcVT == MVT::i64) 794 CondReg = FastEmitInst_extractsubreg(MVT::i32, CondReg, /*Kill=*/true, 795 AArch64::sub_32); 796 797 MRI.constrainRegClass(CondReg, &AArch64::GPR32RegClass); 798 unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass); 799 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 800 TII.get(AArch64::ANDWri), ANDReg) 801 .addReg(CondReg) 802 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32)); 803 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 804 TII.get(AArch64::SUBSWri)) 805 .addReg(ANDReg) 806 .addReg(ANDReg) 807 .addImm(0) 808 .addImm(0); 809 810 unsigned CC = AArch64CC::NE; 811 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 812 std::swap(TBB, FBB); 813 CC = AArch64CC::EQ; 814 } 815 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc)) 816 .addImm(CC) 817 .addMBB(TBB); 818 FuncInfo.MBB->addSuccessor(TBB); 819 FastEmitBranch(FBB, DbgLoc); 820 return true; 821 } 822 } else if (const ConstantInt *CI = 823 dyn_cast<ConstantInt>(BI->getCondition())) { 824 uint64_t Imm = CI->getZExtValue(); 825 MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB; 826 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::B)) 827 .addMBB(Target); 828 FuncInfo.MBB->addSuccessor(Target); 829 return true; 830 } 831 832 unsigned CondReg = getRegForValue(BI->getCondition()); 833 if (CondReg == 0) 834 return false; 835 836 // We've been divorced from our compare! Our block was split, and 837 // now our compare lives in a predecessor block. We musn't 838 // re-compare here, as the children of the compare aren't guaranteed 839 // live across the block boundary (we *could* check for this). 840 // Regardless, the compare has been done in the predecessor block, 841 // and it left a value for us in a virtual register. Ergo, we test 842 // the one-bit value left in the virtual register. 843 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SUBSWri), 844 AArch64::WZR) 845 .addReg(CondReg) 846 .addImm(0) 847 .addImm(0); 848 849 unsigned CC = AArch64CC::NE; 850 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) { 851 std::swap(TBB, FBB); 852 CC = AArch64CC::EQ; 853 } 854 855 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::Bcc)) 856 .addImm(CC) 857 .addMBB(TBB); 858 FuncInfo.MBB->addSuccessor(TBB); 859 FastEmitBranch(FBB, DbgLoc); 860 return true; 861} 862 863bool AArch64FastISel::SelectIndirectBr(const Instruction *I) { 864 const IndirectBrInst *BI = cast<IndirectBrInst>(I); 865 unsigned AddrReg = getRegForValue(BI->getOperand(0)); 866 if (AddrReg == 0) 867 return false; 868 869 // Emit the indirect branch. 870 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BR)) 871 .addReg(AddrReg); 872 873 // Make sure the CFG is up-to-date. 874 for (unsigned i = 0, e = BI->getNumSuccessors(); i != e; ++i) 875 FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[BI->getSuccessor(i)]); 876 877 return true; 878} 879 880bool AArch64FastISel::EmitCmp(Value *Src1Value, Value *Src2Value, bool isZExt) { 881 Type *Ty = Src1Value->getType(); 882 EVT SrcEVT = TLI.getValueType(Ty, true); 883 if (!SrcEVT.isSimple()) 884 return false; 885 MVT SrcVT = SrcEVT.getSimpleVT(); 886 887 // Check to see if the 2nd operand is a constant that we can encode directly 888 // in the compare. 889 uint64_t Imm; 890 bool UseImm = false; 891 bool isNegativeImm = false; 892 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) { 893 if (SrcVT == MVT::i64 || SrcVT == MVT::i32 || SrcVT == MVT::i16 || 894 SrcVT == MVT::i8 || SrcVT == MVT::i1) { 895 const APInt &CIVal = ConstInt->getValue(); 896 897 Imm = (isZExt) ? CIVal.getZExtValue() : CIVal.getSExtValue(); 898 if (CIVal.isNegative()) { 899 isNegativeImm = true; 900 Imm = -Imm; 901 } 902 // FIXME: We can handle more immediates using shifts. 903 UseImm = ((Imm & 0xfff) == Imm); 904 } 905 } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) { 906 if (SrcVT == MVT::f32 || SrcVT == MVT::f64) 907 if (ConstFP->isZero() && !ConstFP->isNegative()) 908 UseImm = true; 909 } 910 911 unsigned ZReg; 912 unsigned CmpOpc; 913 bool isICmp = true; 914 bool needsExt = false; 915 switch (SrcVT.SimpleTy) { 916 default: 917 return false; 918 case MVT::i1: 919 case MVT::i8: 920 case MVT::i16: 921 needsExt = true; 922 // Intentional fall-through. 923 case MVT::i32: 924 ZReg = AArch64::WZR; 925 if (UseImm) 926 CmpOpc = isNegativeImm ? AArch64::ADDSWri : AArch64::SUBSWri; 927 else 928 CmpOpc = AArch64::SUBSWrr; 929 break; 930 case MVT::i64: 931 ZReg = AArch64::XZR; 932 if (UseImm) 933 CmpOpc = isNegativeImm ? AArch64::ADDSXri : AArch64::SUBSXri; 934 else 935 CmpOpc = AArch64::SUBSXrr; 936 break; 937 case MVT::f32: 938 isICmp = false; 939 CmpOpc = UseImm ? AArch64::FCMPSri : AArch64::FCMPSrr; 940 break; 941 case MVT::f64: 942 isICmp = false; 943 CmpOpc = UseImm ? AArch64::FCMPDri : AArch64::FCMPDrr; 944 break; 945 } 946 947 unsigned SrcReg1 = getRegForValue(Src1Value); 948 if (SrcReg1 == 0) 949 return false; 950 951 unsigned SrcReg2; 952 if (!UseImm) { 953 SrcReg2 = getRegForValue(Src2Value); 954 if (SrcReg2 == 0) 955 return false; 956 } 957 958 // We have i1, i8, or i16, we need to either zero extend or sign extend. 959 if (needsExt) { 960 SrcReg1 = EmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt); 961 if (SrcReg1 == 0) 962 return false; 963 if (!UseImm) { 964 SrcReg2 = EmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt); 965 if (SrcReg2 == 0) 966 return false; 967 } 968 } 969 970 if (isICmp) { 971 if (UseImm) 972 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc)) 973 .addReg(ZReg) 974 .addReg(SrcReg1) 975 .addImm(Imm) 976 .addImm(0); 977 else 978 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc)) 979 .addReg(ZReg) 980 .addReg(SrcReg1) 981 .addReg(SrcReg2); 982 } else { 983 if (UseImm) 984 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc)) 985 .addReg(SrcReg1); 986 else 987 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc)) 988 .addReg(SrcReg1) 989 .addReg(SrcReg2); 990 } 991 return true; 992} 993 994bool AArch64FastISel::SelectCmp(const Instruction *I) { 995 const CmpInst *CI = cast<CmpInst>(I); 996 997 // We may not handle every CC for now. 998 AArch64CC::CondCode CC = getCompareCC(CI->getPredicate()); 999 if (CC == AArch64CC::AL) 1000 return false; 1001 1002 // Emit the cmp. 1003 if (!EmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) 1004 return false; 1005 1006 // Now set a register based on the comparison. 1007 AArch64CC::CondCode invertedCC = getInvertedCondCode(CC); 1008 unsigned ResultReg = createResultReg(&AArch64::GPR32RegClass); 1009 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr), 1010 ResultReg) 1011 .addReg(AArch64::WZR) 1012 .addReg(AArch64::WZR) 1013 .addImm(invertedCC); 1014 1015 UpdateValueMap(I, ResultReg); 1016 return true; 1017} 1018 1019bool AArch64FastISel::SelectSelect(const Instruction *I) { 1020 const SelectInst *SI = cast<SelectInst>(I); 1021 1022 EVT DestEVT = TLI.getValueType(SI->getType(), true); 1023 if (!DestEVT.isSimple()) 1024 return false; 1025 1026 MVT DestVT = DestEVT.getSimpleVT(); 1027 if (DestVT != MVT::i32 && DestVT != MVT::i64 && DestVT != MVT::f32 && 1028 DestVT != MVT::f64) 1029 return false; 1030 1031 unsigned CondReg = getRegForValue(SI->getCondition()); 1032 if (CondReg == 0) 1033 return false; 1034 unsigned TrueReg = getRegForValue(SI->getTrueValue()); 1035 if (TrueReg == 0) 1036 return false; 1037 unsigned FalseReg = getRegForValue(SI->getFalseValue()); 1038 if (FalseReg == 0) 1039 return false; 1040 1041 1042 MRI.constrainRegClass(CondReg, &AArch64::GPR32RegClass); 1043 unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass); 1044 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri), 1045 ANDReg) 1046 .addReg(CondReg) 1047 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32)); 1048 1049 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SUBSWri)) 1050 .addReg(ANDReg) 1051 .addReg(ANDReg) 1052 .addImm(0) 1053 .addImm(0); 1054 1055 unsigned SelectOpc; 1056 switch (DestVT.SimpleTy) { 1057 default: 1058 return false; 1059 case MVT::i32: 1060 SelectOpc = AArch64::CSELWr; 1061 break; 1062 case MVT::i64: 1063 SelectOpc = AArch64::CSELXr; 1064 break; 1065 case MVT::f32: 1066 SelectOpc = AArch64::FCSELSrrr; 1067 break; 1068 case MVT::f64: 1069 SelectOpc = AArch64::FCSELDrrr; 1070 break; 1071 } 1072 1073 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT)); 1074 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SelectOpc), 1075 ResultReg) 1076 .addReg(TrueReg) 1077 .addReg(FalseReg) 1078 .addImm(AArch64CC::NE); 1079 1080 UpdateValueMap(I, ResultReg); 1081 return true; 1082} 1083 1084bool AArch64FastISel::SelectFPExt(const Instruction *I) { 1085 Value *V = I->getOperand(0); 1086 if (!I->getType()->isDoubleTy() || !V->getType()->isFloatTy()) 1087 return false; 1088 1089 unsigned Op = getRegForValue(V); 1090 if (Op == 0) 1091 return false; 1092 1093 unsigned ResultReg = createResultReg(&AArch64::FPR64RegClass); 1094 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTDSr), 1095 ResultReg).addReg(Op); 1096 UpdateValueMap(I, ResultReg); 1097 return true; 1098} 1099 1100bool AArch64FastISel::SelectFPTrunc(const Instruction *I) { 1101 Value *V = I->getOperand(0); 1102 if (!I->getType()->isFloatTy() || !V->getType()->isDoubleTy()) 1103 return false; 1104 1105 unsigned Op = getRegForValue(V); 1106 if (Op == 0) 1107 return false; 1108 1109 unsigned ResultReg = createResultReg(&AArch64::FPR32RegClass); 1110 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTSDr), 1111 ResultReg).addReg(Op); 1112 UpdateValueMap(I, ResultReg); 1113 return true; 1114} 1115 1116// FPToUI and FPToSI 1117bool AArch64FastISel::SelectFPToInt(const Instruction *I, bool Signed) { 1118 MVT DestVT; 1119 if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector()) 1120 return false; 1121 1122 unsigned SrcReg = getRegForValue(I->getOperand(0)); 1123 if (SrcReg == 0) 1124 return false; 1125 1126 EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType(), true); 1127 if (SrcVT == MVT::f128) 1128 return false; 1129 1130 unsigned Opc; 1131 if (SrcVT == MVT::f64) { 1132 if (Signed) 1133 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWDr : AArch64::FCVTZSUXDr; 1134 else 1135 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWDr : AArch64::FCVTZUUXDr; 1136 } else { 1137 if (Signed) 1138 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZSUWSr : AArch64::FCVTZSUXSr; 1139 else 1140 Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWSr : AArch64::FCVTZUUXSr; 1141 } 1142 unsigned ResultReg = createResultReg( 1143 DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass); 1144 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) 1145 .addReg(SrcReg); 1146 UpdateValueMap(I, ResultReg); 1147 return true; 1148} 1149 1150bool AArch64FastISel::SelectIntToFP(const Instruction *I, bool Signed) { 1151 MVT DestVT; 1152 if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector()) 1153 return false; 1154 assert ((DestVT == MVT::f32 || DestVT == MVT::f64) && 1155 "Unexpected value type."); 1156 1157 unsigned SrcReg = getRegForValue(I->getOperand(0)); 1158 if (SrcReg == 0) 1159 return false; 1160 1161 EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType(), true); 1162 1163 // Handle sign-extension. 1164 if (SrcVT == MVT::i16 || SrcVT == MVT::i8 || SrcVT == MVT::i1) { 1165 SrcReg = 1166 EmitIntExt(SrcVT.getSimpleVT(), SrcReg, MVT::i32, /*isZExt*/ !Signed); 1167 if (SrcReg == 0) 1168 return false; 1169 } 1170 1171 MRI.constrainRegClass(SrcReg, SrcVT == MVT::i64 ? &AArch64::GPR64RegClass 1172 : &AArch64::GPR32RegClass); 1173 1174 unsigned Opc; 1175 if (SrcVT == MVT::i64) { 1176 if (Signed) 1177 Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUXSri : AArch64::SCVTFUXDri; 1178 else 1179 Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUXSri : AArch64::UCVTFUXDri; 1180 } else { 1181 if (Signed) 1182 Opc = (DestVT == MVT::f32) ? AArch64::SCVTFUWSri : AArch64::SCVTFUWDri; 1183 else 1184 Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri; 1185 } 1186 1187 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT)); 1188 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) 1189 .addReg(SrcReg); 1190 UpdateValueMap(I, ResultReg); 1191 return true; 1192} 1193 1194bool AArch64FastISel::ProcessCallArgs( 1195 SmallVectorImpl<Value *> &Args, SmallVectorImpl<unsigned> &ArgRegs, 1196 SmallVectorImpl<MVT> &ArgVTs, SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags, 1197 SmallVectorImpl<unsigned> &RegArgs, CallingConv::ID CC, 1198 unsigned &NumBytes) { 1199 SmallVector<CCValAssign, 16> ArgLocs; 1200 CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context); 1201 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC)); 1202 1203 // Get a count of how many bytes are to be pushed on the stack. 1204 NumBytes = CCInfo.getNextStackOffset(); 1205 1206 // Issue CALLSEQ_START 1207 unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); 1208 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown)) 1209 .addImm(NumBytes); 1210 1211 // Process the args. 1212 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 1213 CCValAssign &VA = ArgLocs[i]; 1214 unsigned Arg = ArgRegs[VA.getValNo()]; 1215 MVT ArgVT = ArgVTs[VA.getValNo()]; 1216 1217 // Handle arg promotion: SExt, ZExt, AExt. 1218 switch (VA.getLocInfo()) { 1219 case CCValAssign::Full: 1220 break; 1221 case CCValAssign::SExt: { 1222 MVT DestVT = VA.getLocVT(); 1223 MVT SrcVT = ArgVT; 1224 Arg = EmitIntExt(SrcVT, Arg, DestVT, /*isZExt*/ false); 1225 if (Arg == 0) 1226 return false; 1227 ArgVT = DestVT; 1228 break; 1229 } 1230 case CCValAssign::AExt: 1231 // Intentional fall-through. 1232 case CCValAssign::ZExt: { 1233 MVT DestVT = VA.getLocVT(); 1234 MVT SrcVT = ArgVT; 1235 Arg = EmitIntExt(SrcVT, Arg, DestVT, /*isZExt*/ true); 1236 if (Arg == 0) 1237 return false; 1238 ArgVT = DestVT; 1239 break; 1240 } 1241 default: 1242 llvm_unreachable("Unknown arg promotion!"); 1243 } 1244 1245 // Now copy/store arg to correct locations. 1246 if (VA.isRegLoc() && !VA.needsCustom()) { 1247 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1248 TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg); 1249 RegArgs.push_back(VA.getLocReg()); 1250 } else if (VA.needsCustom()) { 1251 // FIXME: Handle custom args. 1252 return false; 1253 } else { 1254 assert(VA.isMemLoc() && "Assuming store on stack."); 1255 1256 // Need to store on the stack. 1257 unsigned ArgSize = VA.getLocVT().getSizeInBits() / 8; 1258 1259 unsigned BEAlign = 0; 1260 if (ArgSize < 8 && !Subtarget->isLittleEndian()) 1261 BEAlign = 8 - ArgSize; 1262 1263 Address Addr; 1264 Addr.setKind(Address::RegBase); 1265 Addr.setReg(AArch64::SP); 1266 Addr.setOffset(VA.getLocMemOffset() + BEAlign); 1267 1268 if (!EmitStore(ArgVT, Arg, Addr)) 1269 return false; 1270 } 1271 } 1272 return true; 1273} 1274 1275bool AArch64FastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs, 1276 const Instruction *I, CallingConv::ID CC, 1277 unsigned &NumBytes) { 1278 // Issue CALLSEQ_END 1279 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); 1280 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp)) 1281 .addImm(NumBytes) 1282 .addImm(0); 1283 1284 // Now the return value. 1285 if (RetVT != MVT::isVoid) { 1286 SmallVector<CCValAssign, 16> RVLocs; 1287 CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context); 1288 CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC)); 1289 1290 // Only handle a single return value. 1291 if (RVLocs.size() != 1) 1292 return false; 1293 1294 // Copy all of the result registers out of their specified physreg. 1295 MVT CopyVT = RVLocs[0].getValVT(); 1296 unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT)); 1297 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1298 TII.get(TargetOpcode::COPY), 1299 ResultReg).addReg(RVLocs[0].getLocReg()); 1300 UsedRegs.push_back(RVLocs[0].getLocReg()); 1301 1302 // Finally update the result. 1303 UpdateValueMap(I, ResultReg); 1304 } 1305 1306 return true; 1307} 1308 1309bool AArch64FastISel::SelectCall(const Instruction *I, 1310 const char *IntrMemName = nullptr) { 1311 const CallInst *CI = cast<CallInst>(I); 1312 const Value *Callee = CI->getCalledValue(); 1313 1314 // Don't handle inline asm or intrinsics. 1315 if (isa<InlineAsm>(Callee)) 1316 return false; 1317 1318 // Only handle global variable Callees. 1319 const GlobalValue *GV = dyn_cast<GlobalValue>(Callee); 1320 if (!GV) 1321 return false; 1322 1323 // Check the calling convention. 1324 ImmutableCallSite CS(CI); 1325 CallingConv::ID CC = CS.getCallingConv(); 1326 1327 // Let SDISel handle vararg functions. 1328 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType()); 1329 FunctionType *FTy = cast<FunctionType>(PT->getElementType()); 1330 if (FTy->isVarArg()) 1331 return false; 1332 1333 // Handle *simple* calls for now. 1334 MVT RetVT; 1335 Type *RetTy = I->getType(); 1336 if (RetTy->isVoidTy()) 1337 RetVT = MVT::isVoid; 1338 else if (!isTypeLegal(RetTy, RetVT)) 1339 return false; 1340 1341 // Set up the argument vectors. 1342 SmallVector<Value *, 8> Args; 1343 SmallVector<unsigned, 8> ArgRegs; 1344 SmallVector<MVT, 8> ArgVTs; 1345 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags; 1346 Args.reserve(CS.arg_size()); 1347 ArgRegs.reserve(CS.arg_size()); 1348 ArgVTs.reserve(CS.arg_size()); 1349 ArgFlags.reserve(CS.arg_size()); 1350 1351 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); 1352 i != e; ++i) { 1353 // If we're lowering a memory intrinsic instead of a regular call, skip the 1354 // last two arguments, which shouldn't be passed to the underlying function. 1355 if (IntrMemName && e - i <= 2) 1356 break; 1357 1358 unsigned Arg = getRegForValue(*i); 1359 if (Arg == 0) 1360 return false; 1361 1362 ISD::ArgFlagsTy Flags; 1363 unsigned AttrInd = i - CS.arg_begin() + 1; 1364 if (CS.paramHasAttr(AttrInd, Attribute::SExt)) 1365 Flags.setSExt(); 1366 if (CS.paramHasAttr(AttrInd, Attribute::ZExt)) 1367 Flags.setZExt(); 1368 1369 // FIXME: Only handle *easy* calls for now. 1370 if (CS.paramHasAttr(AttrInd, Attribute::InReg) || 1371 CS.paramHasAttr(AttrInd, Attribute::StructRet) || 1372 CS.paramHasAttr(AttrInd, Attribute::Nest) || 1373 CS.paramHasAttr(AttrInd, Attribute::ByVal)) 1374 return false; 1375 1376 MVT ArgVT; 1377 Type *ArgTy = (*i)->getType(); 1378 if (!isTypeLegal(ArgTy, ArgVT) && 1379 !(ArgVT == MVT::i1 || ArgVT == MVT::i8 || ArgVT == MVT::i16)) 1380 return false; 1381 1382 // We don't handle vector parameters yet. 1383 if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64) 1384 return false; 1385 1386 unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy); 1387 Flags.setOrigAlign(OriginalAlignment); 1388 1389 Args.push_back(*i); 1390 ArgRegs.push_back(Arg); 1391 ArgVTs.push_back(ArgVT); 1392 ArgFlags.push_back(Flags); 1393 } 1394 1395 // Handle the arguments now that we've gotten them. 1396 SmallVector<unsigned, 4> RegArgs; 1397 unsigned NumBytes; 1398 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags, RegArgs, CC, NumBytes)) 1399 return false; 1400 1401 // Issue the call. 1402 MachineInstrBuilder MIB; 1403 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BL)); 1404 if (!IntrMemName) 1405 MIB.addGlobalAddress(GV, 0, 0); 1406 else 1407 MIB.addExternalSymbol(IntrMemName, 0); 1408 1409 // Add implicit physical register uses to the call. 1410 for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) 1411 MIB.addReg(RegArgs[i], RegState::Implicit); 1412 1413 // Add a register mask with the call-preserved registers. 1414 // Proper defs for return values will be added by setPhysRegsDeadExcept(). 1415 MIB.addRegMask(TRI.getCallPreservedMask(CS.getCallingConv())); 1416 1417 // Finish off the call including any return values. 1418 SmallVector<unsigned, 4> UsedRegs; 1419 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes)) 1420 return false; 1421 1422 // Set all unused physreg defs as dead. 1423 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); 1424 1425 return true; 1426} 1427 1428bool AArch64FastISel::IsMemCpySmall(uint64_t Len, unsigned Alignment) { 1429 if (Alignment) 1430 return Len / Alignment <= 4; 1431 else 1432 return Len < 32; 1433} 1434 1435bool AArch64FastISel::TryEmitSmallMemCpy(Address Dest, Address Src, 1436 uint64_t Len, unsigned Alignment) { 1437 // Make sure we don't bloat code by inlining very large memcpy's. 1438 if (!IsMemCpySmall(Len, Alignment)) 1439 return false; 1440 1441 int64_t UnscaledOffset = 0; 1442 Address OrigDest = Dest; 1443 Address OrigSrc = Src; 1444 1445 while (Len) { 1446 MVT VT; 1447 if (!Alignment || Alignment >= 8) { 1448 if (Len >= 8) 1449 VT = MVT::i64; 1450 else if (Len >= 4) 1451 VT = MVT::i32; 1452 else if (Len >= 2) 1453 VT = MVT::i16; 1454 else { 1455 VT = MVT::i8; 1456 } 1457 } else { 1458 // Bound based on alignment. 1459 if (Len >= 4 && Alignment == 4) 1460 VT = MVT::i32; 1461 else if (Len >= 2 && Alignment == 2) 1462 VT = MVT::i16; 1463 else { 1464 VT = MVT::i8; 1465 } 1466 } 1467 1468 bool RV; 1469 unsigned ResultReg; 1470 RV = EmitLoad(VT, ResultReg, Src); 1471 assert(RV == true && "Should be able to handle this load."); 1472 RV = EmitStore(VT, ResultReg, Dest); 1473 assert(RV == true && "Should be able to handle this store."); 1474 (void)RV; 1475 1476 int64_t Size = VT.getSizeInBits() / 8; 1477 Len -= Size; 1478 UnscaledOffset += Size; 1479 1480 // We need to recompute the unscaled offset for each iteration. 1481 Dest.setOffset(OrigDest.getOffset() + UnscaledOffset); 1482 Src.setOffset(OrigSrc.getOffset() + UnscaledOffset); 1483 } 1484 1485 return true; 1486} 1487 1488bool AArch64FastISel::SelectIntrinsicCall(const IntrinsicInst &I) { 1489 // FIXME: Handle more intrinsics. 1490 switch (I.getIntrinsicID()) { 1491 default: 1492 return false; 1493 case Intrinsic::memcpy: 1494 case Intrinsic::memmove: { 1495 const MemTransferInst &MTI = cast<MemTransferInst>(I); 1496 // Don't handle volatile. 1497 if (MTI.isVolatile()) 1498 return false; 1499 1500 // Disable inlining for memmove before calls to ComputeAddress. Otherwise, 1501 // we would emit dead code because we don't currently handle memmoves. 1502 bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy); 1503 if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) { 1504 // Small memcpy's are common enough that we want to do them without a call 1505 // if possible. 1506 uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue(); 1507 unsigned Alignment = MTI.getAlignment(); 1508 if (IsMemCpySmall(Len, Alignment)) { 1509 Address Dest, Src; 1510 if (!ComputeAddress(MTI.getRawDest(), Dest) || 1511 !ComputeAddress(MTI.getRawSource(), Src)) 1512 return false; 1513 if (TryEmitSmallMemCpy(Dest, Src, Len, Alignment)) 1514 return true; 1515 } 1516 } 1517 1518 if (!MTI.getLength()->getType()->isIntegerTy(64)) 1519 return false; 1520 1521 if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255) 1522 // Fast instruction selection doesn't support the special 1523 // address spaces. 1524 return false; 1525 1526 const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove"; 1527 return SelectCall(&I, IntrMemName); 1528 } 1529 case Intrinsic::memset: { 1530 const MemSetInst &MSI = cast<MemSetInst>(I); 1531 // Don't handle volatile. 1532 if (MSI.isVolatile()) 1533 return false; 1534 1535 if (!MSI.getLength()->getType()->isIntegerTy(64)) 1536 return false; 1537 1538 if (MSI.getDestAddressSpace() > 255) 1539 // Fast instruction selection doesn't support the special 1540 // address spaces. 1541 return false; 1542 1543 return SelectCall(&I, "memset"); 1544 } 1545 case Intrinsic::trap: { 1546 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::BRK)) 1547 .addImm(1); 1548 return true; 1549 } 1550 } 1551 return false; 1552} 1553 1554bool AArch64FastISel::SelectRet(const Instruction *I) { 1555 const ReturnInst *Ret = cast<ReturnInst>(I); 1556 const Function &F = *I->getParent()->getParent(); 1557 1558 if (!FuncInfo.CanLowerReturn) 1559 return false; 1560 1561 if (F.isVarArg()) 1562 return false; 1563 1564 // Build a list of return value registers. 1565 SmallVector<unsigned, 4> RetRegs; 1566 1567 if (Ret->getNumOperands() > 0) { 1568 CallingConv::ID CC = F.getCallingConv(); 1569 SmallVector<ISD::OutputArg, 4> Outs; 1570 GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI); 1571 1572 // Analyze operands of the call, assigning locations to each operand. 1573 SmallVector<CCValAssign, 16> ValLocs; 1574 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs, 1575 I->getContext()); 1576 CCAssignFn *RetCC = CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS 1577 : RetCC_AArch64_AAPCS; 1578 CCInfo.AnalyzeReturn(Outs, RetCC); 1579 1580 // Only handle a single return value for now. 1581 if (ValLocs.size() != 1) 1582 return false; 1583 1584 CCValAssign &VA = ValLocs[0]; 1585 const Value *RV = Ret->getOperand(0); 1586 1587 // Don't bother handling odd stuff for now. 1588 if (VA.getLocInfo() != CCValAssign::Full) 1589 return false; 1590 // Only handle register returns for now. 1591 if (!VA.isRegLoc()) 1592 return false; 1593 unsigned Reg = getRegForValue(RV); 1594 if (Reg == 0) 1595 return false; 1596 1597 unsigned SrcReg = Reg + VA.getValNo(); 1598 unsigned DestReg = VA.getLocReg(); 1599 // Avoid a cross-class copy. This is very unlikely. 1600 if (!MRI.getRegClass(SrcReg)->contains(DestReg)) 1601 return false; 1602 1603 EVT RVEVT = TLI.getValueType(RV->getType()); 1604 if (!RVEVT.isSimple()) 1605 return false; 1606 1607 // Vectors (of > 1 lane) in big endian need tricky handling. 1608 if (RVEVT.isVector() && RVEVT.getVectorNumElements() > 1) 1609 return false; 1610 1611 MVT RVVT = RVEVT.getSimpleVT(); 1612 if (RVVT == MVT::f128) 1613 return false; 1614 MVT DestVT = VA.getValVT(); 1615 // Special handling for extended integers. 1616 if (RVVT != DestVT) { 1617 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16) 1618 return false; 1619 1620 if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt()) 1621 return false; 1622 1623 bool isZExt = Outs[0].Flags.isZExt(); 1624 SrcReg = EmitIntExt(RVVT, SrcReg, DestVT, isZExt); 1625 if (SrcReg == 0) 1626 return false; 1627 } 1628 1629 // Make the copy. 1630 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1631 TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg); 1632 1633 // Add register to return instruction. 1634 RetRegs.push_back(VA.getLocReg()); 1635 } 1636 1637 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1638 TII.get(AArch64::RET_ReallyLR)); 1639 for (unsigned i = 0, e = RetRegs.size(); i != e; ++i) 1640 MIB.addReg(RetRegs[i], RegState::Implicit); 1641 return true; 1642} 1643 1644bool AArch64FastISel::SelectTrunc(const Instruction *I) { 1645 Type *DestTy = I->getType(); 1646 Value *Op = I->getOperand(0); 1647 Type *SrcTy = Op->getType(); 1648 1649 EVT SrcEVT = TLI.getValueType(SrcTy, true); 1650 EVT DestEVT = TLI.getValueType(DestTy, true); 1651 if (!SrcEVT.isSimple()) 1652 return false; 1653 if (!DestEVT.isSimple()) 1654 return false; 1655 1656 MVT SrcVT = SrcEVT.getSimpleVT(); 1657 MVT DestVT = DestEVT.getSimpleVT(); 1658 1659 if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16 && 1660 SrcVT != MVT::i8) 1661 return false; 1662 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8 && 1663 DestVT != MVT::i1) 1664 return false; 1665 1666 unsigned SrcReg = getRegForValue(Op); 1667 if (!SrcReg) 1668 return false; 1669 1670 // If we're truncating from i64 to a smaller non-legal type then generate an 1671 // AND. Otherwise, we know the high bits are undefined and a truncate doesn't 1672 // generate any code. 1673 if (SrcVT == MVT::i64) { 1674 uint64_t Mask = 0; 1675 switch (DestVT.SimpleTy) { 1676 default: 1677 // Trunc i64 to i32 is handled by the target-independent fast-isel. 1678 return false; 1679 case MVT::i1: 1680 Mask = 0x1; 1681 break; 1682 case MVT::i8: 1683 Mask = 0xff; 1684 break; 1685 case MVT::i16: 1686 Mask = 0xffff; 1687 break; 1688 } 1689 // Issue an extract_subreg to get the lower 32-bits. 1690 unsigned Reg32 = FastEmitInst_extractsubreg(MVT::i32, SrcReg, /*Kill=*/true, 1691 AArch64::sub_32); 1692 MRI.constrainRegClass(Reg32, &AArch64::GPR32RegClass); 1693 // Create the AND instruction which performs the actual truncation. 1694 unsigned ANDReg = createResultReg(&AArch64::GPR32spRegClass); 1695 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri), 1696 ANDReg) 1697 .addReg(Reg32) 1698 .addImm(AArch64_AM::encodeLogicalImmediate(Mask, 32)); 1699 SrcReg = ANDReg; 1700 } 1701 1702 UpdateValueMap(I, SrcReg); 1703 return true; 1704} 1705 1706unsigned AArch64FastISel::Emiti1Ext(unsigned SrcReg, MVT DestVT, bool isZExt) { 1707 assert((DestVT == MVT::i8 || DestVT == MVT::i16 || DestVT == MVT::i32 || 1708 DestVT == MVT::i64) && 1709 "Unexpected value type."); 1710 // Handle i8 and i16 as i32. 1711 if (DestVT == MVT::i8 || DestVT == MVT::i16) 1712 DestVT = MVT::i32; 1713 1714 if (isZExt) { 1715 MRI.constrainRegClass(SrcReg, &AArch64::GPR32RegClass); 1716 unsigned ResultReg = createResultReg(&AArch64::GPR32spRegClass); 1717 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ANDWri), 1718 ResultReg) 1719 .addReg(SrcReg) 1720 .addImm(AArch64_AM::encodeLogicalImmediate(1, 32)); 1721 1722 if (DestVT == MVT::i64) { 1723 // We're ZExt i1 to i64. The ANDWri Wd, Ws, #1 implicitly clears the 1724 // upper 32 bits. Emit a SUBREG_TO_REG to extend from Wd to Xd. 1725 unsigned Reg64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass); 1726 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1727 TII.get(AArch64::SUBREG_TO_REG), Reg64) 1728 .addImm(0) 1729 .addReg(ResultReg) 1730 .addImm(AArch64::sub_32); 1731 ResultReg = Reg64; 1732 } 1733 return ResultReg; 1734 } else { 1735 if (DestVT == MVT::i64) { 1736 // FIXME: We're SExt i1 to i64. 1737 return 0; 1738 } 1739 unsigned ResultReg = createResultReg(&AArch64::GPR32RegClass); 1740 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::SBFMWri), 1741 ResultReg) 1742 .addReg(SrcReg) 1743 .addImm(0) 1744 .addImm(0); 1745 return ResultReg; 1746 } 1747} 1748 1749unsigned AArch64FastISel::EmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, 1750 bool isZExt) { 1751 assert(DestVT != MVT::i1 && "ZeroExt/SignExt an i1?"); 1752 unsigned Opc; 1753 unsigned Imm = 0; 1754 1755 switch (SrcVT.SimpleTy) { 1756 default: 1757 return 0; 1758 case MVT::i1: 1759 return Emiti1Ext(SrcReg, DestVT, isZExt); 1760 case MVT::i8: 1761 if (DestVT == MVT::i64) 1762 Opc = isZExt ? AArch64::UBFMXri : AArch64::SBFMXri; 1763 else 1764 Opc = isZExt ? AArch64::UBFMWri : AArch64::SBFMWri; 1765 Imm = 7; 1766 break; 1767 case MVT::i16: 1768 if (DestVT == MVT::i64) 1769 Opc = isZExt ? AArch64::UBFMXri : AArch64::SBFMXri; 1770 else 1771 Opc = isZExt ? AArch64::UBFMWri : AArch64::SBFMWri; 1772 Imm = 15; 1773 break; 1774 case MVT::i32: 1775 assert(DestVT == MVT::i64 && "IntExt i32 to i32?!?"); 1776 Opc = isZExt ? AArch64::UBFMXri : AArch64::SBFMXri; 1777 Imm = 31; 1778 break; 1779 } 1780 1781 // Handle i8 and i16 as i32. 1782 if (DestVT == MVT::i8 || DestVT == MVT::i16) 1783 DestVT = MVT::i32; 1784 else if (DestVT == MVT::i64) { 1785 unsigned Src64 = MRI.createVirtualRegister(&AArch64::GPR64RegClass); 1786 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1787 TII.get(AArch64::SUBREG_TO_REG), Src64) 1788 .addImm(0) 1789 .addReg(SrcReg) 1790 .addImm(AArch64::sub_32); 1791 SrcReg = Src64; 1792 } 1793 1794 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT)); 1795 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) 1796 .addReg(SrcReg) 1797 .addImm(0) 1798 .addImm(Imm); 1799 1800 return ResultReg; 1801} 1802 1803bool AArch64FastISel::SelectIntExt(const Instruction *I) { 1804 // On ARM, in general, integer casts don't involve legal types; this code 1805 // handles promotable integers. The high bits for a type smaller than 1806 // the register size are assumed to be undefined. 1807 Type *DestTy = I->getType(); 1808 Value *Src = I->getOperand(0); 1809 Type *SrcTy = Src->getType(); 1810 1811 bool isZExt = isa<ZExtInst>(I); 1812 unsigned SrcReg = getRegForValue(Src); 1813 if (!SrcReg) 1814 return false; 1815 1816 EVT SrcEVT = TLI.getValueType(SrcTy, true); 1817 EVT DestEVT = TLI.getValueType(DestTy, true); 1818 if (!SrcEVT.isSimple()) 1819 return false; 1820 if (!DestEVT.isSimple()) 1821 return false; 1822 1823 MVT SrcVT = SrcEVT.getSimpleVT(); 1824 MVT DestVT = DestEVT.getSimpleVT(); 1825 unsigned ResultReg = EmitIntExt(SrcVT, SrcReg, DestVT, isZExt); 1826 if (ResultReg == 0) 1827 return false; 1828 UpdateValueMap(I, ResultReg); 1829 return true; 1830} 1831 1832bool AArch64FastISel::SelectRem(const Instruction *I, unsigned ISDOpcode) { 1833 EVT DestEVT = TLI.getValueType(I->getType(), true); 1834 if (!DestEVT.isSimple()) 1835 return false; 1836 1837 MVT DestVT = DestEVT.getSimpleVT(); 1838 if (DestVT != MVT::i64 && DestVT != MVT::i32) 1839 return false; 1840 1841 unsigned DivOpc; 1842 bool is64bit = (DestVT == MVT::i64); 1843 switch (ISDOpcode) { 1844 default: 1845 return false; 1846 case ISD::SREM: 1847 DivOpc = is64bit ? AArch64::SDIVXr : AArch64::SDIVWr; 1848 break; 1849 case ISD::UREM: 1850 DivOpc = is64bit ? AArch64::UDIVXr : AArch64::UDIVWr; 1851 break; 1852 } 1853 unsigned MSubOpc = is64bit ? AArch64::MSUBXrrr : AArch64::MSUBWrrr; 1854 unsigned Src0Reg = getRegForValue(I->getOperand(0)); 1855 if (!Src0Reg) 1856 return false; 1857 1858 unsigned Src1Reg = getRegForValue(I->getOperand(1)); 1859 if (!Src1Reg) 1860 return false; 1861 1862 unsigned QuotReg = createResultReg(TLI.getRegClassFor(DestVT)); 1863 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(DivOpc), QuotReg) 1864 .addReg(Src0Reg) 1865 .addReg(Src1Reg); 1866 // The remainder is computed as numerator - (quotient * denominator) using the 1867 // MSUB instruction. 1868 unsigned ResultReg = createResultReg(TLI.getRegClassFor(DestVT)); 1869 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MSubOpc), ResultReg) 1870 .addReg(QuotReg) 1871 .addReg(Src1Reg) 1872 .addReg(Src0Reg); 1873 UpdateValueMap(I, ResultReg); 1874 return true; 1875} 1876 1877bool AArch64FastISel::SelectMul(const Instruction *I) { 1878 EVT SrcEVT = TLI.getValueType(I->getOperand(0)->getType(), true); 1879 if (!SrcEVT.isSimple()) 1880 return false; 1881 MVT SrcVT = SrcEVT.getSimpleVT(); 1882 1883 // Must be simple value type. Don't handle vectors. 1884 if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16 && 1885 SrcVT != MVT::i8) 1886 return false; 1887 1888 unsigned Opc; 1889 unsigned ZReg; 1890 switch (SrcVT.SimpleTy) { 1891 default: 1892 return false; 1893 case MVT::i8: 1894 case MVT::i16: 1895 case MVT::i32: 1896 ZReg = AArch64::WZR; 1897 Opc = AArch64::MADDWrrr; 1898 break; 1899 case MVT::i64: 1900 ZReg = AArch64::XZR; 1901 Opc = AArch64::MADDXrrr; 1902 break; 1903 } 1904 1905 unsigned Src0Reg = getRegForValue(I->getOperand(0)); 1906 if (!Src0Reg) 1907 return false; 1908 1909 unsigned Src1Reg = getRegForValue(I->getOperand(1)); 1910 if (!Src1Reg) 1911 return false; 1912 1913 // Create the base instruction, then add the operands. 1914 unsigned ResultReg = createResultReg(TLI.getRegClassFor(SrcVT)); 1915 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) 1916 .addReg(Src0Reg) 1917 .addReg(Src1Reg) 1918 .addReg(ZReg); 1919 UpdateValueMap(I, ResultReg); 1920 return true; 1921} 1922 1923bool AArch64FastISel::TargetSelectInstruction(const Instruction *I) { 1924 switch (I->getOpcode()) { 1925 default: 1926 break; 1927 case Instruction::Load: 1928 return SelectLoad(I); 1929 case Instruction::Store: 1930 return SelectStore(I); 1931 case Instruction::Br: 1932 return SelectBranch(I); 1933 case Instruction::IndirectBr: 1934 return SelectIndirectBr(I); 1935 case Instruction::FCmp: 1936 case Instruction::ICmp: 1937 return SelectCmp(I); 1938 case Instruction::Select: 1939 return SelectSelect(I); 1940 case Instruction::FPExt: 1941 return SelectFPExt(I); 1942 case Instruction::FPTrunc: 1943 return SelectFPTrunc(I); 1944 case Instruction::FPToSI: 1945 return SelectFPToInt(I, /*Signed=*/true); 1946 case Instruction::FPToUI: 1947 return SelectFPToInt(I, /*Signed=*/false); 1948 case Instruction::SIToFP: 1949 return SelectIntToFP(I, /*Signed=*/true); 1950 case Instruction::UIToFP: 1951 return SelectIntToFP(I, /*Signed=*/false); 1952 case Instruction::SRem: 1953 return SelectRem(I, ISD::SREM); 1954 case Instruction::URem: 1955 return SelectRem(I, ISD::UREM); 1956 case Instruction::Call: 1957 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 1958 return SelectIntrinsicCall(*II); 1959 return SelectCall(I); 1960 case Instruction::Ret: 1961 return SelectRet(I); 1962 case Instruction::Trunc: 1963 return SelectTrunc(I); 1964 case Instruction::ZExt: 1965 case Instruction::SExt: 1966 return SelectIntExt(I); 1967 case Instruction::Mul: 1968 // FIXME: This really should be handled by the target-independent selector. 1969 return SelectMul(I); 1970 } 1971 return false; 1972 // Silence warnings. 1973 (void)&CC_AArch64_DarwinPCS_VarArg; 1974} 1975 1976namespace llvm { 1977llvm::FastISel *AArch64::createFastISel(FunctionLoweringInfo &funcInfo, 1978 const TargetLibraryInfo *libInfo) { 1979 return new AArch64FastISel(funcInfo, libInfo); 1980} 1981} 1982