1//===-- MipsFastISel.cpp - Mips FastISel implementation --------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9/// 10/// \file 11/// \brief This file defines the MIPS-specific support for the FastISel class. 12/// Some of the target-specific code is generated by tablegen in the file 13/// MipsGenFastISel.inc, which is #included here. 14/// 15//===----------------------------------------------------------------------===// 16 17#include "MipsCCState.h" 18#include "MipsInstrInfo.h" 19#include "MipsISelLowering.h" 20#include "MipsMachineFunction.h" 21#include "MipsRegisterInfo.h" 22#include "MipsSubtarget.h" 23#include "MipsTargetMachine.h" 24#include "llvm/Analysis/TargetLibraryInfo.h" 25#include "llvm/CodeGen/FastISel.h" 26#include "llvm/CodeGen/FunctionLoweringInfo.h" 27#include "llvm/CodeGen/MachineInstrBuilder.h" 28#include "llvm/CodeGen/MachineRegisterInfo.h" 29#include "llvm/IR/GetElementPtrTypeIterator.h" 30#include "llvm/IR/GlobalAlias.h" 31#include "llvm/IR/GlobalVariable.h" 32#include "llvm/MC/MCSymbol.h" 33#include "llvm/Target/TargetInstrInfo.h" 34 35using namespace llvm; 36 37namespace { 38 39class MipsFastISel final : public FastISel { 40 41 // All possible address modes. 42 class Address { 43 public: 44 typedef enum { RegBase, FrameIndexBase } BaseKind; 45 46 private: 47 BaseKind Kind; 48 union { 49 unsigned Reg; 50 int FI; 51 } Base; 52 53 int64_t Offset; 54 55 const GlobalValue *GV; 56 57 public: 58 // Innocuous defaults for our address. 
59 Address() : Kind(RegBase), Offset(0), GV(0) { Base.Reg = 0; } 60 void setKind(BaseKind K) { Kind = K; } 61 BaseKind getKind() const { return Kind; } 62 bool isRegBase() const { return Kind == RegBase; } 63 bool isFIBase() const { return Kind == FrameIndexBase; } 64 void setReg(unsigned Reg) { 65 assert(isRegBase() && "Invalid base register access!"); 66 Base.Reg = Reg; 67 } 68 unsigned getReg() const { 69 assert(isRegBase() && "Invalid base register access!"); 70 return Base.Reg; 71 } 72 void setFI(unsigned FI) { 73 assert(isFIBase() && "Invalid base frame index access!"); 74 Base.FI = FI; 75 } 76 unsigned getFI() const { 77 assert(isFIBase() && "Invalid base frame index access!"); 78 return Base.FI; 79 } 80 81 void setOffset(int64_t Offset_) { Offset = Offset_; } 82 int64_t getOffset() const { return Offset; } 83 void setGlobalValue(const GlobalValue *G) { GV = G; } 84 const GlobalValue *getGlobalValue() { return GV; } 85 }; 86 87 /// Subtarget - Keep a pointer to the MipsSubtarget around so that we can 88 /// make the right decision when generating code for different targets. 89 const TargetMachine &TM; 90 const MipsSubtarget *Subtarget; 91 const TargetInstrInfo &TII; 92 const TargetLowering &TLI; 93 MipsFunctionInfo *MFI; 94 95 // Convenience variables to avoid some queries. 96 LLVMContext *Context; 97 98 bool fastLowerCall(CallLoweringInfo &CLI) override; 99 bool fastLowerIntrinsicCall(const IntrinsicInst *II) override; 100 101 bool TargetSupported; 102 bool UnsupportedFPMode; // To allow fast-isel to proceed and just not handle 103 // floating point but not reject doing fast-isel in other 104 // situations 105 106private: 107 // Selection routines. 
108 bool selectLogicalOp(const Instruction *I); 109 bool selectLoad(const Instruction *I); 110 bool selectStore(const Instruction *I); 111 bool selectBranch(const Instruction *I); 112 bool selectSelect(const Instruction *I); 113 bool selectCmp(const Instruction *I); 114 bool selectFPExt(const Instruction *I); 115 bool selectFPTrunc(const Instruction *I); 116 bool selectFPToInt(const Instruction *I, bool IsSigned); 117 bool selectRet(const Instruction *I); 118 bool selectTrunc(const Instruction *I); 119 bool selectIntExt(const Instruction *I); 120 bool selectShift(const Instruction *I); 121 bool selectDivRem(const Instruction *I, unsigned ISDOpcode); 122 123 // Utility helper routines. 124 bool isTypeLegal(Type *Ty, MVT &VT); 125 bool isTypeSupported(Type *Ty, MVT &VT); 126 bool isLoadTypeLegal(Type *Ty, MVT &VT); 127 bool computeAddress(const Value *Obj, Address &Addr); 128 bool computeCallAddress(const Value *V, Address &Addr); 129 void simplifyAddress(Address &Addr); 130 131 // Emit helper routines. 
132 bool emitCmp(unsigned DestReg, const CmpInst *CI); 133 bool emitLoad(MVT VT, unsigned &ResultReg, Address &Addr, 134 unsigned Alignment = 0); 135 bool emitStore(MVT VT, unsigned SrcReg, Address Addr, 136 MachineMemOperand *MMO = nullptr); 137 bool emitStore(MVT VT, unsigned SrcReg, Address &Addr, 138 unsigned Alignment = 0); 139 unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt); 140 bool emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg, 141 142 bool IsZExt); 143 bool emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg); 144 145 bool emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg); 146 bool emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT, 147 unsigned DestReg); 148 bool emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT, 149 unsigned DestReg); 150 151 unsigned getRegEnsuringSimpleIntegerWidening(const Value *, bool IsUnsigned); 152 153 unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS, 154 const Value *RHS); 155 156 unsigned materializeFP(const ConstantFP *CFP, MVT VT); 157 unsigned materializeGV(const GlobalValue *GV, MVT VT); 158 unsigned materializeInt(const Constant *C, MVT VT); 159 unsigned materialize32BitInt(int64_t Imm, const TargetRegisterClass *RC); 160 unsigned materializeExternalCallSym(MCSymbol *Syn); 161 162 MachineInstrBuilder emitInst(unsigned Opc) { 163 return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)); 164 } 165 MachineInstrBuilder emitInst(unsigned Opc, unsigned DstReg) { 166 return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), 167 DstReg); 168 } 169 MachineInstrBuilder emitInstStore(unsigned Opc, unsigned SrcReg, 170 unsigned MemReg, int64_t MemOffset) { 171 return emitInst(Opc).addReg(SrcReg).addReg(MemReg).addImm(MemOffset); 172 } 173 MachineInstrBuilder emitInstLoad(unsigned Opc, unsigned DstReg, 174 unsigned MemReg, int64_t MemOffset) { 175 return emitInst(Opc, 
DstReg).addReg(MemReg).addImm(MemOffset); 176 } 177 178 unsigned fastEmitInst_rr(unsigned MachineInstOpcode, 179 const TargetRegisterClass *RC, 180 unsigned Op0, bool Op0IsKill, 181 unsigned Op1, bool Op1IsKill); 182 183 // for some reason, this default is not generated by tablegen 184 // so we explicitly generate it here. 185 // 186 unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC, 187 unsigned Op0, bool Op0IsKill, uint64_t imm1, 188 uint64_t imm2, unsigned Op3, bool Op3IsKill) { 189 return 0; 190 } 191 192 // Call handling routines. 193private: 194 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const; 195 bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs, 196 unsigned &NumBytes); 197 bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes); 198 199public: 200 // Backend specific FastISel code. 201 explicit MipsFastISel(FunctionLoweringInfo &funcInfo, 202 const TargetLibraryInfo *libInfo) 203 : FastISel(funcInfo, libInfo), TM(funcInfo.MF->getTarget()), 204 Subtarget(&funcInfo.MF->getSubtarget<MipsSubtarget>()), 205 TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()) { 206 MFI = funcInfo.MF->getInfo<MipsFunctionInfo>(); 207 Context = &funcInfo.Fn->getContext(); 208 bool ISASupported = !Subtarget->hasMips32r6() && 209 !Subtarget->inMicroMipsMode() && Subtarget->hasMips32(); 210 TargetSupported = 211 ISASupported && TM.isPositionIndependent() && 212 (static_cast<const MipsTargetMachine &>(TM).getABI().IsO32()); 213 UnsupportedFPMode = Subtarget->isFP64bit(); 214 } 215 216 unsigned fastMaterializeAlloca(const AllocaInst *AI) override; 217 unsigned fastMaterializeConstant(const Constant *C) override; 218 bool fastSelectInstruction(const Instruction *I) override; 219 220#include "MipsGenFastISel.inc" 221}; 222} // end anonymous namespace. 
// Prototype CC_Mips so the tablegen'd MipsGenCallingConv.inc below compiles;
// it is never called directly from this file.
static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT,
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                    CCState &State) LLVM_ATTRIBUTE_UNUSED;

// Stub required by the generated calling-convention tables; fast-isel never
// routes FP arguments through this path.
static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  llvm_unreachable("should not be called");
}

// Same as above, for the FP64 variant of the O32 tables.
static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  llvm_unreachable("should not be called");
}

#include "MipsGenCallingConv.inc"

// Fast-isel only supports the O32 calling convention here, regardless of CC.
CCAssignFn *MipsFastISel::CCAssignFnForCall(CallingConv::ID CC) const {
  return CC_MipsO32;
}

// Emit an AND/OR/XOR of LHS and RHS into a fresh GPR32 and return it,
// or 0 on failure. Constant operands are canonicalized to the RHS and
// materialized as i32.
unsigned MipsFastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
                                     const Value *LHS, const Value *RHS) {
  // Canonicalize immediates to the RHS first.
  if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS))
    std::swap(LHS, RHS);

  unsigned Opc;
  switch (ISDOpc) {
  case ISD::AND:
    Opc = Mips::AND;
    break;
  case ISD::OR:
    Opc = Mips::OR;
    break;
  case ISD::XOR:
    Opc = Mips::XOR;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }

  unsigned LHSReg = getRegForValue(LHS);
  if (!LHSReg)
    return 0;

  unsigned RHSReg;
  if (const auto *C = dyn_cast<ConstantInt>(RHS))
    RHSReg = materializeInt(C, MVT::i32);
  else
    RHSReg = getRegForValue(RHS);
  if (!RHSReg)
    return 0;

  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
  if (!ResultReg)
    return 0;

  emitInst(Opc, ResultReg).addReg(LHSReg).addReg(RHSReg);
  return ResultReg;
}

// Materialize the address of a static alloca into a register via
// LEA_ADDiu off its frame index; returns 0 for dynamic allocas.
unsigned MipsFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
  if (!TargetSupported)
    return 0;

  assert(TLI.getValueType(DL, AI->getType(), true) == MVT::i32 &&
         "Alloca should always return a pointer.");

  DenseMap<const AllocaInst *, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);

  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::LEA_ADDiu),
            ResultReg)
        .addFrameIndex(SI->second)
        .addImm(0);
    return ResultReg;
  }

  return 0;
}

// Materialize an integer constant (i1/i8/i16/i32) into a GPR32.
// Note: the value is zero-extended before being handed to
// materialize32BitInt.
unsigned MipsFastISel::materializeInt(const Constant *C, MVT VT) {
  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return 0;
  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  const ConstantInt *CI = cast<ConstantInt>(C);
  return materialize32BitInt(CI->getZExtValue(), RC);
}

// Materialize a 32-bit immediate using the shortest sequence:
// ADDiu (signed 16-bit), ORi (unsigned 16-bit), or LUi[+ORi].
unsigned MipsFastISel::materialize32BitInt(int64_t Imm,
                                           const TargetRegisterClass *RC) {
  unsigned ResultReg = createResultReg(RC);

  if (isInt<16>(Imm)) {
    unsigned Opc = Mips::ADDiu;
    emitInst(Opc, ResultReg).addReg(Mips::ZERO).addImm(Imm);
    return ResultReg;
  } else if (isUInt<16>(Imm)) {
    emitInst(Mips::ORi, ResultReg).addReg(Mips::ZERO).addImm(Imm);
    return ResultReg;
  }
  unsigned Lo = Imm & 0xFFFF;
  unsigned Hi = (Imm >> 16) & 0xFFFF;
  if (Lo) {
    // Both Lo and Hi have nonzero bits.
    unsigned TmpReg = createResultReg(RC);
    emitInst(Mips::LUi, TmpReg).addImm(Hi);
    emitInst(Mips::ORi, ResultReg).addReg(TmpReg).addImm(Lo);
  } else {
    emitInst(Mips::LUi, ResultReg).addImm(Hi);
  }
  return ResultReg;
}

// Materialize an FP constant by building its bit pattern in GPRs and
// moving it into the FPU (MTC1 for f32, BuildPairF64 for f64).
unsigned MipsFastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
  if (UnsupportedFPMode)
    return 0;
  int64_t Imm = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
  if (VT == MVT::f32) {
    const TargetRegisterClass *RC = &Mips::FGR32RegClass;
    unsigned DestReg = createResultReg(RC);
    unsigned TempReg = materialize32BitInt(Imm, &Mips::GPR32RegClass);
    emitInst(Mips::MTC1, DestReg).addReg(TempReg);
    return DestReg;
  } else if (VT == MVT::f64) {
    const TargetRegisterClass *RC = &Mips::AFGR64RegClass;
    unsigned DestReg = createResultReg(RC);
    // High and low halves are materialized separately and paired.
    unsigned TempReg1 = materialize32BitInt(Imm >> 32, &Mips::GPR32RegClass);
    unsigned TempReg2 =
        materialize32BitInt(Imm & 0xFFFFFFFF, &Mips::GPR32RegClass);
    emitInst(Mips::BuildPairF64, DestReg).addReg(TempReg2).addReg(TempReg1);
    return DestReg;
  }
  return 0;
}

// Materialize a global's address through the GOT; locals additionally get
// the low part added via ADDiu (GOT entry points at the page for locals).
unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32)
    return 0;
  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  unsigned DestReg = createResultReg(RC);
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
  // TLS not supported at this time.
  if (IsThreadLocal)
    return 0;
  emitInst(Mips::LW, DestReg)
      .addReg(MFI->getGlobalBaseReg())
      .addGlobalAddress(GV, 0, MipsII::MO_GOT);
  if ((GV->hasInternalLinkage() ||
       (GV->hasLocalLinkage() && !isa<Function>(GV)))) {
    unsigned TempReg = createResultReg(RC);
    emitInst(Mips::ADDiu, TempReg)
        .addReg(DestReg)
        .addGlobalAddress(GV, 0, MipsII::MO_ABS_LO);
    DestReg = TempReg;
  }
  return DestReg;
}

// Load the address of an external call symbol from the GOT.
unsigned MipsFastISel::materializeExternalCallSym(MCSymbol *Sym) {
  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  unsigned DestReg = createResultReg(RC);
  emitInst(Mips::LW, DestReg)
      .addReg(MFI->getGlobalBaseReg())
      .addSym(Sym, MipsII::MO_GOT);
  return DestReg;
}

// Materialize a constant into a register, and return the register
// number (or zero if we failed to handle it).
unsigned MipsFastISel::fastMaterializeConstant(const Constant *C) {
  if (!TargetSupported)
    return 0;

  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple())
    return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return (UnsupportedFPMode) ? 0 : materializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return materializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return materializeInt(C, VT);

  return 0;
}

// Compute an Address (reg+offset or frame-index+offset) for a memory
// operand, folding bitcasts, constant GEP offsets and static allocas.
// Falls back to materializing Obj into a register.
bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {

  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }
  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast: {
    // Look through bitcasts.
    return computeAddress(U->getOperand(0), Addr);
  }
  case Instruction::GetElementPtr: {
    Address SavedAddr = Addr;
    uint64_t TmpOffset = Addr.getOffset();
    // Iterate through the GEP folding the constants into offsets where
    // we can.
    gep_type_iterator GTI = gep_type_begin(U);
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
         ++i, ++GTI) {
      const Value *Op = *i;
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        // Struct index: add the field's fixed offset.
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
      } else {
        // Array/pointer index: scale by the element size.
        uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
        for (;;) {
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
            break;
          }
          if (canFoldAddIntoGEP(U, Op)) {
            // A compatible add with a constant operand. Fold the constant.
            ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            TmpOffset += CI->getSExtValue() * S;
            // Iterate on the other operand.
            Op = cast<AddOperator>(Op)->getOperand(0);
            continue;
          }
          // Unsupported
          goto unsupported_gep;
        }
      }
    }
    // Try to grab the base operand now.
    Addr.setOffset(TmpOffset);
    if (computeAddress(U->getOperand(0), Addr))
      return true;
    // We failed, restore everything and try the other options.
    Addr = SavedAddr;
  unsupported_gep:
    break;
  }
  case Instruction::Alloca: {
    // Static allocas are addressed directly through their frame index.
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    DenseMap<const AllocaInst *, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.setKind(Address::FrameIndexBase);
      Addr.setFI(SI->second);
      return true;
    }
    break;
  }
  }
  // Last resort: put the whole value in a register.
  Addr.setReg(getRegForValue(Obj));
  return Addr.getReg() != 0;
}

// Compute the callee address for a call, looking through same-BB bitcasts
// and no-op int<->ptr conversions; prefers a GlobalValue when available.
bool MipsFastISel::computeCallAddress(const Value *V, Address &Addr) {
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;

  if (const auto *I = dyn_cast<Instruction>(V)) {
    // Check if the value is defined in the same basic block. This information
    // is crucial to know whether or not folding an operand is valid.
    if (I->getParent() == FuncInfo.MBB->getBasicBlock()) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const auto *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();
    U = C;
  }

  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast:
    // Look past bitcasts if its operand is in the same BB.
    return computeCallAddress(U->getOperand(0), Addr);
    break;
  case Instruction::IntToPtr:
    // Look past no-op inttoptrs if its operand is in the same BB.
    if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
        TLI.getPointerTy(DL))
      return computeCallAddress(U->getOperand(0), Addr);
    break;
  case Instruction::PtrToInt:
    // Look past no-op ptrtoints if its operand is in the same BB.
    if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
      return computeCallAddress(U->getOperand(0), Addr);
    break;
  }

  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    Addr.setGlobalValue(GV);
    return true;
  }

  // If all else fails, try to materialize the value in a register.
  if (!Addr.getGlobalValue()) {
    Addr.setReg(getRegForValue(V));
    return Addr.getReg() != 0;
  }

  return false;
}

// True iff Ty maps to a simple MVT that the target lowering calls legal;
// VT receives the simple type on success.
bool MipsFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(DL, Ty, true);
  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple())
    return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

// Like isTypeLegal but additionally accepts i1/i8/i16 (extendable) and
// rejects all vector types.
bool MipsFastISel::isTypeSupported(Type *Ty, MVT &VT) {
  if (Ty->isVectorTy())
    return false;

  if (isTypeLegal(Ty, VT))
    return true;

  // If this is a type than can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Legality check for load/store types: legal types plus i8/i16
// (which have dedicated load/store instructions).
bool MipsFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT))
    return true;
  // We will extend this in a later patch:
  // If this is a type than can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)
    return true;
  return false;
}
// Because of how EmitCmp is called with fast-isel, you can
// end up with redundant "andi" instructions after the sequences emitted below.
// We should try and solve this issue in the future.
//
// Emit a 0/1 comparison result into ResultReg. Integer compares use the
// classic MIPS SLT/SLTu/XOR idioms; ordered FP compares use c.cond.fmt
// setting FCC0 plus a conditional move selecting between 0 and 1.
// Returns false for predicates (or FP modes) we do not handle.
bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
  const Value *Left = CI->getOperand(0), *Right = CI->getOperand(1);
  bool IsUnsigned = CI->isUnsigned();
  unsigned LeftReg = getRegEnsuringSimpleIntegerWidening(Left, IsUnsigned);
  if (LeftReg == 0)
    return false;
  unsigned RightReg = getRegEnsuringSimpleIntegerWidening(Right, IsUnsigned);
  if (RightReg == 0)
    return false;
  CmpInst::Predicate P = CI->getPredicate();

  switch (P) {
  default:
    return false;
  case CmpInst::ICMP_EQ: {
    // (l == r) <=> (l ^ r) < 1 unsigned.
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::SLTiu, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::ICMP_NE: {
    // (l != r) <=> 0 < (l ^ r) unsigned.
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::SLTu, ResultReg).addReg(Mips::ZERO).addReg(TempReg);
    break;
  }
  case CmpInst::ICMP_UGT: {
    emitInst(Mips::SLTu, ResultReg).addReg(RightReg).addReg(LeftReg);
    break;
  }
  case CmpInst::ICMP_ULT: {
    emitInst(Mips::SLTu, ResultReg).addReg(LeftReg).addReg(RightReg);
    break;
  }
  case CmpInst::ICMP_UGE: {
    // l >= r <=> !(l < r).
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLTu, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::ICMP_ULE: {
    // l <= r <=> !(r < l).
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLTu, TempReg).addReg(RightReg).addReg(LeftReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::ICMP_SGT: {
    emitInst(Mips::SLT, ResultReg).addReg(RightReg).addReg(LeftReg);
    break;
  }
  case CmpInst::ICMP_SLT: {
    emitInst(Mips::SLT, ResultReg).addReg(LeftReg).addReg(RightReg);
    break;
  }
  case CmpInst::ICMP_SGE: {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLT, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::ICMP_SLE: {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLT, TempReg).addReg(RightReg).addReg(LeftReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
    break;
  }
  case CmpInst::FCMP_OEQ:
  case CmpInst::FCMP_UNE:
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_OGE: {
    if (UnsupportedFPMode)
      return false;
    bool IsFloat = Left->getType()->isFloatTy();
    bool IsDouble = Left->getType()->isDoubleTy();
    if (!IsFloat && !IsDouble)
      return false;
    // OGT/OGE are synthesized from the unordered inverse compare plus
    // a false-condition move (MOVF), since there is no c.ogt.fmt.
    unsigned Opc, CondMovOpc;
    switch (P) {
    case CmpInst::FCMP_OEQ:
      Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
      CondMovOpc = Mips::MOVT_I;
      break;
    case CmpInst::FCMP_UNE:
      Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
      CondMovOpc = Mips::MOVF_I;
      break;
    case CmpInst::FCMP_OLT:
      Opc = IsFloat ? Mips::C_OLT_S : Mips::C_OLT_D32;
      CondMovOpc = Mips::MOVT_I;
      break;
    case CmpInst::FCMP_OLE:
      Opc = IsFloat ? Mips::C_OLE_S : Mips::C_OLE_D32;
      CondMovOpc = Mips::MOVT_I;
      break;
    case CmpInst::FCMP_OGT:
      Opc = IsFloat ? Mips::C_ULE_S : Mips::C_ULE_D32;
      CondMovOpc = Mips::MOVF_I;
      break;
    case CmpInst::FCMP_OGE:
      Opc = IsFloat ? Mips::C_ULT_S : Mips::C_ULT_D32;
      CondMovOpc = Mips::MOVF_I;
      break;
    default:
      llvm_unreachable("Only switching of a subset of CCs.");
    }
    unsigned RegWithZero = createResultReg(&Mips::GPR32RegClass);
    unsigned RegWithOne = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::ADDiu, RegWithZero).addReg(Mips::ZERO).addImm(0);
    emitInst(Mips::ADDiu, RegWithOne).addReg(Mips::ZERO).addImm(1);
    emitInst(Opc).addReg(LeftReg).addReg(RightReg).addReg(
        Mips::FCC0, RegState::ImplicitDefine);
    emitInst(CondMovOpc, ResultReg)
        .addReg(RegWithOne)
        .addReg(Mips::FCC0)
        .addReg(RegWithZero);
    break;
  }
  }
  return true;
}
// Emit a load of type VT from Addr into a freshly created ResultReg.
// Integer loads narrower than i32 are zero-extending (LBu/LHu).
// The Alignment parameter is currently unused.
bool MipsFastISel::emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                            unsigned Alignment) {
  //
  // more cases will be handled here in following patches.
  //
  unsigned Opc;
  switch (VT.SimpleTy) {
  case MVT::i32: {
    ResultReg = createResultReg(&Mips::GPR32RegClass);
    Opc = Mips::LW;
    break;
  }
  case MVT::i16: {
    ResultReg = createResultReg(&Mips::GPR32RegClass);
    Opc = Mips::LHu;
    break;
  }
  case MVT::i8: {
    ResultReg = createResultReg(&Mips::GPR32RegClass);
    Opc = Mips::LBu;
    break;
  }
  case MVT::f32: {
    if (UnsupportedFPMode)
      return false;
    ResultReg = createResultReg(&Mips::FGR32RegClass);
    Opc = Mips::LWC1;
    break;
  }
  case MVT::f64: {
    if (UnsupportedFPMode)
      return false;
    ResultReg = createResultReg(&Mips::AFGR64RegClass);
    Opc = Mips::LDC1;
    break;
  }
  default:
    return false;
  }
  if (Addr.isRegBase()) {
    simplifyAddress(Addr);
    emitInstLoad(Opc, ResultReg, Addr.getReg(), Addr.getOffset());
    return true;
  }
  if (Addr.isFIBase()) {
    // Frame-index base: attach a MachineMemOperand so later passes can
    // reason about the stack access.
    unsigned FI = Addr.getFI();
    unsigned Align = 4;
    unsigned Offset = Addr.getOffset();
    MachineFrameInfo &MFI = *MF->getFrameInfo();
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
        MFI.getObjectSize(FI), Align);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
        .addFrameIndex(FI)
        .addImm(Offset)
        .addMemOperand(MMO);
    return true;
  }
  return false;
}

// Emit a store of SrcReg (type VT) to Addr. Mirrors emitLoad; the
// Alignment parameter is currently unused.
bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr,
                             unsigned Alignment) {
  //
  // more cases will be handled here in following patches.
  //
  unsigned Opc;
  switch (VT.SimpleTy) {
  case MVT::i8:
    Opc = Mips::SB;
    break;
  case MVT::i16:
    Opc = Mips::SH;
    break;
  case MVT::i32:
    Opc = Mips::SW;
    break;
  case MVT::f32:
    if (UnsupportedFPMode)
      return false;
    Opc = Mips::SWC1;
    break;
  case MVT::f64:
    if (UnsupportedFPMode)
      return false;
    Opc = Mips::SDC1;
    break;
  default:
    return false;
  }
  if (Addr.isRegBase()) {
    simplifyAddress(Addr);
    emitInstStore(Opc, SrcReg, Addr.getReg(), Addr.getOffset());
    return true;
  }
  if (Addr.isFIBase()) {
    // Frame-index base: attach a MachineMemOperand for the stack slot.
    unsigned FI = Addr.getFI();
    unsigned Align = 4;
    unsigned Offset = Addr.getOffset();
    MachineFrameInfo &MFI = *MF->getFrameInfo();
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
        MFI.getObjectSize(FI), Align);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
        .addReg(SrcReg)
        .addFrameIndex(FI)
        .addImm(Offset)
        .addMemOperand(MMO);
    return true;
  }
  return false;
}

// Select an And/Or/Xor instruction via emitLogicalOp.
bool MipsFastISel::selectLogicalOp(const Instruction *I) {
  MVT VT;
  if (!isTypeSupported(I->getType(), VT))
    return false;

  unsigned ResultReg;
  switch (I->getOpcode()) {
  default:
    llvm_unreachable("Unexpected instruction.");
  case Instruction::And:
    ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1));
    break;
  case Instruction::Or:
    ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1));
    break;
  case Instruction::Xor:
    ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1));
    break;
  }

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

// Select a non-atomic load with a legal load type and computable address.
bool MipsFastISel::selectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!computeAddress(I->getOperand(0), Addr))
    return false;

  unsigned ResultReg;
  if (!emitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
    return false;
  updateValueMap(I, ResultReg);
  return true;
}

// Select a non-atomic store with a legal value type and computable address.
bool MipsFastISel::selectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg = 0;

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())
    return false;

  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);
  if (SrcReg == 0)
    return false;

  // See if we can handle this address.
  Address Addr;
  if (!computeAddress(I->getOperand(1), Addr))
    return false;

  if (!emitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))
    return false;
  return true;
}

//
// This can cause a redundant sltiu to be generated.
// FIXME: try and eliminate this in a future patch.
910// 911bool MipsFastISel::selectBranch(const Instruction *I) { 912 const BranchInst *BI = cast<BranchInst>(I); 913 MachineBasicBlock *BrBB = FuncInfo.MBB; 914 // 915 // TBB is the basic block for the case where the comparison is true. 916 // FBB is the basic block for the case where the comparison is false. 917 // if (cond) goto TBB 918 // goto FBB 919 // TBB: 920 // 921 MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)]; 922 MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)]; 923 BI->getCondition(); 924 // For now, just try the simplest case where it's fed by a compare. 925 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) { 926 unsigned CondReg = createResultReg(&Mips::GPR32RegClass); 927 if (!emitCmp(CondReg, CI)) 928 return false; 929 BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::BGTZ)) 930 .addReg(CondReg) 931 .addMBB(TBB); 932 finishCondBranch(BI->getParent(), TBB, FBB); 933 return true; 934 } 935 return false; 936} 937 938bool MipsFastISel::selectCmp(const Instruction *I) { 939 const CmpInst *CI = cast<CmpInst>(I); 940 unsigned ResultReg = createResultReg(&Mips::GPR32RegClass); 941 if (!emitCmp(ResultReg, CI)) 942 return false; 943 updateValueMap(I, ResultReg); 944 return true; 945} 946 947// Attempt to fast-select a floating-point extend instruction. 
bool MipsFastISel::selectFPExt(const Instruction *I) {
  // Only the single directly-supported case is handled: f32 -> f64.
  if (UnsupportedFPMode)
    return false;
  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);

  if (SrcVT != MVT::f32 || DestVT != MVT::f64)
    return false;

  unsigned SrcReg =
      getRegForValue(Src); // this must be a 32bit floating point register class
                           // maybe we should handle this differently
  if (!SrcReg)
    return false;

  // cvt.d.s widens the single-precision value into a 64-bit FP register pair.
  unsigned DestReg = createResultReg(&Mips::AFGR64RegClass);
  emitInst(Mips::CVT_D32_S, DestReg).addReg(SrcReg);
  updateValueMap(I, DestReg);
  return true;
}

// Lower a select instruction with a conditional move (MOVN) of the register
// class matching the result type. The i1 condition is zero-extended to i32
// first so the conditional move sees a clean 0/1 value.
bool MipsFastISel::selectSelect(const Instruction *I) {
  assert(isa<SelectInst>(I) && "Expected a select instruction.");

  MVT VT;
  if (!isTypeSupported(I->getType(), VT))
    return false;

  // Pick the conditional-move opcode and register class for the result type.
  unsigned CondMovOpc;
  const TargetRegisterClass *RC;

  if (VT.isInteger() && !VT.isVector() && VT.getSizeInBits() <= 32) {
    CondMovOpc = Mips::MOVN_I_I;
    RC = &Mips::GPR32RegClass;
  } else if (VT == MVT::f32) {
    CondMovOpc = Mips::MOVN_I_S;
    RC = &Mips::FGR32RegClass;
  } else if (VT == MVT::f64) {
    CondMovOpc = Mips::MOVN_I_D32;
    RC = &Mips::AFGR64RegClass;
  } else
    return false;

  const SelectInst *SI = cast<SelectInst>(I);
  const Value *Cond = SI->getCondition();
  unsigned Src1Reg = getRegForValue(SI->getTrueValue());
  unsigned Src2Reg = getRegForValue(SI->getFalseValue());
  unsigned CondReg = getRegForValue(Cond);

  if (!Src1Reg || !Src2Reg || !CondReg)
    return false;

  unsigned ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
  if (!ZExtCondReg)
    return false;

  // Normalize the i1 condition to a 0/1 i32 value.
  if (!emitIntExt(MVT::i1, CondReg, MVT::i32, ZExtCondReg, true))
    return false;

  unsigned ResultReg = createResultReg(RC);
  unsigned TempReg = createResultReg(RC);

  if (!ResultReg || !TempReg)
    return false;

  // Seed the result with the false value, then conditionally overwrite it
  // with the true value when the (extended) condition is nonzero.
  emitInst(TargetOpcode::COPY, TempReg).addReg(Src2Reg);
  emitInst(CondMovOpc, ResultReg)
    .addReg(Src1Reg).addReg(ZExtCondReg).addReg(TempReg);
  updateValueMap(I, ResultReg);
  return true;
}

// Attempt to fast-select a floating-point truncate instruction.
bool MipsFastISel::selectFPTrunc(const Instruction *I) {
  // Only the single directly-supported case is handled: f64 -> f32.
  if (UnsupportedFPMode)
    return false;
  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);

  if (SrcVT != MVT::f64 || DestVT != MVT::f32)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg)
    return false;

  unsigned DestReg = createResultReg(&Mips::FGR32RegClass);
  if (!DestReg)
    return false;

  // cvt.s.d narrows the double-precision value to single precision.
  emitInst(Mips::CVT_S_D32, DestReg).addReg(SrcReg);
  updateValueMap(I, DestReg);
  return true;
}

// Attempt to fast-select a floating-point-to-integer conversion.
// Only signed f32/f64 -> i32 is currently supported.
bool MipsFastISel::selectFPToInt(const Instruction *I, bool IsSigned) {
  if (UnsupportedFPMode)
    return false;
  MVT DstVT, SrcVT;
  if (!IsSigned)
    return false; // We don't handle this case yet. There is no native
                  // instruction for this but it can be synthesized.
  Type *DstTy = I->getType();
  if (!isTypeLegal(DstTy, DstVT))
    return false;

  if (DstVT != MVT::i32)
    return false;

  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();
  if (!isTypeLegal(SrcTy, SrcVT))
    return false;

  if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
    return false;

  unsigned SrcReg = getRegForValue(Src);
  if (SrcReg == 0)
    return false;

  // Determine the opcode for the conversion, which takes place
  // entirely within FPRs.
  unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
  unsigned TempReg = createResultReg(&Mips::FGR32RegClass);
  unsigned Opc = (SrcVT == MVT::f32) ? Mips::TRUNC_W_S : Mips::TRUNC_W_D32;

  // Generate the convert: trunc.w.[sd] converts into an FPR, then MFC1
  // moves the 32-bit result over to a GPR.
  emitInst(Opc, TempReg).addReg(SrcReg);
  emitInst(Mips::MFC1, DestReg).addReg(TempReg);

  updateValueMap(I, DestReg);
  return true;
}

// Analyze and lower the outgoing call arguments: assign each argument to a
// register or stack location, emit the stack adjustment plus the argument
// copies, and report the size of the outgoing argument area in NumBytes.
// Returns false (deferring to SelectionDAG) for anything it cannot lower.
bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
                                   SmallVectorImpl<MVT> &OutVTs,
                                   unsigned &NumBytes) {
  CallingConv::ID CC = CLI.CallConv;
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();
  // This is the minimum argument area used for A0-A3.
  if (NumBytes < 16)
    NumBytes = 16;

  // NOTE(review): a fixed 16-byte area is reserved here regardless of
  // NumBytes; this is consistent with the stack-store path below always
  // bailing out, so only register-assigned arguments ever succeed.
  emitInst(Mips::ADJCALLSTACKDOWN).addImm(16);
  // Process the args.
  MVT firstMVT;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    const Value *ArgVal = CLI.OutVals[VA.getValNo()];
    MVT ArgVT = OutVTs[VA.getValNo()];

    // Force the first one or two FP arguments into FP argument registers
    // (F12/D6 and F14/D7), overriding the generic assignment above.
    if (i == 0) {
      firstMVT = ArgVT;
      if (ArgVT == MVT::f32) {
        VA.convertToReg(Mips::F12);
      } else if (ArgVT == MVT::f64) {
        VA.convertToReg(Mips::D6);
      }
    } else if (i == 1) {
      if ((firstMVT == MVT::f32) || (firstMVT == MVT::f64)) {
        if (ArgVT == MVT::f32) {
          VA.convertToReg(Mips::F14);
        } else if (ArgVT == MVT::f64) {
          VA.convertToReg(Mips::D7);
        }
      }
    }
    // Promote small mem-assigned arguments within the first 16 bytes to the
    // integer argument registers A0-A3 instead of a stack slot.
    if (((ArgVT == MVT::i32) || (ArgVT == MVT::f32) || (ArgVT == MVT::i16) ||
         (ArgVT == MVT::i8)) &&
        VA.isMemLoc()) {
      switch (VA.getLocMemOffset()) {
      case 0:
        VA.convertToReg(Mips::A0);
        break;
      case 4:
        VA.convertToReg(Mips::A1);
        break;
      case 8:
        VA.convertToReg(Mips::A2);
        break;
      case 12:
        VA.convertToReg(Mips::A3);
        break;
      default:
        break;
      }
    }
    unsigned ArgReg = getRegForValue(ArgVal);
    if (!ArgReg)
      return false;

    // Handle arg promotion: SExt, ZExt, AExt.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::AExt:
    case CCValAssign::SExt: {
      MVT DestVT = VA.getLocVT();
      MVT SrcVT = ArgVT;
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
      if (!ArgReg)
        return false;
      break;
    }
    case CCValAssign::ZExt: {
      MVT DestVT = VA.getLocVT();
      MVT SrcVT = ArgVT;
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
      if (!ArgReg)
        return false;
      break;
    }
    default:
      llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
      CLI.OutRegs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      llvm_unreachable("Mips does not use custom args.");
      return false;
    } else {
      //
      // FIXME: This path will currently return false. It was copied
      // from the AArch64 port and should be essentially fine for Mips too.
      // The work to finish up this path will be done in a follow-on patch.
      //
      assert(VA.isMemLoc() && "Assuming store on stack.");
      // Don't emit stores for undef values.
      if (isa<UndefValue>(ArgVal))
        continue;

      // Need to store on the stack.
      // FIXME: This alignment is incorrect but this path is disabled
      // for now (will return false). We need to determine the right alignment
      // based on the normal alignment for the underlying machine type.
      //
      unsigned ArgSize = alignTo(ArgVT.getSizeInBits(), 4);

      // Big-endian adjustment for sub-doubleword stores.
      unsigned BEAlign = 0;
      if (ArgSize < 8 && !Subtarget->isLittle())
        BEAlign = 8 - ArgSize;

      Address Addr;
      Addr.setKind(Address::RegBase);
      Addr.setReg(Mips::SP);
      Addr.setOffset(VA.getLocMemOffset() + BEAlign);

      unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType());
      MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
          MachinePointerInfo::getStack(*FuncInfo.MF, Addr.getOffset()),
          MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
      (void)(MMO);
      // if (!emitStore(ArgVT, ArgReg, Addr, MMO))
      return false; // can't store on the stack yet.
    }
  }

  return true;
}

// Emit the post-call stack readjustment and, for non-void calls, copy the
// single return value out of its ABI-assigned physical register into a
// fresh vreg recorded in CLI. Returns false for multi-register results.
bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
                              unsigned NumBytes) {
  CallingConv::ID CC = CLI.CallConv;
  emitInst(Mips::ADJCALLSTACKUP).addImm(16).addImm(0);
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, RetCC_Mips);

    // Only handle a single return value.
    if (RVLocs.size() != 1)
      return false;
    // Copy all of the result registers out of their specified physreg.
    MVT CopyVT = RVLocs[0].getValVT();
    // Special handling for extended integers: copy the full i32 register.
    if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
      CopyVT = MVT::i32;

    unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
    if (!ResultReg)
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY),
            ResultReg).addReg(RVLocs[0].getLocReg());
    CLI.InRegs.push_back(RVLocs[0].getLocReg());

    CLI.ResultReg = ResultReg;
    CLI.NumResultRegs = 1;
  }
  return true;
}

// Fast-path lowering for calls. Rejects anything unusual (fastcc, tail
// calls, varargs, inreg/sret/nest/byval arguments, vector or >64-bit
// parameters) and defers it to SelectionDAG by returning false.
bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI) {
  if (!TargetSupported)
    return false;

  CallingConv::ID CC = CLI.CallConv;
  bool IsTailCall = CLI.IsTailCall;
  bool IsVarArg = CLI.IsVarArg;
  const Value *Callee = CLI.Callee;
  MCSymbol *Symbol = CLI.Symbol;

  // Do not handle FastCC.
  if (CC == CallingConv::Fast)
    return false;

  // Allow SelectionDAG isel to handle tail calls.
  if (IsTailCall)
    return false;

  // Let SDISel handle vararg functions.
  if (IsVarArg)
    return false;

  // FIXME: Only handle *simple* calls for now.
  MVT RetVT;
  if (CLI.RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeSupported(CLI.RetTy, RetVT))
    return false;

  // Bail on argument attributes we do not lower here.
  for (auto Flag : CLI.OutFlags)
    if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal())
      return false;

  // Set up the argument vectors.
  SmallVector<MVT, 16> OutVTs;
  OutVTs.reserve(CLI.OutVals.size());

  for (auto *Val : CLI.OutVals) {
    MVT VT;
    if (!isTypeLegal(Val->getType(), VT) &&
        !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))
      return false;

    // We don't handle vector parameters yet.
    if (VT.isVector() || VT.getSizeInBits() > 64)
      return false;

    OutVTs.push_back(VT);
  }

  Address Addr;
  if (!computeCallAddress(Callee, Addr))
    return false;

  // Handle the arguments now that we've gotten them.
  unsigned NumBytes;
  if (!processCallArgs(CLI, OutVTs, NumBytes))
    return false;

  // Only global-valued (or symbol-valued, below) callees are supported.
  if (!Addr.getGlobalValue())
    return false;

  // Issue the call: materialize the callee address, move it into T9, and
  // jump-and-link through it.
  unsigned DestAddress;
  if (Symbol)
    DestAddress = materializeExternalCallSym(Symbol);
  else
    DestAddress = materializeGV(Addr.getGlobalValue(), MVT::i32);
  emitInst(TargetOpcode::COPY, Mips::T9).addReg(DestAddress);
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::JALR),
              Mips::RA).addReg(Mips::T9);

  // Add implicit physical register uses to the call.
  for (auto Reg : CLI.OutRegs)
    MIB.addReg(Reg, RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));

  CLI.Call = MIB;

  // Finish off the call including any return values.
  return finishCall(CLI, RetVT, NumBytes);
}

// Fast-path lowering for a small set of intrinsics: bswap on i16/i32, and
// memcpy/memmove/memset via libcalls. Everything else falls back to
// SelectionDAG by returning false.
bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
  if (!TargetSupported)
    return false;

  switch (II->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::bswap: {
    Type *RetTy = II->getCalledFunction()->getReturnType();

    MVT VT;
    if (!isTypeSupported(RetTy, VT))
      return false;

    unsigned SrcReg = getRegForValue(II->getOperand(0));
    if (SrcReg == 0)
      return false;
    unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
    if (DestReg == 0)
      return false;
    if (VT == MVT::i16) {
      if (Subtarget->hasMips32r2()) {
        // R2+: WSBH swaps the bytes within each halfword directly.
        emitInst(Mips::WSBH, DestReg).addReg(SrcReg);
        updateValueMap(II, DestReg);
        return true;
      } else {
        // Pre-R2: swap the two low bytes with shift/or, then mask to 16 bits.
        unsigned TempReg[3];
        for (int i = 0; i < 3; i++) {
          TempReg[i] = createResultReg(&Mips::GPR32RegClass);
          if (TempReg[i] == 0)
            return false;
        }
        emitInst(Mips::SLL, TempReg[0]).addReg(SrcReg).addImm(8);
        emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(8);
        emitInst(Mips::OR, TempReg[2]).addReg(TempReg[0]).addReg(TempReg[1]);
        emitInst(Mips::ANDi, DestReg).addReg(TempReg[2]).addImm(0xFFFF);
        updateValueMap(II, DestReg);
        return true;
      }
    } else if (VT == MVT::i32) {
      if (Subtarget->hasMips32r2()) {
        // R2+: WSBH followed by a 16-bit rotate performs a full 32-bit swap.
        unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
        emitInst(Mips::WSBH, TempReg).addReg(SrcReg);
        emitInst(Mips::ROTR, DestReg).addReg(TempReg).addImm(16);
        updateValueMap(II, DestReg);
        return true;
      } else {
        // Pre-R2: reassemble the four bytes individually with shift/mask/or.
        unsigned TempReg[8];
        for (int i = 0; i < 8; i++) {
          TempReg[i] = createResultReg(&Mips::GPR32RegClass);
          if (TempReg[i] == 0)
            return false;
        }

        emitInst(Mips::SRL, TempReg[0]).addReg(SrcReg).addImm(8);
        emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(24);
        emitInst(Mips::ANDi, TempReg[2]).addReg(TempReg[0]).addImm(0xFF00);
        emitInst(Mips::OR, TempReg[3]).addReg(TempReg[1]).addReg(TempReg[2]);

        emitInst(Mips::ANDi, TempReg[4]).addReg(SrcReg).addImm(0xFF00);
        emitInst(Mips::SLL, TempReg[5]).addReg(TempReg[4]).addImm(8);

        emitInst(Mips::SLL, TempReg[6]).addReg(SrcReg).addImm(24);
        emitInst(Mips::OR, TempReg[7]).addReg(TempReg[3]).addReg(TempReg[5]);
        emitInst(Mips::OR, DestReg).addReg(TempReg[6]).addReg(TempReg[7]);
        updateValueMap(II, DestReg);
        return true;
      }
    }
    return false;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const auto *MTI = cast<MemTransferInst>(II);
    // Don't handle volatile.
    if (MTI->isVolatile())
      return false;
    // Only lower when the length operand is a 32-bit integer.
    if (!MTI->getLength()->getType()->isIntegerTy(32))
      return false;
    const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
    // Emit a libcall passing only the first NumArgOperands-2 operands (the
    // trailing intrinsic-only operands are dropped).
    return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2);
  }
  case Intrinsic::memset: {
    const MemSetInst *MSI = cast<MemSetInst>(II);
    // Don't handle volatile.
    if (MSI->isVolatile())
      return false;
    if (!MSI->getLength()->getType()->isIntegerTy(32))
      return false;
    return lowerCallTo(II, "memset", II->getNumArgOperands() - 2);
  }
  }
  return false;
}

// Lower a return instruction. Handles at most one register-assigned return
// value, with optional zext/sext promotion of small integers.
bool MipsFastISel::selectRet(const Instruction *I) {
  const Function &F = *I->getParent()->getParent();
  const ReturnInst *Ret = cast<ReturnInst>(I);

  if (!FuncInfo.CanLowerReturn)
    return false;

  // Build a list of return value registers.
  SmallVector<unsigned, 4> RetRegs;

  if (Ret->getNumOperands() > 0) {
    CallingConv::ID CC = F.getCallingConv();

    // Do not handle FastCC.
    if (CC == CallingConv::Fast)
      return false;

    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    MipsCCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs,
                       I->getContext());
    CCAssignFn *RetCC = RetCC_Mips;
    CCInfo.AnalyzeReturn(Outs, RetCC);

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];
    const Value *RV = Ret->getOperand(0);

    // Don't bother handling odd stuff for now.
    if ((VA.getLocInfo() != CCValAssign::Full) &&
        (VA.getLocInfo() != CCValAssign::BCvt))
      return false;

    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    unsigned DestReg = VA.getLocReg();
    // Avoid a cross-class copy. This is very unlikely.
    if (!MRI.getRegClass(SrcReg)->contains(DestReg))
      return false;

    EVT RVEVT = TLI.getValueType(DL, RV->getType());
    if (!RVEVT.isSimple())
      return false;

    if (RVEVT.isVector())
      return false;

    MVT RVVT = RVEVT.getSimpleVT();
    if (RVVT == MVT::f128)
      return false;

    MVT DestVT = VA.getValVT();
    // Special handling for extended integers: widen i1/i8/i16 values as
    // their zext/sext attribute requests before the copy.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        bool IsZExt = Outs[0].Flags.isZExt();
        SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);
        if (SrcReg == 0)
          return false;
      }
    }

    // Make the copy.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);

    // Add register to return instruction.
    RetRegs.push_back(VA.getLocReg());
  }
  // Emit the return, listing the value registers as implicit uses so they
  // stay live up to the return.
  MachineInstrBuilder MIB = emitInst(Mips::RetRA);
  for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
    MIB.addReg(RetRegs[i], RegState::Implicit);
  return true;
}

// Lower trunc: no code is needed, the value simply stays in its register
// and the high bits are treated as undefined.
bool MipsFastISel::selectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  // undefined.
  Value *Op = I->getOperand(0);

  EVT SrcVT, DestVT;
  SrcVT = TLI.getValueType(DL, Op->getType(), true);
  DestVT = TLI.getValueType(DL, I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
    return false;
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
    return false;

  unsigned SrcReg = getRegForValue(Op);
  if (!SrcReg)
    return false;

  // Because the high bits are undefined, a truncate doesn't generate
  // any code.
  updateValueMap(I, SrcReg);
  return true;
}

// Lower zext/sext by widening the source value into a fresh 32-bit GPR.
bool MipsFastISel::selectIntExt(const Instruction *I) {
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);
  if (!SrcReg)
    return false;

  EVT SrcEVT, DestEVT;
  SrcEVT = TLI.getValueType(DL, SrcTy, true);
  DestEVT = TLI.getValueType(DL, DestTy, true);
  if (!SrcEVT.isSimple())
    return false;
  if (!DestEVT.isSimple())
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DestVT = DestEVT.getSimpleVT();
  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);

  if (!emitIntExt(SrcVT, SrcReg, DestVT, ResultReg, isZExt))
    return false;
  updateValueMap(I, ResultReg);
  return true;
}

// Sign-extend for pre-R2 subtargets: shift left then arithmetic shift right
// by the same amount to replicate the sign bit.
bool MipsFastISel::emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                   unsigned DestReg) {
  unsigned ShiftAmt;
  switch (SrcVT.SimpleTy) {
  default:
    return false;
  case MVT::i8:
    ShiftAmt = 24;
    break;
  case MVT::i16:
    ShiftAmt = 16;
    break;
  }
  unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
  emitInst(Mips::SLL, TempReg).addReg(SrcReg).addImm(ShiftAmt);
  emitInst(Mips::SRA, DestReg).addReg(TempReg).addImm(ShiftAmt);
  return true;
}

// Sign-extend using the single-instruction SEB/SEH forms available on
// MIPS32r2 and later.
bool MipsFastISel::emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                   unsigned DestReg) {
  switch (SrcVT.SimpleTy) {
  default:
    return false;
  case MVT::i8:
    emitInst(Mips::SEB, DestReg).addReg(SrcReg);
    break;
  case MVT::i16:
    emitInst(Mips::SEH, DestReg).addReg(SrcReg);
    break;
  }
  return true;
}

// Dispatch sign-extension to the best sequence the subtarget supports.
bool MipsFastISel::emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                               unsigned DestReg) {
  if ((DestVT != MVT::i32) && (DestVT != MVT::i16))
    return false;
  if (Subtarget->hasMips32r2())
    return emitIntSExt32r2(SrcVT, SrcReg, DestVT, DestReg);
  return emitIntSExt32r1(SrcVT, SrcReg, DestVT, DestReg);
}

// Zero-extend by masking off all bits above the source width.
bool MipsFastISel::emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                               unsigned DestReg) {
  int64_t Imm;

  switch (SrcVT.SimpleTy) {
  default:
    return false;
  case MVT::i1:
    Imm = 1;
    break;
  case MVT::i8:
    Imm = 0xff;
    break;
  case MVT::i16:
    Imm = 0xffff;
    break;
  }

  emitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(Imm);
  return true;
}

// Emit an integer extension of SrcReg into the caller-provided DestReg.
bool MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                              unsigned DestReg, bool IsZExt) {
  // FastISel does not have plumbing to deal with extensions where the SrcVT or
  // DestVT are odd things, so test to make sure that they are both types we can
  // handle (i1/i8/i16/i32 for SrcVT and i8/i16/i32/i64 for DestVT), otherwise
  // bail out to SelectionDAG.
  if (((DestVT != MVT::i8) && (DestVT != MVT::i16) && (DestVT != MVT::i32)) ||
      ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) && (SrcVT != MVT::i16)))
    return false;
  if (IsZExt)
    return emitIntZExt(SrcVT, SrcReg, DestVT, DestReg);
  return emitIntSExt(SrcVT, SrcReg, DestVT, DestReg);
}

// Convenience overload that allocates the destination register itself;
// returns the new register, or 0 on failure.
unsigned MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                  bool isZExt) {
  unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
  bool Success = emitIntExt(SrcVT, SrcReg, DestVT, DestReg, isZExt);
  return Success ? DestReg : 0;
}

// Lower 32-bit [su]div / [su]rem via the HI/LO divide instructions. A TEQ
// against $zero is emitted after the divide to trap on division by zero.
bool MipsFastISel::selectDivRem(const Instruction *I, unsigned ISDOpcode) {
  EVT DestEVT = TLI.getValueType(DL, I->getType(), true);
  if (!DestEVT.isSimple())
    return false;

  MVT DestVT = DestEVT.getSimpleVT();
  if (DestVT != MVT::i32)
    return false;

  // Signed and unsigned variants share an opcode per div/rem pair.
  unsigned DivOpc;
  switch (ISDOpcode) {
  default:
    return false;
  case ISD::SDIV:
  case ISD::SREM:
    DivOpc = Mips::SDIV;
    break;
  case ISD::UDIV:
  case ISD::UREM:
    DivOpc = Mips::UDIV;
    break;
  }

  unsigned Src0Reg = getRegForValue(I->getOperand(0));
  unsigned Src1Reg = getRegForValue(I->getOperand(1));
  if (!Src0Reg || !Src1Reg)
    return false;

  emitInst(DivOpc).addReg(Src0Reg).addReg(Src1Reg);
  emitInst(Mips::TEQ).addReg(Src1Reg).addReg(Mips::ZERO).addImm(7);

  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
  if (!ResultReg)
    return false;

  // Remainder comes from HI, quotient from LO.
  unsigned MFOpc = (ISDOpcode == ISD::SREM || ISDOpcode == ISD::UREM)
                       ?
                       Mips::MFHI
                       : Mips::MFLO;
  emitInst(MFOpc, ResultReg);

  updateValueMap(I, ResultReg);
  return true;
}

// Lower shl/lshr/ashr, selecting the immediate-form or register-form shift
// opcode depending on whether the shift amount is a constant.
bool MipsFastISel::selectShift(const Instruction *I) {
  MVT RetVT;

  if (!isTypeSupported(I->getType(), RetVT))
    return false;

  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
  if (!ResultReg)
    return false;

  unsigned Opcode = I->getOpcode();
  const Value *Op0 = I->getOperand(0);
  unsigned Op0Reg = getRegForValue(Op0);
  if (!Op0Reg)
    return false;

  // If AShr or LShr, then we need to make sure the operand0 is sign extended.
  if (Opcode == Instruction::AShr || Opcode == Instruction::LShr) {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    if (!TempReg)
      return false;

    MVT Op0MVT = TLI.getValueType(DL, Op0->getType(), true).getSimpleVT();
    // LShr needs a zero-extended operand, AShr a sign-extended one.
    bool IsZExt = Opcode == Instruction::LShr;
    if (!emitIntExt(Op0MVT, Op0Reg, MVT::i32, TempReg, IsZExt))
      return false;

    Op0Reg = TempReg;
  }

  // Constant shift amount: use the immediate-form opcodes.
  if (const auto *C = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t ShiftVal = C->getZExtValue();

    switch (Opcode) {
    default:
      llvm_unreachable("Unexpected instruction.");
    case Instruction::Shl:
      Opcode = Mips::SLL;
      break;
    case Instruction::AShr:
      Opcode = Mips::SRA;
      break;
    case Instruction::LShr:
      Opcode = Mips::SRL;
      break;
    }

    emitInst(Opcode, ResultReg).addReg(Op0Reg).addImm(ShiftVal);
    updateValueMap(I, ResultReg);
    return true;
  }

  // Variable shift amount: use the register-form (*V) opcodes.
  unsigned Op1Reg = getRegForValue(I->getOperand(1));
  if (!Op1Reg)
    return false;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected instruction.");
  case Instruction::Shl:
    Opcode = Mips::SLLV;
    break;
  case Instruction::AShr:
    Opcode = Mips::SRAV;
    break;
  case Instruction::LShr:
    Opcode = Mips::SRLV;
    break;
  }

  emitInst(Opcode, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
  updateValueMap(I, ResultReg);
  return true;
}

// Top-level FastISel dispatch: route each IR opcode to its select* handler.
// Returning false defers the instruction to SelectionDAG.
bool MipsFastISel::fastSelectInstruction(const Instruction *I) {
  if (!TargetSupported)
    return false;
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Load:
    return selectLoad(I);
  case Instruction::Store:
    return selectStore(I);
  // Div/rem: try the generic binary-op path first (e.g. constant folding),
  // then fall back to the target-specific HI/LO lowering.
  case Instruction::SDiv:
    if (!selectBinaryOp(I, ISD::SDIV))
      return selectDivRem(I, ISD::SDIV);
    return true;
  case Instruction::UDiv:
    if (!selectBinaryOp(I, ISD::UDIV))
      return selectDivRem(I, ISD::UDIV);
    return true;
  case Instruction::SRem:
    if (!selectBinaryOp(I, ISD::SREM))
      return selectDivRem(I, ISD::SREM);
    return true;
  case Instruction::URem:
    if (!selectBinaryOp(I, ISD::UREM))
      return selectDivRem(I, ISD::UREM);
    return true;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return selectShift(I);
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    return selectLogicalOp(I);
  case Instruction::Br:
    return selectBranch(I);
  case Instruction::Ret:
    return selectRet(I);
  case Instruction::Trunc:
    return selectTrunc(I);
  case Instruction::ZExt:
  case Instruction::SExt:
    return selectIntExt(I);
  case Instruction::FPTrunc:
    return selectFPTrunc(I);
  case Instruction::FPExt:
    return selectFPExt(I);
  case Instruction::FPToSI:
    return selectFPToInt(I, /*isSigned*/ true);
  case Instruction::FPToUI:
    return selectFPToInt(I, /*isSigned*/ false);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return selectCmp(I);
  case Instruction::Select:
    return selectSelect(I);
  }
  return false;
}

// Get the register holding V, widening i8/i16 values to i32 first (zero- or
// sign-extended per IsUnsigned) so callers see a full-width value.
// Returns 0 on failure.
unsigned MipsFastISel::getRegEnsuringSimpleIntegerWidening(const Value *V,
                                                           bool IsUnsigned) {
  unsigned
      VReg = getRegForValue(V);
  if (VReg == 0)
    return 0;
  MVT VMVT = TLI.getValueType(DL, V->getType(), true).getSimpleVT();
  if ((VMVT == MVT::i8) || (VMVT == MVT::i16)) {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    if (!emitIntExt(VMVT, VReg, MVT::i32, TempReg, IsUnsigned))
      return 0;
    VReg = TempReg;
  }
  return VReg;
}

// Fold an offset that does not fit in a signed 16-bit immediate into the
// base register (base += offset; offset = 0) so the final memory operand
// can use a plain 16-bit displacement.
void MipsFastISel::simplifyAddress(Address &Addr) {
  if (!isInt<16>(Addr.getOffset())) {
    unsigned TempReg =
        materialize32BitInt(Addr.getOffset(), &Mips::GPR32RegClass);
    unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::ADDu, DestReg).addReg(TempReg).addReg(Addr.getReg());
    Addr.setReg(DestReg);
    Addr.setOffset(0);
  }
}

// Override of the generic reg-reg instruction emitter, specialized for MUL.
unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill) {
  // We treat the MUL instruction in a special way because it clobbers
  // the HI0 & LO0 registers. The TableGen definition of this instruction can
  // mark these registers only as implicitly defined. As a result, the
  // register allocator runs out of registers when this instruction is
  // followed by another instruction that defines the same registers too.
  // We can fix this by explicitly marking those registers as dead.
  if (MachineInstOpcode == Mips::MUL) {
    unsigned ResultReg = createResultReg(RC);
    const MCInstrDesc &II = TII.get(MachineInstOpcode);
    Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
    Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Mips::HI0, RegState::ImplicitDefine | RegState::Dead)
        .addReg(Mips::LO0, RegState::ImplicitDefine | RegState::Dead);
    return ResultReg;
  }

  // Every other opcode takes the generic path.
  return FastISel::fastEmitInst_rr(MachineInstOpcode, RC, Op0, Op0IsKill, Op1,
                                   Op1IsKill);
}

namespace llvm {
// Factory entry point used by the Mips target to create this FastISel.
FastISel *Mips::createFastISel(FunctionLoweringInfo &funcInfo,
                               const TargetLibraryInfo *libInfo) {
  return new MipsFastISel(funcInfo, libInfo);
}
}