SystemZInstrInfo.cpp revision cf1b5bd60ab7cf907bef20c3997ffb249b4fe90a
//===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the SystemZ implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SystemZInstrInfo.h"
#include "SystemZInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"

// Pull in the TableGen-generated constructor and instruction-mapping tables
// (getDisp12Opcode, getDisp20Opcode, getMemOpcode, etc.).
#define GET_INSTRINFO_CTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"

using namespace llvm;

// The two opcodes passed to the generated base class are the call-frame
// setup/destroy pseudos used for stack adjustment around calls.
SystemZInstrInfo::SystemZInstrInfo(SystemZTargetMachine &tm)
  : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
    RI(tm) {
}

// MI is a 128-bit load or store.  Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
//
// The high 64 bits go at the original address and the low 64 bits at
// offset +8, matching big-endian register-pair layout (subreg_high first).
// getOpcodeForOffset may substitute a long-displacement form if adding 8
// pushes the low half's offset out of the 12-bit unsigned range.
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
                                 unsigned NewOpcode) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();

  // Get two load or store instructions.  Use the original instruction for
  // one of them (arbitrarily the second here) and create a clone for the
  // other.  The clone is inserted *before* MI, so it becomes the "high"
  // (lower-addressed) half.
  MachineInstr *EarlierMI = MF.CloneMachineInstr(MI);
  MBB->insert(MI, EarlierMI);

  // Set up the two 64-bit registers: operand 0 of each instruction is
  // rewritten to the appropriate 64-bit subregister of the 128-bit pair.
  MachineOperand &HighRegOp = EarlierMI->getOperand(0);
  MachineOperand &LowRegOp = MI->getOperand(0);
  HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_high));
  LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_low));

  // The address in the first (high) instruction is already correct.
  // Adjust the offset in the second (low) instruction.
  MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
  MachineOperand &LowOffsetOp = MI->getOperand(2);
  LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

  // Set the opcodes, choosing displacement forms that fit each offset.
  unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
  unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
  assert(HighOpcode && LowOpcode && "Both offsets should be in range");

  EarlierMI->setDesc(get(HighOpcode));
  MI->setDesc(get(LowOpcode));
}

// Split ADJDYNALLOC instruction MI.
//
// The pseudo's immediate operand (operand 2) is an offset relative to the
// dynamically-allocated area; fold in the maximum call frame size and the
// fixed ABI call-frame size to produce a concrete LA/LAY-style address.
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineFrameInfo *MFFrame = MF.getFrameInfo();
  MachineOperand &OffsetMO = MI->getOperand(2);

  uint64_t Offset = (MFFrame->getMaxCallFrameSize() +
                     SystemZMC::CallFrameSize +
                     OffsetMO.getImm());
  // LA only covers 12-bit unsigned displacements; getOpcodeForOffset picks
  // LAY (or fails) for larger offsets.
  unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
  assert(NewOpcode && "No support for huge argument lists yet");
  MI->setDesc(get(NewOpcode));
  OffsetMO.setImm(Offset);
}

// If MI is a simple load or store for a frame object, return the register
// it loads or stores and set FrameIndex to the index of the frame object.
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static int isSimpleMove(const MachineInstr *MI, int &FrameIndex,
                        unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  // A "simple" move has a frame-index base (operand 1), zero displacement
  // (operand 2) and no index register (operand 3).
  if ((MCID.TSFlags & Flag) &&
      MI->getOperand(1).isFI() &&
      MI->getOperand(2).getImm() == 0 &&
      MI->getOperand(3).getReg() == 0) {
    FrameIndex = MI->getOperand(1).getIndex();
    return MI->getOperand(0).getReg();
  }
  return 0;
}

// TargetInstrInfo hook: if MI is a load of a register from a stack slot,
// return that register and set FrameIndex; return 0 otherwise.
unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}

// TargetInstrInfo hook: if MI is a store of a register to a stack slot,
// return that register and set FrameIndex; return 0 otherwise.
unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}

// Analyze the branching code at the end of MBB per the TargetInstrInfo
// contract: fill in TBB/FBB/Cond for the patterns this target understands
// and return false, or return true if the terminators are too complex.
// When AllowModify is true, dead instructions after an unconditional branch
// and fall-through branches may be deleted.
bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // Most of the code and comments here are boilerplate.

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;

    // Working from the bottom, when we see a non-terminator instruction,
    // we're done.
    if (!isUnpredicatedTerminator(I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->isBranch())
      return true;

    // Can't handle indirect branches.
    SystemZII::Branch Branch(getBranchInfo(I));
    if (!Branch.Target->isMBB())
      return true;

    // Punt on compound branches (compare-and-branch forms).
    if (Branch.Type != SystemZII::BranchNormal)
      return true;

    if (Branch.CCMask == SystemZ::CCMASK_ANY) {
      // Handle unconditional branches.
      if (!AllowModify) {
        TBB = Branch.Target->getMBB();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      while (llvm::next(I) != MBB.end())
        llvm::next(I)->eraseFromParent();

      Cond.clear();
      FBB = 0;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
        TBB = 0;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = Branch.Target->getMBB();
      continue;
    }

    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
      // FIXME: add X86-style branch swap
      FBB = TBB;
      TBB = Branch.Target->getMBB();
      // Cond holds a single immediate: the 4-bit CC mask of the branch.
      Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 1);
    assert(TBB);

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != Branch.Target->getMBB())
      return true;

    // If the conditions are the same, we can leave them alone.
    unsigned OldCond = Cond[0].getImm();
    if (OldCond == Branch.CCMask)
      continue;

    // FIXME: Try combining conditions like X86 does.  Should be easy on Z!
  }

  return false;
}

// Remove the branch instructions at the end of MBB that this pass
// understands (direct branches only) and return how many were removed.
unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  // Most of the code and comments here are boilerplate.
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;
    if (!I->isBranch())
      break;
    // Stop at indirect branches; AnalyzeBranch would have punted on them
    // too, so they were never part of the analyzed sequence.
    if (!getBranchInfo(I).Target->isMBB())
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}

// Insert branch code at the end of MBB for the (TBB, FBB, Cond) triple
// produced by AnalyzeBranch.  Returns the number of instructions inserted.
unsigned
SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               DebugLoc DL) const {
  // In this function we output 32-bit branches, which should always
  // have enough range.  They can be shortened and relaxed by later code
  // in the pipeline, if desired.

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "SystemZ branch conditions have one component!");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
    return 1;
  }

  // Conditional branch: BRC takes the CC mask from Cond[0].
  unsigned Count = 0;
  unsigned CC = Cond[0].getImm();
  BuildMI(&MBB, DL, get(SystemZ::BRC)).addImm(CC).addMBB(TBB);
  ++Count;

  if (FBB) {
    // Two-way Conditional branch.  Insert the second branch.
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
    ++Count;
  }
  return Count;
}

// Emit instructions to copy SrcReg into DestReg, choosing the move opcode
// by register class.  128-bit GPR pairs are split into two 64-bit moves.
void
SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI, DebugLoc DL,
                              unsigned DestReg, unsigned SrcReg,
                              bool KillSrc) const {
  // Split 128-bit GPR moves into two 64-bit moves.  This handles ADDR128 too.
  // NOTE(review): both halves are copied high-then-low with the same KillSrc;
  // this assumes Dest and Src pairs don't partially overlap — presumably
  // guaranteed by register allocation, but not visible here.
  if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_high),
                RI.getSubReg(SrcReg, SystemZ::subreg_high), KillSrc);
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_low),
                RI.getSubReg(SrcReg, SystemZ::subreg_low), KillSrc);
    return;
  }

  // Everything else needs only one instruction.
  unsigned Opcode;
  if (SystemZ::GR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LR;
  else if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LGR;
  else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LER;
  else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LDR;
  else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LXR;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

// Spill SrcReg to stack slot FrameIdx using the store opcode appropriate
// for its register class.
void
SystemZInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIdx,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation
  // (see expandPostRAPseudo / splitMove).
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
                    .addReg(SrcReg, getKillRegState(isKill)), FrameIdx);
}

// Reload DestReg from stack slot FrameIdx using the load opcode appropriate
// for its register class.
void
SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       unsigned DestReg, int FrameIdx,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation
  // (see expandPostRAPseudo / splitMove).
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
                    FrameIdx);
}

// Return true if MI is a simple load or store with a 12-bit displacement
// and no index.  Flag is SimpleBDXLoad for loads and SimpleBDXStore for
// stores.  (MVC only accepts 12-bit unsigned displacements, hence the
// isUInt<12> check.)
static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  return ((MCID.TSFlags & Flag) &&
          isUInt<12>(MI->getOperand(2).getImm()) &&
          MI->getOperand(3).getReg() == 0);
}

// Return a MachineMemOperand for FrameIndex with flags MMOFlags.
// Offset is the byte offset from the start of FrameIndex.
static MachineMemOperand *getFrameMMO(MachineFunction &MF, int FrameIndex,
                                      uint64_t &Offset, unsigned MMOFlags) {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Value *V = PseudoSourceValue::getFixedStack(FrameIndex);
  return MF.getMachineMemOperand(MachinePointerInfo(V, Offset), MMOFlags,
                                 MFI->getObjectSize(FrameIndex),
                                 MFI->getObjectAlignment(FrameIndex));
}

// Try to fold the spill/reload of operand Ops[0] of MI into a single
// memory-form instruction referencing FrameIndex.  Returns the folded
// instruction (not yet inserted into a block) or 0 if no folding applies.
MachineInstr *
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        int FrameIndex) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Size = MFI->getObjectSize(FrameIndex);

  // Early exit for cases we don't care about
  if (Ops.size() != 1)
    return 0;

  unsigned OpNum = Ops[0];
  assert(Size == MF.getRegInfo()
         .getRegClass(MI->getOperand(OpNum).getReg())->getSize() &&
         "Invalid size combination");

  // Look for cases where the source of a simple store or the destination
  // of a simple load is being spilled.  Try to use MVC instead.
  //
  // Although MVC is in practice a fast choice in these cases, it is still
  // logically a bytewise copy.  This means that we cannot use it if the
  // load or store is volatile.  It also means that the transformation is
  // not valid in cases where the two memories partially overlap; however,
  // that is not a problem here, because we know that one of the memories
  // is a full frame index.
  if (OpNum == 0 && MI->hasOneMemOperand()) {
    MachineMemOperand *MMO = *MI->memoperands_begin();
    if (MMO->getSize() == Size && !MMO->isVolatile()) {
      // Handle conversion of loads.  A load whose result is immediately
      // spilled becomes MVC frame <- original-address.
      if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) {
        uint64_t Offset = 0;
        MachineMemOperand *FrameMMO = getFrameMMO(MF, FrameIndex, Offset,
                                                  MachineMemOperand::MOStore);
        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
          .addFrameIndex(FrameIndex).addImm(Offset).addImm(Size)
          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
          .addMemOperand(FrameMMO).addMemOperand(MMO);
      }
      // Handle conversion of stores.  A store whose source is reloaded
      // from a spill slot becomes MVC original-address <- frame.
      if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) {
        uint64_t Offset = 0;
        MachineMemOperand *FrameMMO = getFrameMMO(MF, FrameIndex, Offset,
                                                  MachineMemOperand::MOLoad);
        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
          .addImm(Size).addFrameIndex(FrameIndex).addImm(Offset)
          .addMemOperand(MMO).addMemOperand(FrameMMO);
      }
    }
  }

  // If the spilled operand is the final one, try to change <INSN>R
  // into <INSN> using the TableGen-generated register->memory opcode map.
  int MemOpcode = SystemZ::getMemOpcode(MI->getOpcode());
  if (MemOpcode >= 0) {
    unsigned NumOps = MI->getNumExplicitOperands();
    if (OpNum == NumOps - 1) {
      const MCInstrDesc &MemDesc = get(MemOpcode);
      uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
      assert(AccessBytes != 0 && "Size of access should be known");
      assert(AccessBytes <= Size && "Access outside the frame index");
      // Access the low-order AccessBytes of the slot (big-endian layout).
      uint64_t Offset = Size - AccessBytes;
      MachineMemOperand *FrameMMO = getFrameMMO(MF, FrameIndex, Offset,
                                                MachineMemOperand::MOLoad);
      MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(MemOpcode));
      // Copy all operands before the folded one, then append the address.
      for (unsigned I = 0; I < OpNum; ++I)
        MIB.addOperand(MI->getOperand(I));
      MIB.addFrameIndex(FrameIndex).addImm(Offset);
      if (MemDesc.TSFlags & SystemZII::HasIndex)
        MIB.addReg(0);
      MIB.addMemOperand(FrameMMO);
      return MIB;
    }
  }

  return 0;
}

// Folding a load into MI directly is not implemented for SystemZ.
MachineInstr *
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        MachineInstr* LoadMI) const {
  return 0;
}

// Expand post-RA pseudo instructions: 128-bit loads/stores are split into
// pairs of 64-bit accesses, and ADJDYNALLOC is lowered to LA/LAY.
// Returns true if MI was a pseudo that this function expanded.
bool
SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::L128:
    splitMove(MI, SystemZ::LG);
    return true;

  case SystemZ::ST128:
    splitMove(MI, SystemZ::STG);
    return true;

  case SystemZ::LX:
    splitMove(MI, SystemZ::LD);
    return true;

  case SystemZ::STX:
    splitMove(MI, SystemZ::STD);
    return true;

  case SystemZ::ADJDYNALLOC:
    splitAdjDynAlloc(MI);
    return true;

  default:
    return false;
  }
}

// Invert the single-component branch condition in place.  Flipping all
// bits of the 4-bit CC mask (XOR with CCMASK_ANY) yields the complementary
// condition.  Returns false to indicate the reversal succeeded.
bool SystemZInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid branch condition!");
  Cond[0].setImm(Cond[0].getImm() ^ SystemZ::CCMASK_ANY);
  return false;
}

// Return the size of MI in bytes: estimated for inline asm, otherwise the
// fixed encoding size from the instruction descriptor.
uint64_t SystemZInstrInfo::getInstSizeInBytes(const MachineInstr *MI) const {
  if (MI->getOpcode() == TargetOpcode::INLINEASM) {
    const MachineFunction *MF = MI->getParent()->getParent();
    const char *AsmStr = MI->getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
  }
  return MI->getDesc().getSize();
}

// Decompose branch instruction MI into (type, CC mask, target operand).
// Unconditional branches report CCMASK_ANY; compare-and-branch forms carry
// the mask in operand 2 and the target in operand 3.
SystemZII::Branch
SystemZInstrInfo::getBranchInfo(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::BR:
  case SystemZ::J:
  case SystemZ::JG:
    return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
                             &MI->getOperand(0));

  case SystemZ::BRC:
  case SystemZ::BRCL:
    return SystemZII::Branch(SystemZII::BranchNormal,
                             MI->getOperand(0).getImm(), &MI->getOperand(1));

  case SystemZ::CIJ:
  case SystemZ::CRJ:
    return SystemZII::Branch(SystemZII::BranchC, MI->getOperand(2).getImm(),
                             &MI->getOperand(3));

  case SystemZ::CGIJ:
  case SystemZ::CGRJ:
    return SystemZII::Branch(SystemZII::BranchCG, MI->getOperand(2).getImm(),
                             &MI->getOperand(3));

  default:
    llvm_unreachable("Unrecognized branch opcode");
  }
}

// Map register class RC to the load and store opcodes used for spills and
// reloads of that class.  128-bit classes map to the pseudos that are later
// split by expandPostRAPseudo.
void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
                                           unsigned &LoadOpcode,
                                           unsigned &StoreOpcode) const {
  if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
    LoadOpcode = SystemZ::L;
    StoreOpcode = SystemZ::ST32;
  } else if (RC == &SystemZ::GR64BitRegClass ||
             RC == &SystemZ::ADDR64BitRegClass) {
    LoadOpcode = SystemZ::LG;
    StoreOpcode = SystemZ::STG;
  } else if (RC == &SystemZ::GR128BitRegClass ||
             RC == &SystemZ::ADDR128BitRegClass) {
    LoadOpcode = SystemZ::L128;
    StoreOpcode = SystemZ::ST128;
  } else if (RC == &SystemZ::FP32BitRegClass) {
    LoadOpcode = SystemZ::LE;
    StoreOpcode = SystemZ::STE;
  } else if (RC == &SystemZ::FP64BitRegClass) {
    LoadOpcode = SystemZ::LD;
    StoreOpcode = SystemZ::STD;
  } else if (RC == &SystemZ::FP128BitRegClass) {
    LoadOpcode = SystemZ::LX;
    StoreOpcode = SystemZ::STX;
  } else
    llvm_unreachable("Unsupported regclass to load or store");
}

// Return a version of Opcode that can encode the given address Offset,
// or 0 if no form is in range.  For 128-bit accesses both halves (Offset
// and Offset + 8) must be encodable, since splitMove will add 8 later.
unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
                                              int64_t Offset) const {
  const MCInstrDesc &MCID = get(Opcode);
  int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
  if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
    // Get the instruction to use for unsigned 12-bit displacements.
    int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
    if (Disp12Opcode >= 0)
      return Disp12Opcode;

    // All address-related instructions can use unsigned 12-bit
    // displacements.
    return Opcode;
  }
  if (isInt<20>(Offset) && isInt<20>(Offset2)) {
    // Get the instruction to use for signed 20-bit displacements.
    int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
    if (Disp20Opcode >= 0)
      return Disp20Opcode;

    // Check whether Opcode allows signed 20-bit displacements.
    if (MCID.TSFlags & SystemZII::Has20BitOffset)
      return Opcode;
  }
  return 0;
}

// Return the fused compare-and-branch opcode corresponding to compare
// opcode Opcode, or 0 if there is none.  For compare-immediate forms the
// immediate (operand 1 of MI) must fit in the 8-bit signed field of
// CIJ/CGIJ, so MI is consulted; pass null MI to reject immediate forms.
unsigned SystemZInstrInfo::getCompareAndBranch(unsigned Opcode,
                                               const MachineInstr *MI) const {
  switch (Opcode) {
  case SystemZ::CR:
    return SystemZ::CRJ;
  case SystemZ::CGR:
    return SystemZ::CGRJ;
  case SystemZ::CHI:
    return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CIJ : 0;
  case SystemZ::CGHI:
    return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CGIJ : 0;
  default:
    return 0;
  }
}

// Emit a single instruction loading immediate Value into 64-bit register
// Reg.  Chooses, in order: LGHI (sign-extended 16-bit), LLILL/LLILH
// (zero-extending 16-bit halfword inserts), or LGFI (sign-extended 32-bit).
// Values outside the signed 32-bit range are not handled yet.
void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     unsigned Reg, uint64_t Value) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  unsigned Opcode;
  if (isInt<16>(Value))
    Opcode = SystemZ::LGHI;
  else if (SystemZ::isImmLL(Value))
    Opcode = SystemZ::LLILL;
  else if (SystemZ::isImmLH(Value)) {
    // LLILH encodes bits 16-31, so shift the immediate down.
    Opcode = SystemZ::LLILH;
    Value >>= 16;
  } else {
    assert(isInt<32>(Value) && "Huge values not handled yet");
    Opcode = SystemZ::LGFI;
  }
  BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
}