1//===-- MachineFunction.cpp -----------------------------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Collect native machine code information for a function.  This allows
11// target-specific information about the generated code to be stored with each
12// function.
13//
14//===----------------------------------------------------------------------===//
15
16#include "llvm/CodeGen/MachineFunction.h"
17#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/SmallString.h"
19#include "llvm/Analysis/ConstantFolding.h"
20#include "llvm/Analysis/EHPersonalities.h"
21#include "llvm/CodeGen/MachineConstantPool.h"
22#include "llvm/CodeGen/MachineFrameInfo.h"
23#include "llvm/CodeGen/MachineFunctionInitializer.h"
24#include "llvm/CodeGen/MachineFunctionPass.h"
25#include "llvm/CodeGen/MachineInstr.h"
26#include "llvm/CodeGen/MachineJumpTableInfo.h"
27#include "llvm/CodeGen/MachineModuleInfo.h"
28#include "llvm/CodeGen/MachineRegisterInfo.h"
29#include "llvm/CodeGen/Passes.h"
30#include "llvm/CodeGen/PseudoSourceValue.h"
31#include "llvm/CodeGen/WinEHFuncInfo.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/DebugInfo.h"
34#include "llvm/IR/Function.h"
35#include "llvm/IR/Module.h"
36#include "llvm/IR/ModuleSlotTracker.h"
37#include "llvm/MC/MCAsmInfo.h"
38#include "llvm/MC/MCContext.h"
39#include "llvm/Support/Debug.h"
40#include "llvm/Support/GraphWriter.h"
41#include "llvm/Support/raw_ostream.h"
42#include "llvm/Target/TargetFrameLowering.h"
43#include "llvm/Target/TargetLowering.h"
44#include "llvm/Target/TargetMachine.h"
45#include "llvm/Target/TargetSubtargetInfo.h"
46using namespace llvm;
47
48#define DEBUG_TYPE "codegen"
49
// Hidden debugging knob: when nonzero, overrides the computed alignment of
// every MachineFunction with this value (see the constructor below).
static cl::opt<unsigned>
    AlignAllFunctions("align-all-functions",
                      cl::desc("Force the alignment of all functions."),
                      cl::init(0), cl::Hidden);
54
// Out-of-line anchor method: pins MachineFunctionInitializer's vtable to this
// translation unit so it is emitted exactly once.
void MachineFunctionInitializer::anchor() {}
56
/// Print a human-readable summary of this property set to \p ROS.
/// If \p OnlySet is true, properties whose bit is clear are skipped entirely;
/// otherwise the "negative" form (e.g. "Post SSA") is printed for them.
void MachineFunctionProperties::print(raw_ostream &ROS, bool OnlySet) const {
  // Leave this function even in NDEBUG as an out-of-line anchor.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  for (BitVector::size_type i = 0; i < Properties.size(); ++i) {
    bool HasProperty = Properties[i];
    if (OnlySet && !HasProperty)
      continue;
    switch(static_cast<Property>(i)) {
      case Property::IsSSA:
        ROS << (HasProperty ? "SSA, " : "Post SSA, ");
        break;
      case Property::TracksLiveness:
        ROS << (HasProperty ? "" : "not ") << "tracking liveness, ";
        break;
      case Property::AllVRegsAllocated:
        ROS << (HasProperty ? "AllVRegsAllocated" : "HasVRegs");
        break;
      default:
        // Properties without a printable name are silently skipped.
        break;
    }
  }
#endif
}
80
81//===----------------------------------------------------------------------===//
82// MachineFunction implementation
83//===----------------------------------------------------------------------===//
84
// Out-of-line virtual method. Anchors the vtable for MachineFunctionInfo (the
// target-specific per-function info base class) to this file.
MachineFunctionInfo::~MachineFunctionInfo() {}
87
// ilist callback: route block deletion through the owning MachineFunction so
// the block is destroyed and returned to the function's recycler/allocator.
void ilist_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
  MBB->getParent()->DeleteMachineBasicBlock(MBB);
}
91
92static inline unsigned getFnStackAlignment(const TargetSubtargetInfo *STI,
93                                           const Function *Fn) {
94  if (Fn->hasFnAttribute(Attribute::StackAlignment))
95    return Fn->getFnStackAlignment();
96  return STI->getFrameLowering()->getStackAlignment();
97}
98
/// Construct the machine-level representation for IR function \p F.
/// Sets up the register info, frame info, constant pool, alignment, and
/// (for funclet-style EH personalities) the Windows EH info. MFInfo and
/// JumpTableInfo are created lazily elsewhere.
MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
                                 unsigned FunctionNum, MachineModuleInfo &mmi)
    : Fn(F), Target(TM), STI(TM.getSubtargetImpl(*F)), Ctx(mmi.getContext()),
      MMI(mmi) {
  // Assume the function starts in SSA form with correct liveness.
  Properties.set(MachineFunctionProperties::Property::IsSSA);
  Properties.set(MachineFunctionProperties::Property::TracksLiveness);
  // Targets without register info (e.g. some pseudo targets) get no MRI.
  if (STI->getRegisterInfo())
    RegInfo = new (Allocator) MachineRegisterInfo(this);
  else
    RegInfo = nullptr;

  MFInfo = nullptr;
  // We can realign the stack if the target supports it and the user hasn't
  // explicitly asked us not to.
  bool CanRealignSP = STI->getFrameLowering()->isStackRealignable() &&
                      !F->hasFnAttribute("no-realign-stack");
  FrameInfo = new (Allocator) MachineFrameInfo(
      getFnStackAlignment(STI, Fn), /*StackRealignable=*/CanRealignSP,
      /*ForceRealign=*/CanRealignSP &&
          F->hasFnAttribute(Attribute::StackAlignment));

  // An explicit stackalign attribute raises the frame's max alignment up
  // front so later objects cannot silently lower it.
  if (Fn->hasFnAttribute(Attribute::StackAlignment))
    FrameInfo->ensureMaxAlignment(Fn->getFnStackAlignment());

  ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
  Alignment = STI->getTargetLowering()->getMinFunctionAlignment();

  // FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
  // FIXME: Use Function::optForSize().
  if (!Fn->hasFnAttribute(Attribute::OptimizeForSize))
    Alignment = std::max(Alignment,
                         STI->getTargetLowering()->getPrefFunctionAlignment());

  // -align-all-functions overrides everything computed above.
  if (AlignAllFunctions)
    Alignment = AlignAllFunctions;

  FunctionNumber = FunctionNum;
  JumpTableInfo = nullptr;

  // Funclet-based EH personalities (e.g. MSVC C++/SEH) need WinEHFuncInfo.
  if (isFuncletEHPersonality(classifyEHPersonality(
          F->hasPersonalityFn() ? F->getPersonalityFn() : nullptr))) {
    WinEHInfo = new (Allocator) WinEHFuncInfo();
  }

  assert(TM.isCompatibleDataLayout(getDataLayout()) &&
         "Can't create a MachineFunction using a Module with a "
         "Target-incompatible DataLayout attached\n");

  PSVManager = llvm::make_unique<PseudoSourceValueManager>();
}
150
/// Tear down the machine function. Most per-instruction memory lives in the
/// BumpPtrAllocator, so MachineInstr/MachineOperand destructors are
/// deliberately NOT run; only objects owning real resources (std::vectors
/// etc.) are destroyed explicitly.
MachineFunction::~MachineFunction() {
  // Don't call destructors on MachineInstr and MachineOperand. All of their
  // memory comes from the BumpPtrAllocator which is about to be purged.
  //
  // Do call MachineBasicBlock destructors, it contains std::vectors.
  for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
    I->Insts.clearAndLeakNodesUnsafely();

  InstructionRecycler.clear(Allocator);
  OperandRecycler.clear(Allocator);
  BasicBlockRecycler.clear(Allocator);
  // The remaining members were placement-new'd into the Allocator, so run
  // their destructors manually and hand the memory back.
  if (RegInfo) {
    RegInfo->~MachineRegisterInfo();
    Allocator.Deallocate(RegInfo);
  }
  if (MFInfo) {
    MFInfo->~MachineFunctionInfo();
    Allocator.Deallocate(MFInfo);
  }

  FrameInfo->~MachineFrameInfo();
  Allocator.Deallocate(FrameInfo);

  ConstantPool->~MachineConstantPool();
  Allocator.Deallocate(ConstantPool);

  if (JumpTableInfo) {
    JumpTableInfo->~MachineJumpTableInfo();
    Allocator.Deallocate(JumpTableInfo);
  }

  if (WinEHInfo) {
    WinEHInfo->~WinEHFuncInfo();
    Allocator.Deallocate(WinEHInfo);
  }
}
187
// The function's DataLayout is simply the one attached to its parent Module.
const DataLayout &MachineFunction::getDataLayout() const {
  return Fn->getParent()->getDataLayout();
}
191
192/// Get the JumpTableInfo for this function.
193/// If it does not already exist, allocate one.
194MachineJumpTableInfo *MachineFunction::
195getOrCreateJumpTableInfo(unsigned EntryKind) {
196  if (JumpTableInfo) return JumpTableInfo;
197
198  JumpTableInfo = new (Allocator)
199    MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
200  return JumpTableInfo;
201}
202
/// Should we be emitting segmented stack stuff for the function
/// Controlled entirely by the IR-level "split-stack" string attribute.
bool MachineFunction::shouldSplitStack() const {
  return getFunction()->hasFnAttribute("split-stack");
}
207
/// This discards all of the MachineBasicBlock numbers and recomputes them.
/// This guarantees that the MBB numbers are sequential, dense, and match the
/// ordering of the blocks within the function.  If a specific MachineBasicBlock
/// is specified, only that block and those after it are renumbered.
void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
  if (empty()) { MBBNumbering.clear(); return; }
  MachineFunction::iterator MBBI, E = end();
  if (MBB == nullptr)
    MBBI = begin();
  else
    MBBI = MBB->getIterator();

  // Figure out the block number this should have: one past the number of the
  // block immediately before the starting point.
  unsigned BlockNo = 0;
  if (MBBI != begin())
    BlockNo = std::prev(MBBI)->getNumber() + 1;

  for (; MBBI != E; ++MBBI, ++BlockNo) {
    if (MBBI->getNumber() != (int)BlockNo) {
      // Remove use of the old number.
      if (MBBI->getNumber() != -1) {
        assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
               "MBB number mismatch!");
        MBBNumbering[MBBI->getNumber()] = nullptr;
      }

      // If BlockNo is already taken, set that block's number to -1; it will
      // be renumbered (or its slot reclaimed) on a later iteration.
      if (MBBNumbering[BlockNo])
        MBBNumbering[BlockNo]->setNumber(-1);

      MBBNumbering[BlockNo] = &*MBBI;
      MBBI->setNumber(BlockNo);
    }
  }

  // Okay, all the blocks are renumbered.  If we have compactified the block
  // numbering, shrink MBBNumbering now.
  assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
  MBBNumbering.resize(BlockNo);
}
248
/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
/// Memory comes from the per-function recycler/BumpPtrAllocator, which is why
/// instructions must be freed via DeleteMachineInstr, never `delete`.
MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
                                                  const DebugLoc &DL,
                                                  bool NoImp) {
  return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
    MachineInstr(*this, MCID, DL, NoImp);
}
256
/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
/// identical in all ways except the instruction has no parent, prev, or next.
/// Like CreateMachineInstr, the copy is recycler-allocated.
MachineInstr *
MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
  return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
             MachineInstr(*this, *Orig);
}
264
/// Delete the given MachineInstr.
///
/// This function also serves as the MachineInstr destructor - the real
/// ~MachineInstr() destructor must be empty.
void
MachineFunction::DeleteMachineInstr(MachineInstr *MI) {
  // Strip it for parts. The operand array and the MI object itself are
  // independently recyclable.
  if (MI->Operands)
    deallocateOperandArray(MI->CapOperands, MI->Operands);
  // Don't call ~MachineInstr() which must be trivial anyway because
  // ~MachineFunction drops whole lists of MachineInstrs without calling their
  // destructors.
  InstructionRecycler.Deallocate(Allocator, MI);
}
280
/// Allocate a new MachineBasicBlock. Use this instead of
/// `new MachineBasicBlock'. \p bb is the corresponding IR block, and may be
/// null for blocks with no IR counterpart.
MachineBasicBlock *
MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
  return new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
             MachineBasicBlock(*this, bb);
}
288
/// Delete the given MachineBasicBlock.
/// Unlike MachineInstr, the block's destructor IS run (it owns std::vectors)
/// before its memory is returned to the recycler.
void
MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
  assert(MBB->getParent() == this && "MBB parent mismatch!");
  MBB->~MachineBasicBlock();
  BasicBlockRecycler.Deallocate(Allocator, MBB);
}
296
/// Allocate a MachineMemOperand describing a memory access of size \p s bytes
/// with flags \p f (raw bits of MachineMemOperand::Flags). The object lives
/// in the function's allocator and is never individually freed.
MachineMemOperand *
MachineFunction::getMachineMemOperand(MachinePointerInfo PtrInfo, unsigned f,
                                      uint64_t s, unsigned base_alignment,
                                      const AAMDNodes &AAInfo,
                                      const MDNode *Ranges) {
  // FIXME: Get rid of this static_cast and make getMachineOperand take a
  // MachineMemOperand::Flags param.
  return new (Allocator)
      MachineMemOperand(PtrInfo, static_cast<MachineMemOperand::Flags>(f), s,
                        base_alignment, AAInfo, Ranges);
}
308
/// Allocate a MachineMemOperand derived from \p MMO but describing a slice at
/// \p Offset bytes further with size \p Size. The underlying IR value or
/// pseudo-source-value is preserved; only offset and size change.
MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      int64_t Offset, uint64_t Size) {
  // A pointer info is keyed on either an IR Value or a PseudoSourceValue;
  // pick the constructor matching whichever the original operand carries.
  if (MMO->getValue())
    return new (Allocator)
               MachineMemOperand(MachinePointerInfo(MMO->getValue(),
                                                    MMO->getOffset()+Offset),
                                 MMO->getFlags(), Size,
                                 MMO->getBaseAlignment());
  return new (Allocator)
             MachineMemOperand(MachinePointerInfo(MMO->getPseudoValue(),
                                                  MMO->getOffset()+Offset),
                               MMO->getFlags(), Size,
                               MMO->getBaseAlignment());
}
324
/// Allocate (uninitialized) space for \p Num MachineMemOperand pointers from
/// the function's allocator; used as a MachineInstr memref array.
MachineInstr::mmo_iterator
MachineFunction::allocateMemRefsArray(unsigned long Num) {
  return Allocator.Allocate<MachineMemOperand *>(Num);
}
329
/// Extract the load-only memory references from [Begin, End) into a freshly
/// allocated array, returning its [begin, end) pair. Pure loads are shared;
/// load+store operands are cloned with the store flag cleared.
std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
MachineFunction::extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
                                    MachineInstr::mmo_iterator End) {
  // Count the number of load mem refs.
  unsigned Num = 0;
  for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
    if ((*I)->isLoad())
      ++Num;

  // Allocate a new array and populate it with the load information.
  MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
  unsigned Index = 0;
  for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
    if ((*I)->isLoad()) {
      if (!(*I)->isStore())
        // Reuse the MMO.
        Result[Index] = *I;
      else {
        // Clone the MMO and unset the store flag.
        MachineMemOperand *JustLoad =
          getMachineMemOperand((*I)->getPointerInfo(),
                               (*I)->getFlags() & ~MachineMemOperand::MOStore,
                               (*I)->getSize(), (*I)->getBaseAlignment(),
                               (*I)->getAAInfo());
        Result[Index] = JustLoad;
      }
      ++Index;
    }
  }
  return std::make_pair(Result, Result + Num);
}
361
/// Extract the store-only memory references from [Begin, End) into a freshly
/// allocated array, returning its [begin, end) pair. Pure stores are shared;
/// load+store operands are cloned with the load flag cleared.
std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
MachineFunction::extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
                                     MachineInstr::mmo_iterator End) {
  // Count the number of store mem refs.
  unsigned Num = 0;
  for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
    if ((*I)->isStore())
      ++Num;

  // Allocate a new array and populate it with the store information.
  MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
  unsigned Index = 0;
  for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
    if ((*I)->isStore()) {
      if (!(*I)->isLoad())
        // Reuse the MMO.
        Result[Index] = *I;
      else {
        // Clone the MMO and unset the load flag.
        MachineMemOperand *JustStore =
          getMachineMemOperand((*I)->getPointerInfo(),
                               (*I)->getFlags() & ~MachineMemOperand::MOLoad,
                               (*I)->getSize(), (*I)->getBaseAlignment(),
                               (*I)->getAAInfo());
        Result[Index] = JustStore;
      }
      ++Index;
    }
  }
  return std::make_pair(Result, Result + Num);
}
393
394const char *MachineFunction::createExternalSymbolName(StringRef Name) {
395  char *Dest = Allocator.Allocate<char>(Name.size() + 1);
396  std::copy(Name.begin(), Name.end(), Dest);
397  Dest[Name.size()] = 0;
398  return Dest;
399}
400
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger convenience: print this function to the debug stream.
LLVM_DUMP_METHOD void MachineFunction::dump() const {
  print(dbgs());
}
#endif
406
// A machine function is named after the IR function it was created from.
StringRef MachineFunction::getName() const {
  assert(getFunction() && "No function!");
  return getFunction()->getName();
}
411
/// Print the entire machine function to \p OS: header with properties, frame
/// objects, jump tables, constant pool, live-in registers, then every basic
/// block (with slot indexes if \p Indexes is non-null).
void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
  OS << "# Machine code for function " << getName() << ": ";
  OS << "Properties: <";
  getProperties().print(OS);
  OS << ">\n";

  // Print Frame Information
  FrameInfo->print(*this, OS);

  // Print JumpTable Information
  if (JumpTableInfo)
    JumpTableInfo->print(OS);

  // Print Constant Pool
  ConstantPool->print(OS);

  const TargetRegisterInfo *TRI = getSubtarget().getRegisterInfo();

  if (RegInfo && !RegInfo->livein_empty()) {
    OS << "Function Live Ins: ";
    for (MachineRegisterInfo::livein_iterator
         I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
      OS << PrintReg(I->first, TRI);
      // I->second is the virtual register the physreg was copied into, if any.
      if (I->second)
        OS << " in " << PrintReg(I->second, TRI);
      if (std::next(I) != E)
        OS << ", ";
    }
    OS << '\n';
  }

  // A module-level slot tracker lets blocks print IR value names/slots.
  ModuleSlotTracker MST(getFunction()->getParent());
  MST.incorporateFunction(*getFunction());
  for (const auto &BB : *this) {
    OS << '\n';
    BB.print(OS, MST, Indexes);
  }

  OS << "\n# End machine code for function " << getName() << ".\n\n";
}
452
namespace llvm {
  // GraphWriter glue: teaches ViewGraph how to label a MachineFunction CFG.
  template<>
  struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {

  DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}

    static std::string getGraphName(const MachineFunction *F) {
      return ("CFG for '" + F->getName() + "' function").str();
    }

    // Node label: in simple mode just "BB#N[: irname]"; otherwise the block's
    // full printed form, post-processed for DOT.
    std::string getNodeLabel(const MachineBasicBlock *Node,
                             const MachineFunction *Graph) {
      std::string OutStr;
      {
        raw_string_ostream OSS(OutStr);

        if (isSimple()) {
          OSS << "BB#" << Node->getNumber();
          if (const BasicBlock *BB = Node->getBasicBlock())
            OSS << ": " << BB->getName();
        } else
          Node->print(OSS);
      }

      if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());

      // Process string output to make it nicer...
      // Replace each newline with DOT's "\l" left-justified line break.
      for (unsigned i = 0; i != OutStr.length(); ++i)
        if (OutStr[i] == '\n') {                            // Left justify
          OutStr[i] = '\\';
          OutStr.insert(OutStr.begin()+i+1, 'l');
        }
      return OutStr;
    }
  };
}
489
/// Pop up a Graphviz/gv window showing the full CFG (blocks with contents).
/// Only functional in debug builds; release builds print a notice instead.
void MachineFunction::viewCFG() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName());
#else
  errs() << "MachineFunction::viewCFG is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
499
/// Like viewCFG, but passes ShortNames=true so nodes show only block labels,
/// not instruction contents.
void MachineFunction::viewCFGOnly() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName(), true);
#else
  errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
509
/// Add the specified physical register as a live-in value and
/// create a corresponding virtual register for it.
/// Idempotent: if \p PReg was already added, the existing virtual register is
/// returned (after sanity-checking its register class against \p RC).
unsigned MachineFunction::addLiveIn(unsigned PReg,
                                    const TargetRegisterClass *RC) {
  MachineRegisterInfo &MRI = getRegInfo();
  unsigned VReg = MRI.getLiveInVirtReg(PReg);
  if (VReg) {
    const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
    (void)VRegRC;
    // A physical register can be added several times.
    // Between two calls, the register class of the related virtual register
    // may have been constrained to match some operation constraints.
    // In that case, check that the current register class includes the
    // physical register and is a sub class of the specified RC.
    assert((VRegRC == RC || (VRegRC->contains(PReg) &&
                             RC->hasSubClassEq(VRegRC))) &&
            "Register class mismatch!");
    return VReg;
  }
  VReg = MRI.createVirtualRegister(RC);
  MRI.addLiveIn(PReg, VReg);
  return VReg;
}
533
/// Return the MCSymbol for the specified non-empty jump table.
/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
/// normal 'L' label is returned.
/// Symbol name shape: <prefix>JTI<function#>_<jump-table#>.
MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
                                        bool isLinkerPrivate) const {
  const DataLayout &DL = getDataLayout();
  assert(JumpTableInfo && "No jump tables");
  assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");

  const char *Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
                                       : DL.getPrivateGlobalPrefix();
  SmallString<60> Name;
  raw_svector_ostream(Name)
    << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
  return Ctx.getOrCreateSymbol(Name);
}
550
/// Return a function-local symbol to represent the PIC base.
/// Symbol name shape: <private-prefix><function#>$pb.
MCSymbol *MachineFunction::getPICBaseSymbol() const {
  const DataLayout &DL = getDataLayout();
  return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
                               Twine(getFunctionNumber()) + "$pb");
}
557
558//===----------------------------------------------------------------------===//
559//  MachineFrameInfo implementation
560//===----------------------------------------------------------------------===//
561
562/// Make sure the function is at least Align bytes aligned.
563void MachineFrameInfo::ensureMaxAlignment(unsigned Align) {
564  if (!StackRealignable)
565    assert(Align <= StackAlignment &&
566           "For targets without stack realignment, Align is out of limit!");
567  if (MaxAlignment < Align) MaxAlignment = Align;
568}
569
570/// Clamp the alignment if requested and emit a warning.
571static inline unsigned clampStackAlignment(bool ShouldClamp, unsigned Align,
572                                           unsigned StackAlign) {
573  if (!ShouldClamp || Align <= StackAlign)
574    return Align;
575  DEBUG(dbgs() << "Warning: requested alignment " << Align
576               << " exceeds the stack alignment " << StackAlign
577               << " when stack realignment is off" << '\n');
578  return StackAlign;
579}
580
/// Create a new statically sized stack object, returning a nonnegative
/// identifier to represent it.
/// \p isSS marks the object as a spill slot; \p Alloca (may be null) records
/// the originating IR alloca. Alignment is clamped when realignment is off.
int MachineFrameInfo::CreateStackObject(uint64_t Size, unsigned Alignment,
                      bool isSS, const AllocaInst *Alloca) {
  assert(Size != 0 && "Cannot allocate zero size stack objects!");
  Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
  Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, Alloca,
                                !isSS));
  // Non-fixed objects live after the NumFixedObjects entries at the front.
  int Index = (int)Objects.size() - NumFixedObjects - 1;
  assert(Index >= 0 && "Bad frame index!");
  ensureMaxAlignment(Alignment);
  return Index;
}
594
595/// Create a new statically sized stack object that represents a spill slot,
596/// returning a nonnegative identifier to represent it.
597int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
598                                             unsigned Alignment) {
599  Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
600  CreateStackObject(Size, Alignment, true);
601  int Index = (int)Objects.size() - NumFixedObjects - 1;
602  ensureMaxAlignment(Alignment);
603  return Index;
604}
605
/// Notify the MachineFrameInfo object that a variable sized object has been
/// created. This must be created whenever a variable sized object is created,
/// whether or not the index returned is actually used.
/// A size of 0 in the StackObject marks it as variable sized.
int MachineFrameInfo::CreateVariableSizedObject(unsigned Alignment,
                                                const AllocaInst *Alloca) {
  HasVarSizedObjects = true;
  Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
  Objects.push_back(StackObject(0, Alignment, 0, false, false, Alloca, true));
  ensureMaxAlignment(Alignment);
  return (int)Objects.size()-NumFixedObjects-1;
}
617
/// Create a new object at a fixed location on the stack.
/// All fixed objects should be created before other objects are created for
/// efficiency. By default, fixed objects are immutable. This returns an
/// index with a negative value.
int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
                                        bool Immutable, bool isAliased) {
  assert(Size != 0 && "Cannot allocate zero size fixed stack objects!");
  // The alignment of the frame index can be determined from its offset from
  // the incoming frame position.  If the frame object is at offset 32 and
  // the stack is guaranteed to be 16-byte aligned, then we know that the
  // object is 16-byte aligned. Note that unlike the non-fixed case, if the
  // stack needs realignment, we can't assume that the stack will in fact be
  // aligned.
  unsigned Align = MinAlign(SPOffset, ForcedRealign ? 1 : StackAlignment);
  Align = clampStackAlignment(!StackRealignable, Align, StackAlignment);
  // Fixed objects go at the front of the Objects list, addressed with
  // negative indices (-1 is the first fixed object).
  Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
                                              /*isSS*/   false,
                                              /*Alloca*/ nullptr, isAliased));
  return -++NumFixedObjects;
}
638
/// Create a spill slot at a fixed location on the stack.
/// Returns an index with a negative value.
/// Like CreateFixedObject, but the object is always an immutable,
/// non-aliased spill slot.
int MachineFrameInfo::CreateFixedSpillStackObject(uint64_t Size,
                                                  int64_t SPOffset) {
  // Alignment derives from the offset's low bits relative to the incoming SP.
  unsigned Align = MinAlign(SPOffset, ForcedRealign ? 1 : StackAlignment);
  Align = clampStackAlignment(!StackRealignable, Align, StackAlignment);
  Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset,
                                              /*Immutable*/ true,
                                              /*isSS*/ true,
                                              /*Alloca*/ nullptr,
                                              /*isAliased*/ false));
  return -++NumFixedObjects;
}
652
/// Return a bit vector of the callee-saved registers that are "pristine":
/// callee-saved for \p MF but not actually saved anywhere yet, so their
/// incoming values must be preserved.
BitVector MachineFrameInfo::getPristineRegs(const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  BitVector BV(TRI->getNumRegs());

  // Before CSI is calculated, no registers are considered pristine. They can be
  // freely used and PEI will make sure they are saved.
  if (!isCalleeSavedInfoValid())
    return BV;

  // Start from the full callee-saved set...
  for (const MCPhysReg *CSR = TRI->getCalleeSavedRegs(&MF); CSR && *CSR; ++CSR)
    BV.set(*CSR);

  // Saved CSRs are not pristine.
  for (auto &I : getCalleeSavedInfo())
    for (MCSubRegIterator S(I.getReg(), TRI, true); S.isValid(); ++S)
      BV.reset(*S);

  return BV;
}
672
/// Estimate the total stack frame size for \p MF by laying out fixed objects,
/// live stack objects, and (if reserved) the maximum call frame, then
/// rounding up to the applicable stack alignment.
unsigned MachineFrameInfo::estimateStackSize(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  unsigned MaxAlign = getMaxAlignment();
  int Offset = 0;

  // This code is very, very similar to PEI::calculateFrameObjectOffsets().
  // It really should be refactored to share code. Until then, changes
  // should keep in mind that there's tight coupling between the two.

  // Fixed objects have negative indices; the deepest one bounds the start
  // offset for the non-fixed objects.
  for (int i = getObjectIndexBegin(); i != 0; ++i) {
    int FixedOff = -getObjectOffset(i);
    if (FixedOff > Offset) Offset = FixedOff;
  }
  for (unsigned i = 0, e = getObjectIndexEnd(); i != e; ++i) {
    if (isDeadObjectIndex(i))
      continue;
    Offset += getObjectSize(i);
    unsigned Align = getObjectAlignment(i);
    // Adjust to alignment boundary
    Offset = (Offset+Align-1)/Align*Align;

    MaxAlign = std::max(Align, MaxAlign);
  }

  // When the call frame is folded into the local frame, reserve its maximum.
  if (adjustsStack() && TFI->hasReservedCallFrame(MF))
    Offset += getMaxCallFrameSize();

  // Round up the size to a multiple of the alignment.  If the function has
  // any calls or alloca's, align to the target's StackAlignment value to
  // ensure that the callee's frame or the alloca data is suitably aligned;
  // otherwise, for leaf functions, align to the TransientStackAlignment
  // value.
  unsigned StackAlign;
  if (adjustsStack() || hasVarSizedObjects() ||
      (RegInfo->needsStackRealignment(MF) && getObjectIndexEnd() != 0))
    StackAlign = TFI->getStackAlignment();
  else
    StackAlign = TFI->getTransientStackAlignment();

  // If the frame pointer is eliminated, all frame offsets will be relative to
  // SP not FP. Align to MaxAlign so this works.
  StackAlign = std::max(StackAlign, MaxAlign);
  unsigned AlignMask = StackAlign - 1;
  Offset = (Offset + AlignMask) & ~uint64_t(AlignMask);

  return (unsigned)Offset;
}
721
/// Print a description of every frame object (fixed and non-fixed) to \p OS,
/// including size, alignment, and SP-relative location when known.
void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{
  if (Objects.empty()) return;

  // Report offsets relative to the top of the local area, if the target
  // defines one.
  const TargetFrameLowering *FI = MF.getSubtarget().getFrameLowering();
  int ValOffset = (FI ? FI->getOffsetOfLocalArea() : 0);

  OS << "Frame Objects:\n";

  for (unsigned i = 0, e = Objects.size(); i != e; ++i) {
    const StackObject &SO = Objects[i];
    // Fixed objects come first in Objects, so shift i to the user-visible
    // frame index (negative for fixed objects).
    OS << "  fi#" << (int)(i-NumFixedObjects) << ": ";
    // ~0ULL size marks a dead (removed) object.
    if (SO.Size == ~0ULL) {
      OS << "dead\n";
      continue;
    }
    if (SO.Size == 0)
      OS << "variable sized";
    else
      OS << "size=" << SO.Size;
    OS << ", align=" << SO.Alignment;

    if (i < NumFixedObjects)
      OS << ", fixed";
    if (i < NumFixedObjects || SO.SPOffset != -1) {
      int64_t Off = SO.SPOffset - ValOffset;
      OS << ", at location [SP";
      if (Off > 0)
        OS << "+" << Off;
      else if (Off < 0)
        OS << Off;
      OS << "]";
    }
    OS << "\n";
  }
}
757
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger convenience: print the frame info to the debug stream.
void MachineFrameInfo::dump(const MachineFunction &MF) const {
  print(MF, dbgs());
}
#endif
763
764//===----------------------------------------------------------------------===//
765//  MachineJumpTableInfo implementation
766//===----------------------------------------------------------------------===//
767
/// Return the size of each entry in the jump table, in bytes, as a function
/// of the table's encoding kind and the given DataLayout.
unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
  // The size of a jump table entry is 4 bytes unless the entry is just the
  // address of a block, in which case it is the pointer size.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    return TD.getPointerSize();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
    return 8;
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
    return 4;
  case MachineJumpTableInfo::EK_Inline:
    // Inline jump tables emit no separate table data.
    return 0;
  }
  llvm_unreachable("Unknown jump table encoding!");
}
786
/// Return the alignment of each entry in the jump table, in bytes, as a
/// function of the table's encoding kind and the given DataLayout.
unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
  // The alignment of a jump table entry is the alignment of int32 unless the
  // entry is just the address of a block, in which case it is the pointer
  // alignment.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    return TD.getPointerABIAlignment();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
    return TD.getABIIntegerTypeAlignment(64);
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
    return TD.getABIIntegerTypeAlignment(32);
  case MachineJumpTableInfo::EK_Inline:
    // No table data is emitted, so no alignment constraint.
    return 1;
  }
  llvm_unreachable("Unknown jump table encoding!");
}
806
807/// Create a new jump table entry in the jump table info.
808unsigned MachineJumpTableInfo::createJumpTableIndex(
809                               const std::vector<MachineBasicBlock*> &DestBBs) {
810  assert(!DestBBs.empty() && "Cannot create an empty jump table!");
811  JumpTables.push_back(MachineJumpTableEntry(DestBBs));
812  return JumpTables.size()-1;
813}
814
815/// If Old is the target of any jump tables, update the jump tables to branch
816/// to New instead.
817bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
818                                                  MachineBasicBlock *New) {
819  assert(Old != New && "Not making a change?");
820  bool MadeChange = false;
821  for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
822    ReplaceMBBInJumpTable(i, Old, New);
823  return MadeChange;
824}
825
826/// If Old is a target of the jump tables, update the jump table to branch to
827/// New instead.
828bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
829                                                 MachineBasicBlock *Old,
830                                                 MachineBasicBlock *New) {
831  assert(Old != New && "Not making a change?");
832  bool MadeChange = false;
833  MachineJumpTableEntry &JTE = JumpTables[Idx];
834  for (size_t j = 0, e = JTE.MBBs.size(); j != e; ++j)
835    if (JTE.MBBs[j] == Old) {
836      JTE.MBBs[j] = New;
837      MadeChange = true;
838    }
839  return MadeChange;
840}
841
842void MachineJumpTableInfo::print(raw_ostream &OS) const {
843  if (JumpTables.empty()) return;
844
845  OS << "Jump Tables:\n";
846
847  for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
848    OS << "  jt#" << i << ": ";
849    for (unsigned j = 0, f = JumpTables[i].MBBs.size(); j != f; ++j)
850      OS << " BB#" << JumpTables[i].MBBs[j]->getNumber();
851  }
852
853  OS << '\n';
854}
855
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print all jump tables to the debug stream. Debug builds only.
LLVM_DUMP_METHOD void MachineJumpTableInfo::dump() const { print(dbgs()); }
#endif
859
860
861//===----------------------------------------------------------------------===//
862//  MachineConstantPool implementation
863//===----------------------------------------------------------------------===//
864
// Out-of-line virtual method "anchor": pins MachineConstantPoolValue's vtable
// to this translation unit so it is emitted exactly once.
void MachineConstantPoolValue::anchor() { }
866
867Type *MachineConstantPoolEntry::getType() const {
868  if (isMachineConstantPoolEntry())
869    return Val.MachineCPVal->getType();
870  return Val.ConstVal->getType();
871}
872
873bool MachineConstantPoolEntry::needsRelocation() const {
874  if (isMachineConstantPoolEntry())
875    return true;
876  return Val.ConstVal->needsRelocation();
877}
878
879SectionKind
880MachineConstantPoolEntry::getSectionKind(const DataLayout *DL) const {
881  if (needsRelocation())
882    return SectionKind::getReadOnlyWithRel();
883  switch (DL->getTypeAllocSize(getType())) {
884  case 4:
885    return SectionKind::getMergeableConst4();
886  case 8:
887    return SectionKind::getMergeableConst8();
888  case 16:
889    return SectionKind::getMergeableConst16();
890  case 32:
891    return SectionKind::getMergeableConst32();
892  default:
893    return SectionKind::getReadOnly();
894  }
895}
896
897MachineConstantPool::~MachineConstantPool() {
898  for (unsigned i = 0, e = Constants.size(); i != e; ++i)
899    if (Constants[i].isMachineConstantPoolEntry())
900      delete Constants[i].Val.MachineCPVal;
901  for (DenseSet<MachineConstantPoolValue*>::iterator I =
902       MachineCPVsSharingEntries.begin(), E = MachineCPVsSharingEntries.end();
903       I != E; ++I)
904    delete *I;
905}
906
907/// Test whether the given two constants can be allocated the same constant pool
908/// entry.
909static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
910                                      const DataLayout &DL) {
911  // Handle the trivial case quickly.
912  if (A == B) return true;
913
914  // If they have the same type but weren't the same constant, quickly
915  // reject them.
916  if (A->getType() == B->getType()) return false;
917
918  // We can't handle structs or arrays.
919  if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
920      isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
921    return false;
922
923  // For now, only support constants with the same size.
924  uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
925  if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
926    return false;
927
928  Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
929
930  // Try constant folding a bitcast of both instructions to an integer.  If we
931  // get two identical ConstantInt's, then we are good to share them.  We use
932  // the constant folding APIs to do this so that we get the benefit of
933  // DataLayout.
934  if (isa<PointerType>(A->getType()))
935    A = ConstantFoldCastOperand(Instruction::PtrToInt,
936                                const_cast<Constant *>(A), IntTy, DL);
937  else if (A->getType() != IntTy)
938    A = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(A),
939                                IntTy, DL);
940  if (isa<PointerType>(B->getType()))
941    B = ConstantFoldCastOperand(Instruction::PtrToInt,
942                                const_cast<Constant *>(B), IntTy, DL);
943  else if (B->getType() != IntTy)
944    B = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(B),
945                                IntTy, DL);
946
947  return A == B;
948}
949
950/// Create a new entry in the constant pool or return an existing one.
951/// User must specify the log2 of the minimum required alignment for the object.
952unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
953                                                   unsigned Alignment) {
954  assert(Alignment && "Alignment must be specified!");
955  if (Alignment > PoolAlignment) PoolAlignment = Alignment;
956
957  // Check to see if we already have this constant.
958  //
959  // FIXME, this could be made much more efficient for large constant pools.
960  for (unsigned i = 0, e = Constants.size(); i != e; ++i)
961    if (!Constants[i].isMachineConstantPoolEntry() &&
962        CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
963      if ((unsigned)Constants[i].getAlignment() < Alignment)
964        Constants[i].Alignment = Alignment;
965      return i;
966    }
967
968  Constants.push_back(MachineConstantPoolEntry(C, Alignment));
969  return Constants.size()-1;
970}
971
972unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
973                                                   unsigned Alignment) {
974  assert(Alignment && "Alignment must be specified!");
975  if (Alignment > PoolAlignment) PoolAlignment = Alignment;
976
977  // Check to see if we already have this constant.
978  //
979  // FIXME, this could be made much more efficient for large constant pools.
980  int Idx = V->getExistingMachineCPValue(this, Alignment);
981  if (Idx != -1) {
982    MachineCPVsSharingEntries.insert(V);
983    return (unsigned)Idx;
984  }
985
986  Constants.push_back(MachineConstantPoolEntry(V, Alignment));
987  return Constants.size()-1;
988}
989
990void MachineConstantPool::print(raw_ostream &OS) const {
991  if (Constants.empty()) return;
992
993  OS << "Constant Pool:\n";
994  for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
995    OS << "  cp#" << i << ": ";
996    if (Constants[i].isMachineConstantPoolEntry())
997      Constants[i].Val.MachineCPVal->print(OS);
998    else
999      Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
1000    OS << ", align=" << Constants[i].getAlignment();
1001    OS << "\n";
1002  }
1003}
1004
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the constant pool to the debug stream. Debug builds only.
LLVM_DUMP_METHOD void MachineConstantPool::dump() const { print(dbgs()); }
#endif
1008