TargetLowering.h revision 86098bd6a63d2cdf0c9be9ef3151bd2728281fd7
1//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file describes how to lower LLVM code to machine code.  This has
11// three main components:
12//
13//  1. Which ValueTypes are natively supported by the target.
14//  2. Which operations are supported for supported ValueTypes.
15//  3. Cost thresholds for alternative implementations of certain operations.
16//
17// In addition it has a few other components, like information about FP
18// immediates.
19//
20//===----------------------------------------------------------------------===//
21
22#ifndef LLVM_TARGET_TARGETLOWERING_H
23#define LLVM_TARGET_TARGETLOWERING_H
24
25#include "llvm/Constants.h"
26#include "llvm/InlineAsm.h"
27#include "llvm/CodeGen/SelectionDAGNodes.h"
28#include "llvm/CodeGen/RuntimeLibcalls.h"
29#include "llvm/ADT/APFloat.h"
30#include "llvm/ADT/DenseMap.h"
31#include "llvm/ADT/STLExtras.h"
32#include <map>
33#include <vector>
34
35namespace llvm {
36  class AllocaInst;
37  class Function;
38  class FastISel;
39  class MachineBasicBlock;
40  class MachineFunction;
41  class MachineFrameInfo;
42  class MachineInstr;
43  class MachineModuleInfo;
44  class SDNode;
45  class SDValue;
46  class SelectionDAG;
47  class TargetData;
48  class TargetMachine;
49  class TargetRegisterClass;
50  class TargetSubtarget;
51  class Value;
52  class VectorType;
53
54//===----------------------------------------------------------------------===//
55/// TargetLowering - This class defines information used to lower LLVM code to
56/// legal SelectionDAG operators that the target instruction selector can accept
57/// natively.
58///
59/// This class also defines callbacks that targets must implement to lower
60/// target-specific constructs to SelectionDAG operators.
61///
62class TargetLowering {
63public:
64  /// LegalizeAction - This enum indicates whether operations are valid for a
65  /// target, and if not, what action should be used to make them valid.
66  enum LegalizeAction {
67    Legal,      // The target natively supports this operation.
68    Promote,    // This operation should be executed in a larger type.
69    Expand,     // Try to expand this to other ops, otherwise use a libcall.
70    Custom      // Use the LowerOperation hook to implement custom lowering.
71  };
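  // Illustrative mapping (hypothetical 32-bit target; configured via the
  // setOperationAction method declared later in this class):
  //
  //   setOperationAction(ISD::ADD,    MVT::i32, Legal);   // native instruction
  //   setOperationAction(ISD::ADD,    MVT::i16, Promote); // performed in i32
  //   setOperationAction(ISD::SDIV,   MVT::i64, Expand);  // other ops / libcall
  //   setOperationAction(ISD::SELECT, MVT::f64, Custom);  // LowerOperation hook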
72
73  enum OutOfRangeShiftAmount {
74    Undefined,  // Oversized shift amounts are undefined (default).
75    Mask,       // Shift amounts are auto masked (anded) to value size.
76    Extend      // Oversized shift pulls in zeros or sign bits.
77  };
78
79  enum SetCCResultValue {
80    UndefinedSetCCResult,          // SetCC returns a garbage/unknown extend.
81    ZeroOrOneSetCCResult,          // SetCC returns a zero extended result.
82    ZeroOrNegativeOneSetCCResult   // SetCC returns a sign extended result.
83  };
84
85  enum SchedPreference {
86    SchedulingForLatency,          // Scheduling for shortest total latency.
87    SchedulingForRegPressure       // Scheduling for lowest register pressure.
88  };
89
90  explicit TargetLowering(TargetMachine &TM);
91  virtual ~TargetLowering();
92
93  TargetMachine &getTargetMachine() const { return TM; }
94  const TargetData *getTargetData() const { return TD; }
95
96  bool isBigEndian() const { return !IsLittleEndian; }
97  bool isLittleEndian() const { return IsLittleEndian; }
98  MVT getPointerTy() const { return PointerTy; }
99  MVT getShiftAmountTy() const { return ShiftAmountTy; }
100  OutOfRangeShiftAmount getShiftAmountFlavor() const {return ShiftAmtHandling; }
101
102  /// usesGlobalOffsetTable - Return true if this target uses a GOT for PIC
103  /// codegen.
104  bool usesGlobalOffsetTable() const { return UsesGlobalOffsetTable; }
105
106  /// isSelectExpensive - Return true if the select operation is expensive for
107  /// this target.
108  bool isSelectExpensive() const { return SelectIsExpensive; }
109
110  /// isIntDivCheap() - Return true if integer divide is usually cheaper than
111  /// a sequence of several shifts, adds, and multiplies for this target.
112  bool isIntDivCheap() const { return IntDivIsCheap; }
113
114  /// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of
115  /// srl/add/sra.
116  bool isPow2DivCheap() const { return Pow2DivIsCheap; }
117
118  /// getSetCCResultType - Return the ValueType of the result of setcc
119  /// operations.
120  virtual MVT getSetCCResultType(const SDValue &) const;
121
122  /// getSetCCResultContents - For targets without boolean registers, this flag
123  /// returns information about the contents of the high-bits in the setcc
124  /// result register.
125  SetCCResultValue getSetCCResultContents() const { return SetCCResultContents;}
126
127  /// getSchedulingPreference - Return target scheduling preference.
128  SchedPreference getSchedulingPreference() const {
129    return SchedPreferenceInfo;
130  }
131
132  /// getRegClassFor - Return the register class that should be used for the
133  /// specified value type.  This may only be called on legal types.
134  TargetRegisterClass *getRegClassFor(MVT VT) const {
135    assert((unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT));
136    TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT()];
137    assert(RC && "This value type is not natively supported!");
138    return RC;
139  }
140
141  /// isTypeLegal - Return true if the target has native support for the
142  /// specified value type.  This means that it has a register that directly
143  /// holds it without promotions or expansions.
144  bool isTypeLegal(MVT VT) const {
145    assert(!VT.isSimple() ||
146           (unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT));
147    return VT.isSimple() && RegClassForVT[VT.getSimpleVT()] != 0;
148  }
149
150  class ValueTypeActionImpl {
151    /// ValueTypeActions - This is a bitvector that contains two bits for each
152    /// value type, where the two bits correspond to the LegalizeAction enum.
153    /// This can be queried with "getTypeAction(VT)".
154    uint32_t ValueTypeActions[2];
155  public:
156    ValueTypeActionImpl() {
157      ValueTypeActions[0] = ValueTypeActions[1] = 0;
158    }
159    ValueTypeActionImpl(const ValueTypeActionImpl &RHS) {
160      ValueTypeActions[0] = RHS.ValueTypeActions[0];
161      ValueTypeActions[1] = RHS.ValueTypeActions[1];
162    }
163
164    LegalizeAction getTypeAction(MVT VT) const {
165      if (VT.isExtended()) {
166        if (VT.isVector()) return Expand;
167        if (VT.isInteger())
168          // First promote to a power-of-two size, then expand if necessary.
169          return VT == VT.getRoundIntegerType() ? Expand : Promote;
170        assert(0 && "Unsupported extended type!");
171        return Legal;
172      }
173      unsigned I = VT.getSimpleVT();
174      assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
175      return (LegalizeAction)((ValueTypeActions[I>>4] >> ((2*I) & 31)) & 3);
176    }
177    void setTypeAction(MVT VT, LegalizeAction Action) {
178      unsigned I = VT.getSimpleVT();
179      assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
180      ValueTypeActions[I>>4] |= Action << ((I*2) & 31);
181    }
182  };
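  // Packing sketch (derived from the accessors above, purely illustrative):
  // each simple value type I occupies two bits, stored in 32-bit word I>>4 at
  // bit offset (2*I)&31.  For example, for a type with SimpleVT == 20:
  //
  //   unsigned Word  = 20 >> 4;        // == 1
  //   unsigned Shift = (2 * 20) & 31;  // == 8
  //   // action == (ValueTypeActions[Word] >> Shift) & 3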
183
184  const ValueTypeActionImpl &getValueTypeActions() const {
185    return ValueTypeActions;
186  }
187
188  /// getTypeAction - Return how we should legalize values of this type, either
189  /// it is already legal (return 'Legal') or we need to promote it to a larger
190  /// type (return 'Promote'), or we need to expand it into multiple registers
191  /// of smaller integer type (return 'Expand').  'Custom' is not an option.
192  LegalizeAction getTypeAction(MVT VT) const {
193    return ValueTypeActions.getTypeAction(VT);
194  }
195
196  /// getTypeToTransformTo - For types supported by the target, this is an
197  /// identity function.  For types that must be promoted to larger types, this
198  /// returns the larger type to promote to.  For integer types that are larger
199  /// than the largest integer register, this contains one step in the expansion
200  /// to get to the smaller register. For illegal floating point types, this
201  /// returns the integer type to transform to.
202  MVT getTypeToTransformTo(MVT VT) const {
203    if (VT.isSimple()) {
204      assert((unsigned)VT.getSimpleVT() < array_lengthof(TransformToType));
205      MVT NVT = TransformToType[VT.getSimpleVT()];
206      assert(getTypeAction(NVT) != Promote &&
207             "Promote may not follow Expand or Promote");
208      return NVT;
209    }
210
211    if (VT.isVector())
212      return MVT::getVectorVT(VT.getVectorElementType(),
213                              VT.getVectorNumElements() / 2);
214    if (VT.isInteger()) {
215      MVT NVT = VT.getRoundIntegerType();
216      if (NVT == VT)
217        // Size is a power of two - expand to half the size.
218        return MVT::getIntegerVT(VT.getSizeInBits() / 2);
219      else
220        // Promote to a power of two size, avoiding multi-step promotion.
221        return getTypeAction(NVT) == Promote ? getTypeToTransformTo(NVT) : NVT;
222    }
223    assert(0 && "Unsupported extended type!");
224    return MVT(); // Not reached
225  }
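  // Illustrative results (for a hypothetical target whose widest legal integer
  // type is i32 and widest legal vector type is v4f32; the actual tables are
  // target specific):
  //
  //   getTypeToTransformTo(MVT::i64)               // one step: MVT::i32
  //   getTypeToTransformTo(MVT::v8f32)             // e.g. MVT::v4f32
  //   getTypeToTransformTo(MVT::getIntegerVT(20))  // rounded up: MVT::i32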
226
227  /// getTypeToExpandTo - For types supported by the target, this is an
228  /// identity function.  For types that must be expanded (i.e. integer types
229  /// that are larger than the largest integer register or illegal floating
230  /// point types), this returns the largest legal type it will be expanded to.
231  MVT getTypeToExpandTo(MVT VT) const {
232    assert(!VT.isVector());
233    while (true) {
234      switch (getTypeAction(VT)) {
235      case Legal:
236        return VT;
237      case Expand:
238        VT = getTypeToTransformTo(VT);
239        break;
240      default:
241        assert(false && "Type is not legal nor is it to be expanded!");
242        return VT;
243      }
244    }
245    return VT;
246  }
247
248  /// getVectorTypeBreakdown - Vector types are broken down into some number of
249  /// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
250  /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
251  /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
252  ///
253  /// This method returns the number of registers needed, and the VT for each
254  /// register.  It also returns the VT and quantity of the intermediate values
255  /// before they are promoted/expanded.
256  ///
257  unsigned getVectorTypeBreakdown(MVT VT,
258                                  MVT &IntermediateVT,
259                                  unsigned &NumIntermediates,
260                                  MVT &RegisterVT) const;
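  // Sketch of a typical query (results are target dependent; the v8f32 numbers
  // below simply mirror the Altivec/SSE1 example in the comment above):
  //
  //   MVT IntermediateVT, RegisterVT;
  //   unsigned NumIntermediates;
  //   unsigned NumRegs = getVectorTypeBreakdown(MVT::v8f32, IntermediateVT,
  //                                             NumIntermediates, RegisterVT);
  //   // With 4-wide f32 registers: NumRegs == 2, NumIntermediates == 2,
  //   // IntermediateVT == MVT::v4f32, RegisterVT == MVT::v4f32.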
261
262  typedef std::vector<APFloat>::const_iterator legal_fpimm_iterator;
263  legal_fpimm_iterator legal_fpimm_begin() const {
264    return LegalFPImmediates.begin();
265  }
266  legal_fpimm_iterator legal_fpimm_end() const {
267    return LegalFPImmediates.end();
268  }
269
270  /// isShuffleMaskLegal - Targets can use this to indicate that they only
271  /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
272  /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
273  /// are assumed to be legal.
274  virtual bool isShuffleMaskLegal(SDValue Mask, MVT VT) const {
275    return true;
276  }
277
278  /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. This is used
279  /// by targets to indicate if there is a suitable
280  /// VECTOR_SHUFFLE that can be used to replace a VAND with a constant
281  /// pool entry.
282  virtual bool isVectorClearMaskLegal(const std::vector<SDValue> &BVOps,
283                                      MVT EVT,
284                                      SelectionDAG &DAG) const {
285    return false;
286  }
287
288  /// getOperationAction - Return how this operation should be treated: either
289  /// it is legal, needs to be promoted to a larger size, needs to be
290  /// expanded to some other code sequence, or the target has a custom expander
291  /// for it.
292  LegalizeAction getOperationAction(unsigned Op, MVT VT) const {
293    if (VT.isExtended()) return Expand;
294    assert(Op < array_lengthof(OpActions) &&
295           (unsigned)VT.getSimpleVT() < sizeof(OpActions[0])*4 &&
296           "Table isn't big enough!");
297    return (LegalizeAction)((OpActions[Op] >> (2*VT.getSimpleVT())) & 3);
298  }
299
300  /// isOperationLegal - Return true if the specified operation is legal on this
301  /// target.
302  bool isOperationLegal(unsigned Op, MVT VT) const {
303    return (VT == MVT::Other || isTypeLegal(VT)) &&
304      (getOperationAction(Op, VT) == Legal ||
305       getOperationAction(Op, VT) == Custom);
306  }
307
308  /// getLoadXAction - Return how this load with extension should be treated:
309  /// either it is legal, needs to be promoted to a larger size, needs to be
310  /// expanded to some other code sequence, or the target has a custom expander
311  /// for it.
312  LegalizeAction getLoadXAction(unsigned LType, MVT VT) const {
313    assert(LType < array_lengthof(LoadXActions) &&
314           (unsigned)VT.getSimpleVT() < sizeof(LoadXActions[0])*4 &&
315           "Table isn't big enough!");
316    return (LegalizeAction)((LoadXActions[LType] >> (2*VT.getSimpleVT())) & 3);
317  }
318
319  /// isLoadXLegal - Return true if the specified load with extension is legal
320  /// on this target.
321  bool isLoadXLegal(unsigned LType, MVT VT) const {
322    return VT.isSimple() &&
323      (getLoadXAction(LType, VT) == Legal ||
324       getLoadXAction(LType, VT) == Custom);
325  }
326
327  /// getTruncStoreAction - Return how this store with truncation should be
328  /// treated: either it is legal, needs to be promoted to a larger size, needs
329  /// to be expanded to some other code sequence, or the target has a custom
330  /// expander for it.
331  LegalizeAction getTruncStoreAction(MVT ValVT,
332                                     MVT MemVT) const {
333    assert((unsigned)ValVT.getSimpleVT() < array_lengthof(TruncStoreActions) &&
334           (unsigned)MemVT.getSimpleVT() < sizeof(TruncStoreActions[0])*4 &&
335           "Table isn't big enough!");
336    return (LegalizeAction)((TruncStoreActions[ValVT.getSimpleVT()] >>
337                             (2*MemVT.getSimpleVT())) & 3);
338  }
339
340  /// isTruncStoreLegal - Return true if the specified store with truncation is
341  /// legal on this target.
342  bool isTruncStoreLegal(MVT ValVT, MVT MemVT) const {
343    return isTypeLegal(ValVT) && MemVT.isSimple() &&
344      (getTruncStoreAction(ValVT, MemVT) == Legal ||
345       getTruncStoreAction(ValVT, MemVT) == Custom);
346  }
347
348  /// getIndexedLoadAction - Return how the indexed load should be treated:
349  /// either it is legal, needs to be promoted to a larger size, needs to be
350  /// expanded to some other code sequence, or the target has a custom expander
351  /// for it.
352  LegalizeAction
353  getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
354    assert(IdxMode < array_lengthof(IndexedModeActions[0]) &&
355           (unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[0][0])*4 &&
356           "Table isn't big enough!");
357    return (LegalizeAction)((IndexedModeActions[0][IdxMode] >>
358                             (2*VT.getSimpleVT())) & 3);
359  }
360
361  /// isIndexedLoadLegal - Return true if the specified indexed load is legal
362  /// on this target.
363  bool isIndexedLoadLegal(unsigned IdxMode, MVT VT) const {
364    return VT.isSimple() &&
365      (getIndexedLoadAction(IdxMode, VT) == Legal ||
366       getIndexedLoadAction(IdxMode, VT) == Custom);
367  }
368
369  /// getIndexedStoreAction - Return how the indexed store should be treated:
370  /// either it is legal, needs to be promoted to a larger size, needs to be
371  /// expanded to some other code sequence, or the target has a custom expander
372  /// for it.
373  LegalizeAction
374  getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
375    assert(IdxMode < array_lengthof(IndexedModeActions[1]) &&
376           (unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[1][0])*4 &&
377           "Table isn't big enough!");
378    return (LegalizeAction)((IndexedModeActions[1][IdxMode] >>
379                             (2*VT.getSimpleVT())) & 3);
380  }
381
382  /// isIndexedStoreLegal - Return true if the specified indexed store is legal
383  /// on this target.
384  bool isIndexedStoreLegal(unsigned IdxMode, MVT VT) const {
385    return VT.isSimple() &&
386      (getIndexedStoreAction(IdxMode, VT) == Legal ||
387       getIndexedStoreAction(IdxMode, VT) == Custom);
388  }
389
390  /// getConvertAction - Return how the conversion should be treated:
391  /// either it is legal, needs to be promoted to a larger size, needs to be
392  /// expanded to some other code sequence, or the target has a custom expander
393  /// for it.
394  LegalizeAction
395  getConvertAction(MVT FromVT, MVT ToVT) const {
396    assert((unsigned)FromVT.getSimpleVT() < array_lengthof(ConvertActions) &&
397           (unsigned)ToVT.getSimpleVT() < sizeof(ConvertActions[0])*4 &&
398           "Table isn't big enough!");
399    return (LegalizeAction)((ConvertActions[FromVT.getSimpleVT()] >>
400                             (2*ToVT.getSimpleVT())) & 3);
401  }
402
403  /// isConvertLegal - Return true if the specified conversion is legal
404  /// on this target.
405  bool isConvertLegal(MVT FromVT, MVT ToVT) const {
406    return isTypeLegal(FromVT) && isTypeLegal(ToVT) &&
407      (getConvertAction(FromVT, ToVT) == Legal ||
408       getConvertAction(FromVT, ToVT) == Custom);
409  }
410
411  /// getTypeToPromoteTo - If the action for this operation is to promote, this
412  /// method returns the ValueType to promote to.
413  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
414    assert(getOperationAction(Op, VT) == Promote &&
415           "This operation isn't promoted!");
416
417    // See if this has an explicit type specified.
418    std::map<std::pair<unsigned, MVT::SimpleValueType>,
419             MVT::SimpleValueType>::const_iterator PTTI =
420      PromoteToType.find(std::make_pair(Op, VT.getSimpleVT()));
421    if (PTTI != PromoteToType.end()) return PTTI->second;
422
423    assert((VT.isInteger() || VT.isFloatingPoint()) &&
424           "Cannot autopromote this type, add it with AddPromotedToType.");
425
426    MVT NVT = VT;
427    do {
428      NVT = (MVT::SimpleValueType)(NVT.getSimpleVT()+1);
429      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
430             "Didn't find type to promote to!");
431    } while (!isTypeLegal(NVT) ||
432              getOperationAction(Op, NVT) == Promote);
433    return NVT;
434  }
435
436  /// getValueType - Return the MVT corresponding to this LLVM type.
437  /// This is fixed by the LLVM operations except for the pointer size.  If
438  /// AllowUnknown is true, this will return MVT::Other for types with no MVT
439  /// counterpart (e.g. structs), otherwise it will assert.
440  MVT getValueType(const Type *Ty, bool AllowUnknown = false) const {
441    MVT VT = MVT::getMVT(Ty, AllowUnknown);
442    return VT == MVT::iPTR ? PointerTy : VT;
443  }
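  // Illustrative queries (assuming the Type-construction API of this LLVM era;
  // SomePointerTy and SomeStructTy stand in for arbitrary pointer/struct types):
  //
  //   getValueType(Type::Int32Ty)        // MVT::i32
  //   getValueType(Type::DoubleTy)       // MVT::f64
  //   getValueType(SomePointerTy)        // the target's PointerTy
  //   getValueType(SomeStructTy, true)   // MVT::Other (AllowUnknown)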
444
445  /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
446  /// function arguments in the caller parameter area.  This is the actual
447  /// alignment, not its logarithm.
448  virtual unsigned getByValTypeAlignment(const Type *Ty) const;
449
450  /// getRegisterType - Return the type of registers that this ValueType will
451  /// eventually require.
452  MVT getRegisterType(MVT VT) const {
453    if (VT.isSimple()) {
454      assert((unsigned)VT.getSimpleVT() < array_lengthof(RegisterTypeForVT));
455      return RegisterTypeForVT[VT.getSimpleVT()];
456    }
457    if (VT.isVector()) {
458      MVT VT1, RegisterVT;
459      unsigned NumIntermediates;
460      (void)getVectorTypeBreakdown(VT, VT1, NumIntermediates, RegisterVT);
461      return RegisterVT;
462    }
463    if (VT.isInteger()) {
464      return getRegisterType(getTypeToTransformTo(VT));
465    }
466    assert(0 && "Unsupported extended type!");
467    return MVT(); // Not reached
468  }
469
470  /// getNumRegisters - Return the number of registers that this ValueType will
471  /// eventually require.  This is one for any types promoted to live in larger
472  /// registers, but may be more than one for types (like i64) that are split
473  /// into pieces.  For types like i140, which are first promoted then expanded,
474  /// it is the number of registers needed to hold all the bits of the original
475  /// type.  For an i140 on a 32 bit machine this means 5 registers.
476  unsigned getNumRegisters(MVT VT) const {
477    if (VT.isSimple()) {
478      assert((unsigned)VT.getSimpleVT() < array_lengthof(NumRegistersForVT));
479      return NumRegistersForVT[VT.getSimpleVT()];
480    }
481    if (VT.isVector()) {
482      MVT VT1, VT2;
483      unsigned NumIntermediates;
484      return getVectorTypeBreakdown(VT, VT1, NumIntermediates, VT2);
485    }
486    if (VT.isInteger()) {
487      unsigned BitWidth = VT.getSizeInBits();
488      unsigned RegWidth = getRegisterType(VT).getSizeInBits();
489      return (BitWidth + RegWidth - 1) / RegWidth;
490    }
491    assert(0 && "Unsupported extended type!");
492    return 0; // Not reached
493  }
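  // Worked example from the comment above: i140 on a 32-bit target.  The
  // register type works out to i32, so
  //
  //   getNumRegisters(MVT::getIntegerVT(140))
  //     == (140 + 32 - 1) / 32 == 5 registers.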
494
495  /// ShouldShrinkFPConstant - If true, then instruction selection should
496  /// seek to shrink the FP constant of the specified type to a smaller type
497  /// in order to save space and / or reduce runtime.
498  virtual bool ShouldShrinkFPConstant(MVT VT) const { return true; }
499
500  /// hasTargetDAGCombine - If true, the target has custom DAG combine
501  /// transformations that it can perform for the specified node.
502  bool hasTargetDAGCombine(ISD::NodeType NT) const {
503    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
504    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
505  }
506
507  /// This function returns the maximum number of store operations permitted
508  /// to replace a call to llvm.memset. The value is set by the target at the
509  /// performance threshold for such a replacement.
510  /// @brief Get maximum # of store operations permitted for llvm.memset
511  unsigned getMaxStoresPerMemset() const { return maxStoresPerMemset; }
512
513  /// This function returns the maximum number of store operations permitted
514  /// to replace a call to llvm.memcpy. The value is set by the target at the
515  /// performance threshold for such a replacement.
516  /// @brief Get maximum # of store operations permitted for llvm.memcpy
517  unsigned getMaxStoresPerMemcpy() const { return maxStoresPerMemcpy; }
518
519  /// This function returns the maximum number of store operations permitted
520  /// to replace a call to llvm.memmove. The value is set by the target at the
521  /// performance threshold for such a replacement.
522  /// @brief Get maximum # of store operations permitted for llvm.memmove
523  unsigned getMaxStoresPerMemmove() const { return maxStoresPerMemmove; }
524
525  /// This function returns true if the target allows unaligned memory accesses.
526  /// This is used, for example, in situations where an array copy/move/set is
527  /// converted to a sequence of store operations. Its use helps to ensure that
528  /// such replacements don't generate code that causes an alignment error
529  /// (trap) on the target machine.
530  /// @brief Determine if the target supports unaligned memory accesses.
531  bool allowsUnalignedMemoryAccesses() const {
532    return allowUnalignedMemoryAccesses;
533  }
534
535  /// getOptimalMemOpType - Returns the target specific optimal type for load
536  /// and store operations as a result of memset, memcpy, and memmove lowering.
537  /// It returns MVT::iAny if SelectionDAG should be responsible for
538  /// determining it.
539  virtual MVT getOptimalMemOpType(uint64_t Size, unsigned Align,
540                                  bool isSrcConst, bool isSrcStr) const {
541    return MVT::iAny;
542  }
543
544  /// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
545  /// to implement llvm.setjmp.
546  bool usesUnderscoreSetJmp() const {
547    return UseUnderscoreSetJmp;
548  }
549
550  /// usesUnderscoreLongJmp - Determine if we should use _longjmp or longjmp
551  /// to implement llvm.longjmp.
552  bool usesUnderscoreLongJmp() const {
553    return UseUnderscoreLongJmp;
554  }
555
556  /// getStackPointerRegisterToSaveRestore - If a physical register, this
557  /// specifies the register that llvm.savestack/llvm.restorestack should save
558  /// and restore.
559  unsigned getStackPointerRegisterToSaveRestore() const {
560    return StackPointerRegisterToSaveRestore;
561  }
562
563  /// getExceptionAddressRegister - If a physical register, this returns
564  /// the register that receives the exception address on entry to a landing
565  /// pad.
566  unsigned getExceptionAddressRegister() const {
567    return ExceptionPointerRegister;
568  }
569
570  /// getExceptionSelectorRegister - If a physical register, this returns
571  /// the register that receives the exception typeid on entry to a landing
572  /// pad.
573  unsigned getExceptionSelectorRegister() const {
574    return ExceptionSelectorRegister;
575  }
576
577  /// getJumpBufSize - returns the target's jmp_buf size in bytes (if never
578  /// set, the default is 200)
579  unsigned getJumpBufSize() const {
580    return JumpBufSize;
581  }
582
583  /// getJumpBufAlignment - returns the target's jmp_buf alignment in bytes
584  /// (if never set, the default is 0)
585  unsigned getJumpBufAlignment() const {
586    return JumpBufAlignment;
587  }
588
589  /// getIfCvtBlockSizeLimit - returns the target specific if-conversion block
590  /// size limit. Any block whose size is greater should not be predicated.
591  unsigned getIfCvtBlockSizeLimit() const {
592    return IfCvtBlockSizeLimit;
593  }
594
595  /// getIfCvtDupBlockSizeLimit - returns the target specific size limit for a
596  /// block to be considered for duplication. Any block whose size is greater
597  /// should not be duplicated to facilitate its predication.
598  unsigned getIfCvtDupBlockSizeLimit() const {
599    return IfCvtDupBlockSizeLimit;
600  }
601
602  /// getPrefLoopAlignment - return the preferred loop alignment.
603  ///
604  unsigned getPrefLoopAlignment() const {
605    return PrefLoopAlignment;
606  }
607
608  /// getPreIndexedAddressParts - returns true by value, and the base pointer,
609  /// offset pointer, and addressing mode by reference, if the node's address
610  /// can be legally represented as a pre-indexed load / store address.
611  virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
612                                         SDValue &Offset,
613                                         ISD::MemIndexedMode &AM,
614                                         SelectionDAG &DAG) {
615    return false;
616  }
617
618  /// getPostIndexedAddressParts - returns true by value, and the base pointer,
619  /// offset pointer, and addressing mode by reference, if this node can be
620  /// combined with a load / store to form a post-indexed load / store.
621  virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
622                                          SDValue &Base, SDValue &Offset,
623                                          ISD::MemIndexedMode &AM,
624                                          SelectionDAG &DAG) {
625    return false;
626  }
627
628  /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
629  /// jumptable.
630  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
631                                             SelectionDAG &DAG) const;
632
633  //===--------------------------------------------------------------------===//
634  // TargetLowering Optimization Methods
635  //
636
637  /// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two
638  /// SDValues for returning information from TargetLowering to its clients
639  /// that want to combine.
640  struct TargetLoweringOpt {
641    SelectionDAG &DAG;
642    bool AfterLegalize;
643    SDValue Old;
644    SDValue New;
645
646    explicit TargetLoweringOpt(SelectionDAG &InDAG, bool afterLegalize)
647      : DAG(InDAG), AfterLegalize(afterLegalize) {}
648
649    bool CombineTo(SDValue O, SDValue N) {
650      Old = O;
651      New = N;
652      return true;
653    }
654
655    /// ShrinkDemandedConstant - Check to see if the specified operand of the
656    /// specified instruction is a constant integer.  If so, check to see if
657    /// there are any bits set in the constant that are not demanded.  If so,
658    /// shrink the constant and return true.
659    bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
660  };
661
662  /// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
663  /// DemandedMask bits of the result of Op are ever used downstream.  If we can
664  /// use this information to simplify Op, create a new simplified DAG node and
665  /// return true, returning the original and new nodes in Old and New.
666  /// Otherwise, analyze the expression and return a mask of KnownOne and
667  /// KnownZero bits for the expression (used to simplify the caller).
668  /// The KnownZero/One bits may only be accurate for those bits in the
669  /// DemandedMask.
670  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
671                            APInt &KnownZero, APInt &KnownOne,
672                            TargetLoweringOpt &TLO, unsigned Depth = 0) const;
673
674  /// computeMaskedBitsForTargetNode - Determine which of the bits specified in
675  /// Mask are known to be either zero or one and return them in the
676  /// KnownZero/KnownOne bitsets.
677  virtual void computeMaskedBitsForTargetNode(const SDValue Op,
678                                              const APInt &Mask,
679                                              APInt &KnownZero,
680                                              APInt &KnownOne,
681                                              const SelectionDAG &DAG,
682                                              unsigned Depth = 0) const;
683
684  /// ComputeNumSignBitsForTargetNode - This method can be implemented by
685  /// targets that want to expose additional information about sign bits to the
686  /// DAG Combiner.
687  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
688                                                   unsigned Depth = 0) const;
689
690  struct DAGCombinerInfo {
691    void *DC;  // The DAG Combiner object.
692    bool BeforeLegalize;
693    bool CalledByLegalizer;
694  public:
695    SelectionDAG &DAG;
696
697    DAGCombinerInfo(SelectionDAG &dag, bool bl, bool cl, void *dc)
698      : DC(dc), BeforeLegalize(bl), CalledByLegalizer(cl), DAG(dag) {}
699
700    bool isBeforeLegalize() const { return BeforeLegalize; }
701    bool isCalledByLegalizer() const { return CalledByLegalizer; }
702
703    void AddToWorklist(SDNode *N);
704    SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To);
705    SDValue CombineTo(SDNode *N, SDValue Res);
706    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1);
707  };
708
709  /// SimplifySetCC - Try to simplify a setcc built with the specified operands
710  /// and cc. If it is unable to simplify it, return a null SDValue.
711  SDValue SimplifySetCC(MVT VT, SDValue N0, SDValue N1,
712                          ISD::CondCode Cond, bool foldBooleans,
713                          DAGCombinerInfo &DCI) const;
714
715  /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
716  /// node is a GlobalAddress + offset.
717  virtual bool
718  isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) const;
719
720  /// isConsecutiveLoad - Return true if LD (which must be a LoadSDNode) is
721  /// loading 'Bytes' bytes from a location that is 'Dist' units away from the
722  /// location that the 'Base' load is loading from.
723  bool isConsecutiveLoad(SDNode *LD, SDNode *Base, unsigned Bytes, int Dist,
724                         const MachineFrameInfo *MFI) const;
725
726  /// PerformDAGCombine - This method will be invoked for all target nodes and
727  /// for any target-independent nodes that the target has registered with
728  /// invoke it for.
729  ///
730  /// The semantics are as follows:
731  /// Return Value:
732  ///   SDValue.Val == 0   - No change was made
733  ///   SDValue.Val == N   - N was replaced, is dead, and is already handled.
734  ///   otherwise            - N should be replaced by the returned Operand.
735  ///
736  /// In addition, methods provided by DAGCombinerInfo may be used to perform
737  /// more complex transformations.
738  ///
739  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
740
741  //===--------------------------------------------------------------------===//
742  // TargetLowering Configuration Methods - These methods should be invoked by
743  // the derived class constructor to configure this object for the target.
744  //
745
746protected:
747  /// setUsesGlobalOffsetTable - Specify that this target does or doesn't use a
748  /// GOT for PC-relative code.
749  void setUsesGlobalOffsetTable(bool V) { UsesGlobalOffsetTable = V; }
750
751  /// setShiftAmountType - Describe the type that should be used for shift
752  /// amounts.  This type defaults to the pointer type.
753  void setShiftAmountType(MVT VT) { ShiftAmountTy = VT; }
754
755  /// setSetCCResultContents - Specify how the target extends the result of a
756  /// setcc operation in a register.
757  void setSetCCResultContents(SetCCResultValue Ty) { SetCCResultContents = Ty; }
758
759  /// setSchedulingPreference - Specify the target scheduling preference.
760  void setSchedulingPreference(SchedPreference Pref) {
761    SchedPreferenceInfo = Pref;
762  }
763
764  /// setShiftAmountFlavor - Describe how the target handles out of range shift
765  /// amounts.
766  void setShiftAmountFlavor(OutOfRangeShiftAmount OORSA) {
767    ShiftAmtHandling = OORSA;
768  }
769
770  /// setUseUnderscoreSetJmp - Indicate whether this target prefers to
771  /// use _setjmp to implement llvm.setjmp or the non _ version.
772  /// Defaults to false.
773  void setUseUnderscoreSetJmp(bool Val) {
774    UseUnderscoreSetJmp = Val;
775  }
776
777  /// setUseUnderscoreLongJmp - Indicate whether this target prefers to
778  /// use _longjmp to implement llvm.longjmp or the non _ version.
779  /// Defaults to false.
780  void setUseUnderscoreLongJmp(bool Val) {
781    UseUnderscoreLongJmp = Val;
782  }
783
784  /// setStackPointerRegisterToSaveRestore - If set to a physical register, this
785  /// specifies the register that llvm.savestack/llvm.restorestack should save
786  /// and restore.
787  void setStackPointerRegisterToSaveRestore(unsigned R) {
788    StackPointerRegisterToSaveRestore = R;
789  }
790
791  /// setExceptionPointerRegister - If set to a physical register, this sets
792  /// the register that receives the exception address on entry to a landing
793  /// pad.
794  void setExceptionPointerRegister(unsigned R) {
795    ExceptionPointerRegister = R;
796  }
797
798  /// setExceptionSelectorRegister - If set to a physical register, this sets
799  /// the register that receives the exception typeid on entry to a landing
800  /// pad.
801  void setExceptionSelectorRegister(unsigned R) {
802    ExceptionSelectorRegister = R;
803  }
804
805  /// setSelectIsExpensive - Tells the code generator not to expand operations
806  /// into sequences that use select operations if possible.
807  void setSelectIsExpensive() { SelectIsExpensive = true; }
808
809  /// setIntDivIsCheap - Tells the code generator whether integer divide is
810  /// cheap; if it is not, it should be replaced if possible by an alternate
811  /// sequence of instructions not containing an integer divide.
812  void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
813
814  /// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
815  /// srl/add/sra for a signed divide by power of two, and let the target handle
816  /// it.
817  void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }
818
819  /// addRegisterClass - Add the specified register class as an available
820  /// regclass for the specified value type.  This indicates the selector can
821  /// handle values of that class natively.
822  void addRegisterClass(MVT VT, TargetRegisterClass *RC) {
823    assert((unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT));
824    AvailableRegClasses.push_back(std::make_pair(VT, RC));
825    RegClassForVT[VT.getSimpleVT()] = RC;
826  }
827
828  /// computeRegisterProperties - Once all of the register classes are added,
829  /// this allows us to compute derived properties we expose.
830  void computeRegisterProperties();
831
832  /// setOperationAction - Indicate that the specified operation does not work
833  /// with the specified type and indicate what to do about it.
834  void setOperationAction(unsigned Op, MVT VT,
835                          LegalizeAction Action) {
836    assert((unsigned)VT.getSimpleVT() < sizeof(OpActions[0])*4 &&
837           Op < array_lengthof(OpActions) && "Table isn't big enough!");
838    OpActions[Op] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2);
839    OpActions[Op] |= (uint64_t)Action << VT.getSimpleVT()*2;
840  }
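  // Typical use (sketch of a hypothetical target's constructor; the register
  // class name MyTarget::GPRRegisterClass is made up for illustration):
  //
  //   MyTargetLowering::MyTargetLowering(TargetMachine &TM)
  //     : TargetLowering(TM) {
  //     addRegisterClass(MVT::i32, MyTarget::GPRRegisterClass);
  //     setOperationAction(ISD::SDIV,  MVT::i32,   Expand); // no divide instr
  //     setOperationAction(ISD::ROTR,  MVT::i32,   Expand);
  //     setOperationAction(ISD::BR_CC, MVT::Other, Custom);
  //     computeRegisterProperties();
  //   }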
841
842  /// setLoadXAction - Indicate that the specified load with extension does not
843  /// work with the specified type and indicate what to do about it.
844  void setLoadXAction(unsigned ExtType, MVT VT,
845                      LegalizeAction Action) {
846    assert((unsigned)VT.getSimpleVT() < sizeof(LoadXActions[0])*4 &&
847           ExtType < array_lengthof(LoadXActions) &&
848           "Table isn't big enough!");
849    LoadXActions[ExtType] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2);
850    LoadXActions[ExtType] |= (uint64_t)Action << VT.getSimpleVT()*2;
851  }
852
853  /// setTruncStoreAction - Indicate that the specified truncating store does
854  /// not work with the specified type and indicate what to do about it.
855  void setTruncStoreAction(MVT ValVT, MVT MemVT,
856                           LegalizeAction Action) {
857    assert((unsigned)ValVT.getSimpleVT() < array_lengthof(TruncStoreActions) &&
858           (unsigned)MemVT.getSimpleVT() < sizeof(TruncStoreActions[0])*4 &&
859           "Table isn't big enough!");
860    TruncStoreActions[ValVT.getSimpleVT()] &= ~(uint64_t(3UL) <<
861                                                MemVT.getSimpleVT()*2);
862    TruncStoreActions[ValVT.getSimpleVT()] |= (uint64_t)Action <<
863      MemVT.getSimpleVT()*2;
864  }
865
866  /// setIndexedLoadAction - Indicate that the specified indexed load does or
867  /// does not work with the specified type and indicate what to do about
868  /// it. NOTE: All indexed mode loads are initialized to Expand in
869  /// TargetLowering.cpp
870  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
871                            LegalizeAction Action) {
872    assert((unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[0])*4 &&
873           IdxMode < array_lengthof(IndexedModeActions[0]) &&
874           "Table isn't big enough!");
875    IndexedModeActions[0][IdxMode] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2);
876    IndexedModeActions[0][IdxMode] |= (uint64_t)Action << VT.getSimpleVT()*2;
877  }
878
879  /// setIndexedStoreAction - Indicate that the specified indexed store does or
880  /// does not work with the specified type and indicate what to do about
881  /// it. NOTE: All indexed mode stores are initialized to Expand in
882  /// TargetLowering.cpp
883  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
884                             LegalizeAction Action) {
885    assert((unsigned)VT.getSimpleVT() < sizeof(IndexedModeActions[1][0])*4 &&
886           IdxMode < array_lengthof(IndexedModeActions[1]) &&
887           "Table isn't big enough!");
888    IndexedModeActions[1][IdxMode] &= ~(uint64_t(3UL) << VT.getSimpleVT()*2);
889    IndexedModeActions[1][IdxMode] |= (uint64_t)Action << VT.getSimpleVT()*2;
890  }
891
892  /// setConvertAction - Indicate that the specified conversion does or does
893  /// not work with the specified type and indicate what to do about it.
894  void setConvertAction(MVT FromVT, MVT ToVT,
895                        LegalizeAction Action) {
896    assert((unsigned)FromVT.getSimpleVT() < array_lengthof(ConvertActions) &&
897           (unsigned)ToVT.getSimpleVT() < sizeof(ConvertActions[0])*4 &&
898           "Table isn't big enough!");
899    ConvertActions[FromVT.getSimpleVT()] &= ~(uint64_t(3UL) <<
900                                              ToVT.getSimpleVT()*2);
901    ConvertActions[FromVT.getSimpleVT()] |= (uint64_t)Action <<
902      ToVT.getSimpleVT()*2;
903  }
904
905  /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the
906  /// promotion code defaults to trying a larger integer/fp until it can find
907  /// one that works.  If that default is insufficient, this method can be used
908  /// by the target to override the default.
909  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
910    PromoteToType[std::make_pair(Opc, OrigVT.getSimpleVT())] =
911      DestVT.getSimpleVT();
912  }
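  // Example (hypothetical target that has no i1 logic operations and performs
  // them in i8 registers instead, following the promotion pattern described
  // above):
  //
  //   setOperationAction(ISD::AND, MVT::i1, Promote);
  //   AddPromotedToType(ISD::AND, MVT::i1, MVT::i8);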
913
914  /// addLegalFPImmediate - Indicate that this target can instruction select
915  /// the specified FP immediate natively.
916  void addLegalFPImmediate(const APFloat& Imm) {
917    LegalFPImmediates.push_back(Imm);
918  }
919
920  /// setTargetDAGCombine - Targets should invoke this method for each target
921  /// independent node that they want to provide a custom DAG combiner for by
922  /// implementing the PerformDAGCombine virtual method.
923  void setTargetDAGCombine(ISD::NodeType NT) {
924    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
925    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
926  }
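  // Example: a target that wants PerformDAGCombine invoked for every ISD::SHL
  // node would call, from its constructor (sketch):
  //
  //   setTargetDAGCombine(ISD::SHL);
  //
  // and then handle ISD::SHL in its PerformDAGCombine override.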
927
928  /// setJumpBufSize - Set the target's required jmp_buf buffer size (in
929  /// bytes); default is 200
930  void setJumpBufSize(unsigned Size) {
931    JumpBufSize = Size;
932  }
933
934  /// setJumpBufAlignment - Set the target's required jmp_buf buffer
935  /// alignment (in bytes); default is 0
936  void setJumpBufAlignment(unsigned Align) {
937    JumpBufAlignment = Align;
938  }
939
940  /// setIfCvtBlockSizeLimit - Set the target's if-conversion block size
941  /// limit (in number of instructions); default is 2.
942  void setIfCvtBlockSizeLimit(unsigned Limit) {
943    IfCvtBlockSizeLimit = Limit;
944  }
945
946  /// setIfCvtDupBlockSizeLimit - Set the target's block size limit (in number
947  /// of instructions) to be considered for code duplication during
948  /// if-conversion; default is 2.
949  void setIfCvtDupBlockSizeLimit(unsigned Limit) {
950    IfCvtDupBlockSizeLimit = Limit;
951  }
952
953  /// setPrefLoopAlignment - Set the target's preferred loop alignment. The
954  /// default of zero means the target does not care about loop alignment.
955  void setPrefLoopAlignment(unsigned Align) {
956    PrefLoopAlignment = Align;
957  }
958
959public:
960
961  virtual const TargetSubtarget *getSubtarget() {
962    assert(0 && "Not Implemented");
963    return NULL;    // this is here to silence compiler errors
964  }
965  //===--------------------------------------------------------------------===//
966  // Lowering methods - These methods must be implemented by targets so that
967  // the SelectionDAGLowering code knows how to lower these.
968  //
969
970  /// LowerArguments - This hook must be implemented to indicate how we should
971  /// lower the arguments for the specified function, into the specified DAG.
972  virtual void
973  LowerArguments(Function &F, SelectionDAG &DAG,
974                 SmallVectorImpl<SDValue>& ArgValues);
975
976  /// LowerCallTo - This hook lowers an abstract call to a function into an
977  /// actual call.  This returns a pair of operands.  The first element is the
978  /// return value for the function (if RetTy is not VoidTy).  The second
979  /// element is the outgoing token chain.
980  struct ArgListEntry {
981    SDValue Node;
982    const Type* Ty;
983    bool isSExt  : 1;
984    bool isZExt  : 1;
985    bool isInReg : 1;
986    bool isSRet  : 1;
987    bool isNest  : 1;
988    bool isByVal : 1;
989    uint16_t Alignment;
990
991    ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
992      isSRet(false), isNest(false), isByVal(false), Alignment(0) { }
993  };
994  typedef std::vector<ArgListEntry> ArgListTy;
995  virtual std::pair<SDValue, SDValue>
996  LowerCallTo(SDValue Chain, const Type *RetTy, bool RetSExt, bool RetZExt,
997              bool isVarArg, bool isInreg, unsigned CallingConv,
998              bool isTailCall, SDValue Callee, ArgListTy &Args,
999              SelectionDAG &DAG);
1000
1001  /// EmitTargetCodeForMemcpy - Emit target-specific code that performs a
1002  /// memcpy. This can be used by targets to provide code sequences for cases
1003  /// that don't fit the target's parameters for simple loads/stores and can be
1004  /// more efficient than using a library call. This function can return a null
1005  /// SDValue if the target declines to use custom code and a different
1006  /// lowering strategy should be used.
1007  ///
1008  /// If AlwaysInline is true, the size is constant and the target should not
1009  /// emit any calls and is strongly encouraged to attempt to emit inline code
1010  /// even if it is beyond the usual threshold because this intrinsic is being
1011  /// expanded in a place where calls are not feasible (e.g. within the prologue
1012  /// for another call). If the target chooses to decline an AlwaysInline
1013  /// request here, legalize will resort to using simple loads and stores.
1014  virtual SDValue
1015  EmitTargetCodeForMemcpy(SelectionDAG &DAG,
1016                          SDValue Chain,
1017                          SDValue Op1, SDValue Op2,
1018                          SDValue Op3, unsigned Align,
1019                          bool AlwaysInline,
1020                          const Value *DstSV, uint64_t DstOff,
1021                          const Value *SrcSV, uint64_t SrcOff) {
1022    return SDValue();
1023  }
1024
1025  /// EmitTargetCodeForMemmove - Emit target-specific code that performs a
1026  /// memmove. This can be used by targets to provide code sequences for cases
1027  /// that don't fit the target's parameters for simple loads/stores and can be
1028  /// more efficient than using a library call. This function can return a null
1029  /// SDValue if the target declines to use custom code and a different
1030  /// lowering strategy should be used.
1031  virtual SDValue
1032  EmitTargetCodeForMemmove(SelectionDAG &DAG,
1033                           SDValue Chain,
1034                           SDValue Op1, SDValue Op2,
1035                           SDValue Op3, unsigned Align,
1036                           const Value *DstSV, uint64_t DstOff,
1037                           const Value *SrcSV, uint64_t SrcOff) {
1038    return SDValue();
1039  }
1040
1041  /// EmitTargetCodeForMemset - Emit target-specific code that performs a
1042  /// memset. This can be used by targets to provide code sequences for cases
1043  /// that don't fit the target's parameters for simple stores and can be more
1044  /// efficient than using a library call. This function can return a null
1045  /// SDValue if the target declines to use custom code and a different
1046  /// lowering strategy should be used.
1047  virtual SDValue
1048  EmitTargetCodeForMemset(SelectionDAG &DAG,
1049                          SDValue Chain,
1050                          SDValue Op1, SDValue Op2,
1051                          SDValue Op3, unsigned Align,
1052                          const Value *DstSV, uint64_t DstOff) {
1053    return SDValue();
1054  }
1055
1056  /// LowerOperation - This callback is invoked for operations that are
1057  /// unsupported by the target, which are registered to use 'custom' lowering,
1058  /// and whose defined values are all legal.
1059  /// If the target has no operations that require custom lowering, it need not
1060  /// implement this.  The default implementation of this aborts.
1061  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);
1062
1063  /// ReplaceNodeResults - This callback is invoked for operations that are
1064  /// unsupported by the target, which are registered to use 'custom' lowering,
1065  /// and whose result type is illegal.  This must return a node whose results
1066  /// precisely match the results of the input node.  This typically involves a
1067  /// MERGE_VALUES node and/or BUILD_PAIR.
1068  ///
1069  /// If the target has no operations that require custom lowering, it need not
1070  /// implement this.  The default implementation aborts.
1071  virtual SDNode *ReplaceNodeResults(SDNode *N, SelectionDAG &DAG) {
1072    assert(0 && "ReplaceNodeResults not implemented for this target!");
1073    return 0;
1074  }
1075
1076  /// IsEligibleForTailCallOptimization - Check whether the call is eligible for
1077  /// tail call optimization. Targets which want to do tail call optimization
1078  /// should override this function.
1079  virtual bool IsEligibleForTailCallOptimization(CallSDNode *Call,
1080                                                 SDValue Ret,
1081                                                 SelectionDAG &DAG) const {
1082    return false;
1083  }
1084
1085  /// CheckTailCallReturnConstraints - Check whether the CALL node immediately
1086  /// precedes the RET node and whether the return uses the result of the node
1087  /// or is a void return. This function can be used by the target to determine
1088  /// eligibility of tail call optimization.
1089  static bool CheckTailCallReturnConstraints(CallSDNode *TheCall, SDValue Ret) {
1090    unsigned NumOps = Ret.getNumOperands();
1091    if ((NumOps == 1 &&
1092       (Ret.getOperand(0) == SDValue(TheCall,1) ||
1093        Ret.getOperand(0) == SDValue(TheCall,0))) ||
1094      (NumOps > 1 &&
1095       Ret.getOperand(0) == SDValue(TheCall,
1096                                    TheCall->getNumValues()-1) &&
1097       Ret.getOperand(1) == SDValue(TheCall,0)))
1098      return true;
1099    return false;
1100  }
1101
1102  /// GetPossiblePreceedingTailCall - Get the preceding TailCallNodeOpCode node,
1103  /// if it exists, skipping a possible ISD::TokenFactor.
1104  static SDValue GetPossiblePreceedingTailCall(SDValue Chain,
1105                                                 unsigned TailCallNodeOpCode) {
1106    if (Chain.getOpcode() == TailCallNodeOpCode) {
1107      return Chain;
1108    } else if (Chain.getOpcode() == ISD::TokenFactor) {
1109      if (Chain.getNumOperands() &&
1110          Chain.getOperand(0).getOpcode() == TailCallNodeOpCode)
1111        return Chain.getOperand(0);
1112    }
1113    return Chain;
1114  }
1115
1116  /// getTargetNodeName() - This method returns the name of a target specific
1117  /// DAG node.
1118  virtual const char *getTargetNodeName(unsigned Opcode) const;
1119
1120  /// createFastISel - This method returns a target specific FastISel object,
1121  /// or null if the target does not support "fast" ISel.
1122  virtual FastISel *
1123  createFastISel(MachineFunction &,
1124                 MachineModuleInfo *,
1125                 DenseMap<const Value *, unsigned> &,
1126                 DenseMap<const BasicBlock *, MachineBasicBlock *> &,
1127                 DenseMap<const AllocaInst *, int> &) {
1128    return 0;
1129  }
1130
1131  //===--------------------------------------------------------------------===//
1132  // Inline Asm Support hooks
1133  //
1134
1135  enum ConstraintType {
1136    C_Register,            // Constraint represents a single register.
1137    C_RegisterClass,       // Constraint represents one or more registers.
1138    C_Memory,              // Memory constraint.
1139    C_Other,               // Something else.
1140    C_Unknown              // Unsupported constraint.
1141  };
1142
1143  /// AsmOperandInfo - This contains information for each constraint that we are
1144  /// lowering.
1145  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
1146    /// ConstraintCode - This contains the actual string for the code, like "m".
1147    std::string ConstraintCode;
1148
1149    /// ConstraintType - Information about the constraint code, e.g. Register,
1150    /// RegisterClass, Memory, Other, Unknown.
1151    TargetLowering::ConstraintType ConstraintType;
1152
1153    /// CallOperandVal - If this is the result output operand or a
1154    /// clobber, this is null, otherwise it is the incoming operand to the
1155    /// CallInst.  This gets modified as the asm is processed.
1156    Value *CallOperandVal;
1157
1158    /// ConstraintVT - The ValueType for the operand value.
1159    MVT ConstraintVT;
1160
1161    AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
1162      : InlineAsm::ConstraintInfo(info),
1163        ConstraintType(TargetLowering::C_Unknown),
1164        CallOperandVal(0), ConstraintVT(MVT::Other) {
1165    }
1166  };
1167
1168  /// ComputeConstraintToUse - Determines the constraint code and constraint
1169  /// type to use for the specific AsmOperandInfo, setting
1170  /// OpInfo.ConstraintCode and OpInfo.ConstraintType.  If the actual operand
1171  /// being passed in is available, it can be passed in as Op, otherwise an
1172  /// empty SDValue can be passed. If hasMemory is true it means one of the asm
1173  /// constraints of the inline asm instruction being processed is 'm'.
1174  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
1175                                      SDValue Op,
1176                                      bool hasMemory,
1177                                      SelectionDAG *DAG = 0) const;
1178
1179  /// getConstraintType - Given a constraint, return the type of constraint it
1180  /// is for this target.
1181  virtual ConstraintType getConstraintType(const std::string &Constraint) const;
1182
1183  /// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"),
1184  /// return a list of registers that can be used to satisfy the constraint.
1185  /// This should only be used for C_RegisterClass constraints.
1186  virtual std::vector<unsigned>
1187  getRegClassForInlineAsmConstraint(const std::string &Constraint,
1188                                    MVT VT) const;
1189
1190  /// getRegForInlineAsmConstraint - Given a physical register constraint (e.g.
1191  /// {edx}), return the register number and the register class for the
1192  /// register.
1193  ///
1194  /// Given a register class constraint, like 'r', if this corresponds directly
1195  /// to an LLVM register class, return a register of 0 and the register class
1196  /// pointer.
1197  ///
1198  /// This should only be used for C_Register constraints.  On error,
1199  /// this returns a register number of 0 and a null register class pointer.
1200  virtual std::pair<unsigned, const TargetRegisterClass*>
1201    getRegForInlineAsmConstraint(const std::string &Constraint,
1202                                 MVT VT) const;
1203
1204  /// LowerXConstraint - try to replace an X constraint, which matches anything,
1205  /// with another that has more specific requirements based on the type of the
1206  /// corresponding operand.  This returns null if there is no replacement to
1207  /// make.
1208  virtual const char *LowerXConstraint(MVT ConstraintVT) const;
1209
1210  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
1211  /// vector.  If it is invalid, don't add anything to Ops. If hasMemory is true
1212  /// it means one of the asm constraints of the inline asm instruction being
1213  /// processed is 'm'.
1214  virtual void LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter,
1215                                            bool hasMemory,
1216                                            std::vector<SDValue> &Ops,
1217                                            SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Scheduler hooks
  //

  // EmitInstrWithCustomInserter - This method should be implemented by targets
  // that mark instructions with the 'usesCustomDAGSchedInserter' flag.  These
  // instructions are special in various ways, which require special support to
  // insert.  The specified MachineInstr is created but not inserted into any
  // basic blocks, and the scheduler passes ownership of it to this method.
  virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
                                                        MachineBasicBlock *MBB);
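  //
  // A trivial sketch of such an override (illustrative only; a real target
  // usually rewrites MI into new control flow.  MyTargetLowering is an
  // assumption):
  //
  //   MachineBasicBlock *
  //   MyTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
  //                                                 MachineBasicBlock *MBB) {
  //     // Take ownership of MI by appending it to MBB unchanged, then hand
  //     // back the block into which subsequent instructions should go.
  //     MBB->push_back(MI);
  //     return MBB;
  //   }
  //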

  //===--------------------------------------------------------------------===//
  // Addressing mode description hooks (used by LSR etc).
  //

  /// AddrMode - This represents an addressing mode of:
  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
  /// If BaseGV is null, there is no BaseGV.
  /// If BaseOffs is zero, there is no base offset.
  /// If HasBaseReg is false, there is no base register.
  /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
  /// no scale.
  ///
  struct AddrMode {
    GlobalValue *BaseGV;
    int64_t      BaseOffs;
    bool         HasBaseReg;
    int64_t      Scale;
    AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
  };

  /// isLegalAddressingMode - Return true if the addressing mode represented by
  /// AM is legal for this target, for a load/store of the specified type.
  /// TODO: Handle pre/postinc as well.
  virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;
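  //
  // For example, a client such as loop strength reduction might test whether
  // "BaseReg + 4*ScaleReg + 8" is legal for a given access (sketch only; TLI
  // and AccessTy, the const Type* of the load/store, are assumed to be in
  // scope):
  //
  //   TargetLowering::AddrMode AM;
  //   AM.HasBaseReg = true;    // a base register is present
  //   AM.Scale      = 4;       // plus 4 * some scaled register
  //   AM.BaseOffs   = 8;       // plus a constant displacement of 8
  //   bool Legal = TLI.isLegalAddressingMode(AM, AccessTy);
  //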

  /// isTruncateFree - Return true if it's free to truncate a value of
  /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
  /// register EAX to i16 by referencing its sub-register AX.
  virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const {
    return false;
  }

  virtual bool isTruncateFree(MVT VT1, MVT VT2) const {
    return false;
  }
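  //
  // For example, a hypothetical target on which every narrower integer is a
  // sub-register of the wider ones could override the type-based form as
  // (sketch only; MyTargetLowering is an assumption):
  //
  //   bool MyTargetLowering::isTruncateFree(const Type *Ty1,
  //                                         const Type *Ty2) const {
  //     if (!Ty1->isInteger() || !Ty2->isInteger())
  //       return false;
  //     return Ty1->getPrimitiveSizeInBits() > Ty2->getPrimitiveSizeInBits();
  //   }
  //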

  //===--------------------------------------------------------------------===//
  // Div utility functions
  //
  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG,
                    std::vector<SDNode*>* Created) const;
  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG,
                    std::vector<SDNode*>* Created) const;


  //===--------------------------------------------------------------------===//
  // Runtime Library hooks
  //

  /// setLibcallName - Rename the default libcall routine name for the specified
  /// libcall.
  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
    LibcallRoutineNames[Call] = Name;
  }

  /// getLibcallName - Get the libcall routine name for the specified libcall.
  ///
  const char *getLibcallName(RTLIB::Libcall Call) const {
    return LibcallRoutineNames[Call];
  }
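  //
  // For example, a target whose runtime provides its own 32-bit signed
  // division helper might do the following in its TargetLowering constructor
  // (the routine name is illustrative, and RTLIB::SDIV_I32 is assumed to be
  // the relevant enumerator):
  //
  //   setLibcallName(RTLIB::SDIV_I32, "__my_divsi3");
  //
  // after which getLibcallName(RTLIB::SDIV_I32) returns "__my_divsi3".
  //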

  /// setCmpLibcallCC - Override the default CondCode to be used to test the
  /// result of the comparison libcall against zero.
  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
    CmpLibcallCCs[Call] = CC;
  }

  /// getCmpLibcallCC - Get the CondCode that's to be used to test the result of
  /// the comparison libcall against zero.
  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
    return CmpLibcallCCs[Call];
  }
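  //
  // For example, a soft-float target whose "ordered greater-or-equal" f32
  // comparison routine returns a value that must be tested with >= 0 might
  // register that in its constructor (RTLIB::OGE_F32 is assumed to be the
  // relevant enumerator):
  //
  //   setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETGE);
  //
  // Legalization then compares the libcall's result against zero with SETGE
  // instead of the default condition.
  //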

private:
  TargetMachine &TM;
  const TargetData *TD;

  /// PointerTy - The type to use for pointers, usually i32 or i64.
  ///
  MVT PointerTy;

  /// IsLittleEndian - True if this is a little endian target.
  ///
  bool IsLittleEndian;

  /// UsesGlobalOffsetTable - True if this target uses a GOT for PIC codegen.
  ///
  bool UsesGlobalOffsetTable;

  /// SelectIsExpensive - Tells the code generator not to expand operations
  /// into sequences that use select operations, if possible.
  bool SelectIsExpensive;

  /// IntDivIsCheap - Tells the code generator not to expand integer divides by
  /// constants into a sequence of muls, adds, and shifts.  This is a hack until
  /// a real cost model is in place.  If we ever optimize for size, this will be
  /// set to true unconditionally.
  bool IntDivIsCheap;

  /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
  /// srl/add/sra for a signed divide by a power of two, but should instead let
  /// the target handle it.
  bool Pow2DivIsCheap;

  /// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
  /// llvm.setjmp.  Defaults to false.
  bool UseUnderscoreSetJmp;

  /// UseUnderscoreLongJmp - This target prefers to use _longjmp to implement
  /// llvm.longjmp.  Defaults to false.
  bool UseUnderscoreLongJmp;

  /// ShiftAmountTy - The type to use for shift amounts, usually i8 or whatever
  /// PointerTy is.
  MVT ShiftAmountTy;

  OutOfRangeShiftAmount ShiftAmtHandling;

  /// SetCCResultContents - Information about the contents of the high-bits in
  /// the result of a setcc comparison operation.
  SetCCResultValue SetCCResultContents;

  /// SchedPreferenceInfo - The target scheduling preference: shortest possible
  /// total cycles or lowest register usage.
  SchedPreference SchedPreferenceInfo;

  /// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers
  unsigned JumpBufSize;

  /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
  /// buffers
  unsigned JumpBufAlignment;

  /// IfCvtBlockSizeLimit - The maximum allowed size for a block to be
  /// if-converted.
  unsigned IfCvtBlockSizeLimit;

  /// IfCvtDupBlockSizeLimit - The maximum allowed size for a block to be
  /// duplicated during if-conversion.
  unsigned IfCvtDupBlockSizeLimit;

  /// PrefLoopAlignment - The preferred loop alignment.
  ///
  unsigned PrefLoopAlignment;

  /// StackPointerRegisterToSaveRestore - If set to a physical register, this
  /// specifies the register that llvm.stacksave/llvm.stackrestore should save
  /// and restore.
  unsigned StackPointerRegisterToSaveRestore;

  /// ExceptionPointerRegister - If set to a physical register, this specifies
  /// the register that receives the exception address on entry to a landing
  /// pad.
  unsigned ExceptionPointerRegister;

  /// ExceptionSelectorRegister - If set to a physical register, this specifies
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
  unsigned ExceptionSelectorRegister;

  /// RegClassForVT - This indicates the default register class to use for
  /// each ValueType the target supports natively.
  TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
  unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
  MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];

  /// TransformToType - For any value types we are promoting or expanding, this
  /// contains the value type that we are changing to.  For Expanded types, this
  /// contains one step of the expand (e.g. i64 -> i32), even if there are
  /// multiple steps required (e.g. i64 -> i16).  For types natively supported
  /// by the system, this holds the same type (e.g. i32 -> i32).
  MVT TransformToType[MVT::LAST_VALUETYPE];
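  //
  // For example, on a hypothetical target whose only legal integer type is
  // i32, one would expect:
  //
  //   TransformToType[MVT::i16] == MVT::i32   // i16 is promoted
  //   TransformToType[MVT::i64] == MVT::i32   // one expansion step of i64
  //   TransformToType[MVT::i32] == MVT::i32   // already legal
  //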

  // Defines the capacity of the TargetLowering::OpActions table
  static const int OpActionsCapacity = 212;

  /// OpActions - For each operation and each value type, keep a LegalizeAction
  /// that indicates how instruction selection should deal with the operation.
  /// Most operations are Legal (aka, supported natively by the target), but
  /// operations that are not should be described.  Note that operations on
  /// non-legal value types are not described here.
  uint64_t OpActions[OpActionsCapacity];
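  //
  // Targets populate this table through setOperationAction (declared earlier
  // in this class), e.g. in their TargetLowering constructor:
  //
  //   setOperationAction(ISD::SDIV, MVT::i32, Expand);
  //   setOperationAction(ISD::FSIN, MVT::f64, Expand);
  //
  // (The opcodes and types above are illustrative choices.)
  //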

  /// LoadXActions - For each load extension type and each value type, keep a
  /// LegalizeAction that indicates how instruction selection should deal with
  /// the load.
  uint64_t LoadXActions[ISD::LAST_LOADX_TYPE];

  /// TruncStoreActions - For each truncating store, keep a LegalizeAction that
  /// indicates how instruction selection should deal with the store.
  uint64_t TruncStoreActions[MVT::LAST_VALUETYPE];

  /// IndexedModeActions - For each indexed mode and each value type, keep a
  /// pair of LegalizeAction that indicates how instruction selection should
  /// deal with the load / store.
  uint64_t IndexedModeActions[2][ISD::LAST_INDEXED_MODE];

  /// ConvertActions - For each conversion from source type to destination type,
  /// keep a LegalizeAction that indicates how instruction selection should
  /// deal with the conversion.
  /// Currently, this is used only for floating->floating conversions
  /// (FP_EXTEND and FP_ROUND).
  uint64_t ConvertActions[MVT::LAST_VALUETYPE];

  ValueTypeActionImpl ValueTypeActions;

  std::vector<APFloat> LegalFPImmediates;

  std::vector<std::pair<MVT, TargetRegisterClass*> > AvailableRegClasses;

  /// TargetDAGCombineArray - Targets can specify ISD nodes that they would
  /// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(),
  /// which sets a bit in this array.
  unsigned char
  TargetDAGCombineArray[OpActionsCapacity/(sizeof(unsigned char)*8)];
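  //
  // A rough sketch of the bit packing (illustrative only): recording interest
  // in node type NT, and testing for it later, might look like
  //
  //   TargetDAGCombineArray[NT >> 3] |= 1 << (NT & 7);               // set
  //   bool Wanted = TargetDAGCombineArray[NT >> 3] & (1 << (NT & 7)); // test
  //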

  /// PromoteToType - For operations that must be promoted to a specific type,
  /// this holds the destination type.  This map should be sparse, so don't hold
  /// it as an array.
  ///
  /// Targets add entries to this map with AddPromotedToType(..), clients access
  /// this with getTypeToPromoteTo(..).
  std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
    PromoteToType;
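  //
  // For example, a target whose narrowest legal integer type is i8 might
  // record (illustrative opcode and types):
  //
  //   setOperationAction(ISD::LOAD, MVT::i1, Promote);
  //   AddPromotedToType(ISD::LOAD, MVT::i1, MVT::i8);
  //
  // after which getTypeToPromoteTo(ISD::LOAD, MVT::i1) returns MVT::i8.
  //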

  /// LibcallRoutineNames - Stores the name of each libcall.
  ///
  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];

  /// CmpLibcallCCs - The ISD::CondCode that should be used to test the result
  /// of each of the comparison libcalls against zero.
  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];

protected:
  /// When lowering @llvm.memset this field specifies the maximum number of
  /// store operations that may be substituted for the call to memset. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memset will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
  /// store.  This only applies to setting a constant array of a constant size.
  /// @brief Specify maximum number of store instructions per memset call.
  unsigned maxStoresPerMemset;

  /// When lowering @llvm.memcpy this field specifies the maximum number of
  /// store operations that may be substituted for a call to memcpy. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memcpy will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
  /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
  /// and one 1-byte store. This only applies to copying a constant array of
  /// constant size.
  /// @brief Specify maximum number of store instructions per memcpy call.
  unsigned maxStoresPerMemcpy;

  /// When lowering @llvm.memmove this field specifies the maximum number of
  /// store instructions that may be substituted for a call to memmove. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memmove will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
  /// with 8-bit alignment would result in nine 1-byte stores.  This only
  /// applies to copying a constant array of constant size.
  /// @brief Specify maximum number of store instructions per memmove call.
  unsigned maxStoresPerMemmove;

  /// This field specifies whether the target machine permits unaligned memory
  /// accesses.  This is used, for example, to determine the size of store
  /// operations when copying small arrays and other similar tasks.
  /// @brief Indicate whether the target permits unaligned memory accesses.
  bool allowUnalignedMemoryAccesses;
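  //
  // For example, a target's TargetLowering constructor might choose
  // (illustrative values only):
  //
  //   maxStoresPerMemset  = 16;
  //   maxStoresPerMemcpy  = 8;
  //   maxStoresPerMemmove = 8;
  //   allowUnalignedMemoryAccesses = true;
  //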
};
} // end llvm namespace

#endif