TargetLowering.h revision 30e62c098b5841259f8026df1c5c45c7c1182a38
1//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file describes how to lower LLVM code to machine code.  This has two
11// main components:
12//
13//  1. Which ValueTypes are natively supported by the target.
14//  2. Which operations are supported for supported ValueTypes.
15//  3. Cost thresholds for alternative implementations of certain operations.
16//
17// In addition it has a few other components, like information about FP
18// immediates.
19//
20//===----------------------------------------------------------------------===//
21
22#ifndef LLVM_TARGET_TARGETLOWERING_H
23#define LLVM_TARGET_TARGETLOWERING_H
24
25#include "llvm/Constants.h"
26#include "llvm/InlineAsm.h"
27#include "llvm/CodeGen/SelectionDAGNodes.h"
28#include "llvm/CodeGen/RuntimeLibcalls.h"
29#include "llvm/ADT/APFloat.h"
30#include "llvm/ADT/STLExtras.h"
31#include <map>
32#include <vector>
33
34namespace llvm {
35  class Value;
36  class Function;
37  class TargetMachine;
38  class TargetData;
39  class TargetRegisterClass;
40  class SDNode;
41  class SDOperand;
42  class SelectionDAG;
43  class MachineBasicBlock;
44  class MachineInstr;
45  class VectorType;
46  class TargetSubtarget;
47
48//===----------------------------------------------------------------------===//
49/// TargetLowering - This class defines information used to lower LLVM code to
50/// legal SelectionDAG operators that the target instruction selector can accept
51/// natively.
52///
53/// This class also defines callbacks that targets must implement to lower
54/// target-specific constructs to SelectionDAG operators.
55///
56class TargetLowering {
57public:
  /// LegalizeAction - This enum indicates whether operations are valid for a
  /// target, and if not, what action should be used to make them valid.
  enum LegalizeAction {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand,     // Try to expand this to other ops, otherwise use a libcall.
    Custom      // Use the LowerOperation hook to implement custom lowering.
  };

  /// OutOfRangeShiftAmount - Describes how the target treats shift amounts
  /// that are too large for the shifted value's bit width.
  enum OutOfRangeShiftAmount {
    Undefined,  // Oversized shift amounts are undefined (default).
    Mask,       // Shift amounts are auto masked (anded) to value size.
    Extend      // Oversized shift pulls in zeros or sign bits.
  };

  /// SetCCResultValue - Describes the contents of the high bits of the
  /// register produced by a setcc, for targets without boolean registers.
  enum SetCCResultValue {
    UndefinedSetCCResult,          // SetCC returns a garbage/unknown extend.
    ZeroOrOneSetCCResult,          // SetCC returns a zero extended result.
    ZeroOrNegativeOneSetCCResult   // SetCC returns a sign extended result.
  };

  /// SchedPreference - The instruction scheduling strategy this target
  /// prefers.
  enum SchedPreference {
    SchedulingForLatency,          // Scheduling for shortest total latency.
    SchedulingForRegPressure       // Scheduling for lowest register pressure.
  };
83
  /// Construct a TargetLowering configured for the given target machine.
  /// Marked explicit to prevent accidental implicit conversion.
  explicit TargetLowering(TargetMachine &TM);
  virtual ~TargetLowering();

  /// getTargetMachine - Return the target machine this object was built for.
  TargetMachine &getTargetMachine() const { return TM; }
  /// getTargetData - Return the target's data layout description.
  const TargetData *getTargetData() const { return TD; }

  /// isBigEndian / isLittleEndian - Query the target's byte order.
  bool isBigEndian() const { return !IsLittleEndian; }
  bool isLittleEndian() const { return IsLittleEndian; }
  /// getPointerTy - Return the value type used for pointers on this target.
  MVT::ValueType getPointerTy() const { return PointerTy; }
  /// getShiftAmountTy - Return the type used for shift-amount operands.
  MVT::ValueType getShiftAmountTy() const { return ShiftAmountTy; }
  /// getShiftAmountFlavor - Return how out-of-range shift amounts behave.
  OutOfRangeShiftAmount getShiftAmountFlavor() const {return ShiftAmtHandling; }

  /// usesGlobalOffsetTable - Return true if this target uses a GOT for PIC
  /// codegen.
  bool usesGlobalOffsetTable() const { return UsesGlobalOffsetTable; }
99
  /// isSelectExpensive - Return true if the select operation is expensive for
  /// this target.
  bool isSelectExpensive() const { return SelectIsExpensive; }

  /// isIntDivCheap() - Return true if integer divide is usually cheaper than
  /// a sequence of several shifts, adds, and multiplies for this target.
  bool isIntDivCheap() const { return IntDivIsCheap; }

  /// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of
  /// srl/add/sra.
  bool isPow2DivCheap() const { return Pow2DivIsCheap; }

  /// getSetCCResultType - Return the ValueType of the result of setcc
  /// operations.  Virtual so targets can override; default defined elsewhere.
  virtual MVT::ValueType getSetCCResultType(const SDOperand &) const;

  /// getSetCCResultContents - For targets without boolean registers, this flag
  /// returns information about the contents of the high-bits in the setcc
  /// result register.
  SetCCResultValue getSetCCResultContents() const { return SetCCResultContents;}

  /// getSchedulingPreference - Return target scheduling preference.
  SchedPreference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }
125
126  /// getRegClassFor - Return the register class that should be used for the
127  /// specified value type.  This may only be called on legal types.
128  TargetRegisterClass *getRegClassFor(MVT::ValueType VT) const {
129    assert(VT < array_lengthof(RegClassForVT));
130    TargetRegisterClass *RC = RegClassForVT[VT];
131    assert(RC && "This value type is not natively supported!");
132    return RC;
133  }
134
  /// isTypeLegal - Return true if the target has native support for the
  /// specified value type.  This means that it has a register that directly
  /// holds it without promotions or expansions.
  bool isTypeLegal(MVT::ValueType VT) const {
    assert(MVT::isExtendedVT(VT) || VT < array_lengthof(RegClassForVT));
    // Extended types are never legal; a simple type is legal exactly when a
    // register class has been registered for it.
    return !MVT::isExtendedVT(VT) && RegClassForVT[VT] != 0;
  }
142
  /// ValueTypeActionImpl - Packed table of legalize actions.  Each simple
  /// value type occupies two bits (one LegalizeAction) inside a pair of
  /// 32-bit words, covering up to 32 simple value types.
  class ValueTypeActionImpl {
    /// ValueTypeActions - This is a bitvector that contains two bits for each
    /// value type, where the two bits correspond to the LegalizeAction enum.
    /// This can be queried with "getTypeAction(VT)".
    uint32_t ValueTypeActions[2];
  public:
    // All actions default to Legal (enum value 0).
    ValueTypeActionImpl() {
      ValueTypeActions[0] = ValueTypeActions[1] = 0;
    }
    ValueTypeActionImpl(const ValueTypeActionImpl &RHS) {
      ValueTypeActions[0] = RHS.ValueTypeActions[0];
      ValueTypeActions[1] = RHS.ValueTypeActions[1];
    }

    /// getTypeAction - Return how the given type should be legalized.
    LegalizeAction getTypeAction(MVT::ValueType VT) const {
      // Extended (non-simple) types are not in the table; derive their
      // action directly.
      if (MVT::isExtendedVT(VT)) {
        if (MVT::isVector(VT)) return Expand;
        if (MVT::isInteger(VT))
          // First promote to a power-of-two size, then expand if necessary.
          return VT == MVT::RoundIntegerType(VT) ? Expand : Promote;
        assert(0 && "Unsupported extended type!");
      }
      // Word VT>>4 holds types 16*w .. 16*w+15, two bits each.
      assert(VT<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
      return (LegalizeAction)((ValueTypeActions[VT>>4] >> ((2*VT) & 31)) & 3);
    }
    /// setTypeAction - Record the legalize action for a simple value type.
    /// Bits are OR'd in, so each type's action should be set at most once.
    void setTypeAction(MVT::ValueType VT, LegalizeAction Action) {
      assert(VT<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
      ValueTypeActions[VT>>4] |= Action << ((VT*2) & 31);
    }
  };
173
  /// getValueTypeActions - Return the packed per-type legalize-action table.
  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// getTypeAction - Return how we should legalize values of this type, either
  /// it is already legal (return 'Legal') or we need to promote it to a larger
  /// type (return 'Promote'), or we need to expand it into multiple registers
  /// of smaller integer type (return 'Expand').  'Custom' is not an option.
  LegalizeAction getTypeAction(MVT::ValueType VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }
185
  /// getTypeToTransformTo - For types supported by the target, this is an
  /// identity function.  For types that must be promoted to larger types, this
  /// returns the larger type to promote to.  For integer types that are larger
  /// than the largest integer register, this contains one step in the expansion
  /// to get to the smaller register. For illegal floating point types, this
  /// returns the integer type to transform to.
  MVT::ValueType getTypeToTransformTo(MVT::ValueType VT) const {
    if (!MVT::isExtendedVT(VT)) {
      // Simple types: direct table lookup, precomputed by the target.
      assert(VT < array_lengthof(TransformToType));
      MVT::ValueType NVT = TransformToType[VT];
      assert(getTypeAction(NVT) != Promote &&
             "Promote may not follow Expand or Promote");
      return NVT;
    }

    // Extended vector types are split into half-length vectors.
    if (MVT::isVector(VT))
      return MVT::getVectorType(MVT::getVectorElementType(VT),
                                MVT::getVectorNumElements(VT) / 2);
    if (MVT::isInteger(VT)) {
      MVT::ValueType NVT = MVT::RoundIntegerType(VT);
      if (NVT == VT)
        // Size is a power of two - expand to half the size.
        return MVT::getIntegerType(MVT::getSizeInBits(VT) / 2);
      else
        // Promote to a power of two size, avoiding multi-step promotion.
        return getTypeAction(NVT) == Promote ? getTypeToTransformTo(NVT) : NVT;
    }
    assert(0 && "Unsupported extended type!");
    return MVT::ValueType(); // Not reached
  }
216
217  /// getTypeToExpandTo - For types supported by the target, this is an
218  /// identity function.  For types that must be expanded (i.e. integer types
219  /// that are larger than the largest integer register or illegal floating
220  /// point types), this returns the largest legal type it will be expanded to.
221  MVT::ValueType getTypeToExpandTo(MVT::ValueType VT) const {
222    assert(!MVT::isVector(VT));
223    while (true) {
224      switch (getTypeAction(VT)) {
225      case Legal:
226        return VT;
227      case Expand:
228        VT = getTypeToTransformTo(VT);
229        break;
230      default:
231        assert(false && "Type is not legal nor is it to be expanded!");
232        return VT;
233      }
234    }
235    return VT;
236  }
237
  /// getVectorTypeBreakdown - Vector types are broken down into some number of
  /// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
  /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
  /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register.  It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  ///
  unsigned getVectorTypeBreakdown(MVT::ValueType VT,
                                  MVT::ValueType &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT::ValueType &RegisterVT) const;

  /// legal_fpimm_begin/end - Iterate over the FP immediates this target can
  /// materialize natively (backed by the LegalFPImmediates member).
  typedef std::vector<APFloat>::const_iterator legal_fpimm_iterator;
  legal_fpimm_iterator legal_fpimm_begin() const {
    return LegalFPImmediates.begin();
  }
  legal_fpimm_iterator legal_fpimm_end() const {
    return LegalFPImmediates.end();
  }
259
  /// isShuffleMaskLegal - Targets can use this to indicate that they only
  /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
  /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
  /// are assumed to be legal.
  virtual bool isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
    return true;
  }

  /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal.  Targets can use
  /// this to indicate whether there is a suitable VECTOR_SHUFFLE that can be
  /// used to replace a VAND with a constant pool entry.  Defaults to false.
  virtual bool isVectorClearMaskLegal(const std::vector<SDOperand> &BVOps,
                                      MVT::ValueType EVT,
                                      SelectionDAG &DAG) const {
    return false;
  }
277
  /// getOperationAction - Return how this operation should be treated: either
  /// it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction getOperationAction(unsigned Op, MVT::ValueType VT) const {
    // Extended types are never tracked in the table; always expand them.
    if (MVT::isExtendedVT(VT)) return Expand;
    assert(Op < array_lengthof(OpActions) &&
           VT < sizeof(OpActions[0])*4 && "Table isn't big enough!");
    // Two bits per value type within each per-opcode entry.
    return (LegalizeAction)((OpActions[Op] >> (2*VT)) & 3);
  }

  /// isOperationLegal - Return true if the specified operation is legal on this
  /// target.  Note that Custom-lowered operations count as legal here.
  bool isOperationLegal(unsigned Op, MVT::ValueType VT) const {
    return getOperationAction(Op, VT) == Legal ||
           getOperationAction(Op, VT) == Custom;
  }
295
  /// getLoadXAction - Return how this load with extension should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction getLoadXAction(unsigned LType, MVT::ValueType VT) const {
    assert(LType < array_lengthof(LoadXActions) &&
           VT < sizeof(LoadXActions[0])*4 && "Table isn't big enough!");
    // Two bits per value type within each per-extension-kind entry.
    return (LegalizeAction)((LoadXActions[LType] >> (2*VT)) & 3);
  }

  /// isLoadXLegal - Return true if the specified load with extension is legal
  /// on this target.  Custom-lowered extensions count as legal.
  bool isLoadXLegal(unsigned LType, MVT::ValueType VT) const {
    return !MVT::isExtendedVT(VT) &&
      (getLoadXAction(LType, VT) == Legal ||
       getLoadXAction(LType, VT) == Custom);
  }

  /// getTruncStoreAction - Return how this store with truncation should be
  /// treated: either it is legal, needs to be promoted to a larger size, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction getTruncStoreAction(MVT::ValueType ValVT,
                                     MVT::ValueType MemVT) const {
    assert(ValVT < array_lengthof(TruncStoreActions) &&
           MemVT < sizeof(TruncStoreActions[0])*4 && "Table isn't big enough!");
    // Two bits per memory type within each per-value-type entry.
    return (LegalizeAction)((TruncStoreActions[ValVT] >> (2*MemVT)) & 3);
  }

  /// isTruncStoreLegal - Return true if the specified store with truncation is
  /// legal on this target.  Custom-lowered truncating stores count as legal.
  bool isTruncStoreLegal(MVT::ValueType ValVT, MVT::ValueType MemVT) const {
    return !MVT::isExtendedVT(MemVT) &&
      (getTruncStoreAction(ValVT, MemVT) == Legal ||
       getTruncStoreAction(ValVT, MemVT) == Custom);
  }
332
  /// getIndexedLoadAction - Return how the indexed load should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction
  getIndexedLoadAction(unsigned IdxMode, MVT::ValueType VT) const {
    // IndexedModeActions[0] holds the load table; [1] holds the store table.
    assert(IdxMode < array_lengthof(IndexedModeActions[0]) &&
           VT < sizeof(IndexedModeActions[0][0])*4 &&
           "Table isn't big enough!");
    return (LegalizeAction)((IndexedModeActions[0][IdxMode] >> (2*VT)) & 3);
  }

  /// isIndexedLoadLegal - Return true if the specified indexed load is legal
  /// on this target.  Custom-lowered indexed loads count as legal.
  bool isIndexedLoadLegal(unsigned IdxMode, MVT::ValueType VT) const {
    return getIndexedLoadAction(IdxMode, VT) == Legal ||
           getIndexedLoadAction(IdxMode, VT) == Custom;
  }

  /// getIndexedStoreAction - Return how the indexed store should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction
  getIndexedStoreAction(unsigned IdxMode, MVT::ValueType VT) const {
    assert(IdxMode < array_lengthof(IndexedModeActions[1]) &&
           VT < sizeof(IndexedModeActions[1][0])*4 &&
           "Table isn't big enough!");
    return (LegalizeAction)((IndexedModeActions[1][IdxMode] >> (2*VT)) & 3);
  }

  /// isIndexedStoreLegal - Return true if the specified indexed store is legal
  /// on this target.  Custom-lowered indexed stores count as legal.
  bool isIndexedStoreLegal(unsigned IdxMode, MVT::ValueType VT) const {
    return getIndexedStoreAction(IdxMode, VT) == Legal ||
           getIndexedStoreAction(IdxMode, VT) == Custom;
  }
370
  /// getConvertAction - Return how the conversion should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction
  getConvertAction(MVT::ValueType FromVT, MVT::ValueType ToVT) const {
    assert(FromVT < array_lengthof(ConvertActions) &&
           ToVT < sizeof(ConvertActions[0])*4 && "Table isn't big enough!");
    // Two bits per destination type within each per-source-type entry.
    return (LegalizeAction)((ConvertActions[FromVT] >> (2*ToVT)) & 3);
  }

  /// isConvertLegal - Return true if the specified conversion is legal
  /// on this target.  Custom-lowered conversions count as legal.
  bool isConvertLegal(MVT::ValueType FromVT, MVT::ValueType ToVT) const {
    return getConvertAction(FromVT, ToVT) == Legal ||
           getConvertAction(FromVT, ToVT) == Custom;
  }
388
  /// getTypeToPromoteTo - If the action for this operation is to promote, this
  /// method returns the ValueType to promote to.
  MVT::ValueType getTypeToPromoteTo(unsigned Op, MVT::ValueType VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::ValueType>,
             MVT::ValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((MVT::isInteger(VT) || MVT::isFloatingPoint(VT)) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    // Otherwise, try successively larger value types until one is found that
    // is legal and whose action for this operation is not itself Promote.
    MVT::ValueType NVT = VT;
    do {
      NVT = (MVT::ValueType)(NVT+1);
      // Must stay in the same class (integer vs FP) and not run off the end
      // of the value-type enumeration.
      assert(MVT::isInteger(NVT) == MVT::isInteger(VT) && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
              getOperationAction(Op, NVT) == Promote);
    return NVT;
  }
413
  /// getValueType - Return the MVT::ValueType corresponding to this LLVM type.
  /// This is fixed by the LLVM operations except for the pointer size.  If
  /// AllowUnknown is true, this will return MVT::Other for types with no MVT
  /// counterpart (e.g. structs), otherwise it will assert.
  MVT::ValueType getValueType(const Type *Ty, bool AllowUnknown = false) const {
    MVT::ValueType VT = MVT::getValueType(Ty, AllowUnknown);
    // Resolve the abstract iPTR type to this target's concrete pointer type.
    return VT == MVT::iPTR ? PointerTy : VT;
  }

  /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
  /// function arguments in the caller parameter area.  This is the actual
  /// alignment, not its logarithm.
  virtual unsigned getByValTypeAlignment(const Type *Ty) const;
427
  /// getRegisterType - Return the type of registers that this ValueType will
  /// eventually require.
  MVT::ValueType getRegisterType(MVT::ValueType VT) const {
    if (!MVT::isExtendedVT(VT)) {
      // Simple types: precomputed table lookup.
      assert(VT < array_lengthof(RegisterTypeForVT));
      return RegisterTypeForVT[VT];
    }
    if (MVT::isVector(VT)) {
      // Extended vectors: the register type comes from the type breakdown.
      MVT::ValueType VT1, RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(VT, VT1, NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (MVT::isInteger(VT)) {
      // Extended integers: transform one step and recurse.
      return getRegisterType(getTypeToTransformTo(VT));
    }
    assert(0 && "Unsupported extended type!");
    return MVT::ValueType(); // Not reached
  }
447
448  /// getNumRegisters - Return the number of registers that this ValueType will
449  /// eventually require.  This is one for any types promoted to live in larger
450  /// registers, but may be more than one for types (like i64) that are split
451  /// into pieces.  For types like i140, which are first promoted then expanded,
452  /// it is the number of registers needed to hold all the bits of the original
453  /// type.  For an i140 on a 32 bit machine this means 5 registers.
454  unsigned getNumRegisters(MVT::ValueType VT) const {
455    if (!MVT::isExtendedVT(VT)) {
456      assert(VT < array_lengthof(NumRegistersForVT));
457      return NumRegistersForVT[VT];
458    }
459    if (MVT::isVector(VT)) {
460      MVT::ValueType VT1, VT2;
461      unsigned NumIntermediates;
462      return getVectorTypeBreakdown(VT, VT1, NumIntermediates, VT2);
463    }
464    if (MVT::isInteger(VT)) {
465      unsigned BitWidth = MVT::getSizeInBits(VT);
466      unsigned RegWidth = MVT::getSizeInBits(getRegisterType(VT));
467      return (BitWidth + RegWidth - 1) / RegWidth;
468    }
469    assert(0 && "Unsupported extended type!");
470    return 0; // Not reached
471  }
472
  /// ShouldShrinkFPConstant - If true, then instruction selection should
  /// seek to shrink the FP constant of the specified type to a smaller type
  /// in order to save space and / or reduce runtime.  Defaults to true.
  virtual bool ShouldShrinkFPConstant(MVT::ValueType VT) const { return true; }

  /// hasTargetDAGCombine - If true, the target has custom DAG combine
  /// transformations that it can perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    // One bit per ISD opcode, packed eight to a byte.
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }
484
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset. The value is set by the target at the
  /// performance threshold for such a replacement.
  /// @brief Get maximum # of store operations permitted for llvm.memset
  unsigned getMaxStoresPerMemset() const { return maxStoresPerMemset; }

  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy. The value is set by the target at the
  /// performance threshold for such a replacement.
  /// @brief Get maximum # of store operations permitted for llvm.memcpy
  unsigned getMaxStoresPerMemcpy() const { return maxStoresPerMemcpy; }

  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove. The value is set by the target at the
  /// performance threshold for such a replacement.
  /// @brief Get maximum # of store operations permitted for llvm.memmove
  unsigned getMaxStoresPerMemmove() const { return maxStoresPerMemmove; }

  /// This function returns true if the target allows unaligned memory accesses.
  /// This is used, for example, in situations where an array copy/move/set is
  /// converted to a sequence of store operations. Its use helps to ensure that
  /// such replacements don't generate code that causes an alignment error
  /// (trap) on the target machine.
  /// @brief Determine if the target supports unaligned memory accesses.
  bool allowsUnalignedMemoryAccesses() const {
    return allowUnalignedMemoryAccesses;
  }

  /// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
  /// to implement llvm.setjmp.
  bool usesUnderscoreSetJmp() const {
    return UseUnderscoreSetJmp;
  }

  /// usesUnderscoreLongJmp - Determine if we should use _longjmp or longjmp
  /// to implement llvm.longjmp.
  bool usesUnderscoreLongJmp() const {
    return UseUnderscoreLongJmp;
  }

  /// getStackPointerRegisterToSaveRestore - If a physical register, this
  /// specifies the register that llvm.savestack/llvm.restorestack should save
  /// and restore.
  unsigned getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }
531
  /// getExceptionAddressRegister - If a physical register, this returns
  /// the register that receives the exception address on entry to a landing
  /// pad.
  unsigned getExceptionAddressRegister() const {
    return ExceptionPointerRegister;
  }

  /// getExceptionSelectorRegister - If a physical register, this returns
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
  unsigned getExceptionSelectorRegister() const {
    return ExceptionSelectorRegister;
  }

  /// getJumpBufSize - returns the target's jmp_buf size in bytes (if never
  /// set, the default is 200)
  unsigned getJumpBufSize() const {
    return JumpBufSize;
  }

  /// getJumpBufAlignment - returns the target's jmp_buf alignment in bytes
  /// (if never set, the default is 0)
  unsigned getJumpBufAlignment() const {
    return JumpBufAlignment;
  }

  /// getIfCvtBlockSizeLimit - returns the target specific if-conversion block
  /// size limit. Any block whose size is greater should not be predicated.
  unsigned getIfCvtBlockSizeLimit() const {
    return IfCvtBlockSizeLimit;
  }

  /// getIfCvtDupBlockSizeLimit - returns the target specific size limit for a
  /// block to be considered for duplication. Any block whose size is greater
  /// should not be duplicated to facilitate its predication.
  unsigned getIfCvtDupBlockSizeLimit() const {
    return IfCvtDupBlockSizeLimit;
  }

  /// getPrefLoopAlignment - return the preferred loop alignment.
  ///
  unsigned getPrefLoopAlignment() const {
    return PrefLoopAlignment;
  }
576
  /// getPreIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if the node's address
  /// can be legally represented as pre-indexed load / store address.
  /// The default implementation supports no pre-indexed addressing.
  virtual bool getPreIndexedAddressParts(SDNode *N, SDOperand &Base,
                                         SDOperand &Offset,
                                         ISD::MemIndexedMode &AM,
                                         SelectionDAG &DAG) {
    return false;
  }

  /// getPostIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if this node can be
  /// combined with a load / store to form a post-indexed load / store.
  /// The default implementation supports no post-indexed addressing.
  virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                          SDOperand &Base, SDOperand &Offset,
                                          ISD::MemIndexedMode &AM,
                                          SelectionDAG &DAG) {
    return false;
  }

  /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
  /// jumptable.
  virtual SDOperand getPICJumpTableRelocBase(SDOperand Table,
                                             SelectionDAG &DAG) const;
601
  //===--------------------------------------------------------------------===//
  // TargetLowering Optimization Methods
  //

  /// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two
  /// SDOperands for returning information from TargetLowering to its clients
  /// that want to combine.
  struct TargetLoweringOpt {
    SelectionDAG &DAG;   // The DAG being optimized.
    bool AfterLegalize;  // True if legalization has already been run.
    SDOperand Old;       // Node to be replaced (set by CombineTo).
    SDOperand New;       // Its replacement (set by CombineTo).

    explicit TargetLoweringOpt(SelectionDAG &InDAG, bool afterLegalize)
      : DAG(InDAG), AfterLegalize(afterLegalize) {}

    /// CombineTo - Record that O should be replaced by N; always reports
    /// success so callers can 'return TLO.CombineTo(...)'.
    bool CombineTo(SDOperand O, SDOperand N) {
      Old = O;
      New = N;
      return true;
    }

    /// ShrinkDemandedConstant - Check to see if the specified operand of the
    /// specified instruction is a constant integer.  If so, check to see if
    /// there are any bits set in the constant that are not demanded.  If so,
    /// shrink the constant and return true.
    bool ShrinkDemandedConstant(SDOperand Op, const APInt &Demanded);
  };
630
  /// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
  /// DemandedMask bits of the result of Op are ever used downstream.  If we can
  /// use this information to simplify Op, create a new simplified DAG node and
  /// return true, returning the original and new nodes in Old and New.
  /// Otherwise, analyze the expression and return a mask of KnownOne and
  /// KnownZero bits for the expression (used to simplify the caller).
  /// The KnownZero/One bits may only be accurate for those bits in the
  /// DemandedMask.
  bool SimplifyDemandedBits(SDOperand Op, const APInt &DemandedMask,
                            APInt &KnownZero, APInt &KnownOne,
                            TargetLoweringOpt &TLO, unsigned Depth = 0) const;

  /// computeMaskedBitsForTargetNode - Determine which of the bits specified in
  /// Mask are known to be either zero or one and return them in the
  /// KnownZero/KnownOne bitsets.  Targets override this for target nodes.
  virtual void computeMaskedBitsForTargetNode(const SDOperand Op,
                                              const APInt &Mask,
                                              APInt &KnownZero,
                                              APInt &KnownOne,
                                              const SelectionDAG &DAG,
                                              unsigned Depth = 0) const;

  /// ComputeNumSignBitsForTargetNode - This method can be implemented by
  /// targets that want to expose additional information about sign bits to the
  /// DAG Combiner.
  virtual unsigned ComputeNumSignBitsForTargetNode(SDOperand Op,
                                                   unsigned Depth = 0) const;
658
  /// DAGCombinerInfo - Context handed to PerformDAGCombine, carrying the
  /// combiner's state and node-replacement helpers.
  struct DAGCombinerInfo {
    void *DC;  // The DAG Combiner object.
    bool BeforeLegalize;     // True when combining before legalization.
    bool CalledByLegalizer;  // True when invoked by the legalizer itself.
  public:
    SelectionDAG &DAG;

    DAGCombinerInfo(SelectionDAG &dag, bool bl, bool cl, void *dc)
      : DC(dc), BeforeLegalize(bl), CalledByLegalizer(cl), DAG(dag) {}

    bool isBeforeLegalize() const { return BeforeLegalize; }
    bool isCalledByLegalizer() const { return CalledByLegalizer; }

    /// AddToWorklist - Queue a node for (re)visiting by the combiner.
    void AddToWorklist(SDNode *N);
    /// CombineTo - Replace node N with the given value(s); declared here,
    /// defined with the DAG combiner implementation.
    SDOperand CombineTo(SDNode *N, const std::vector<SDOperand> &To);
    SDOperand CombineTo(SDNode *N, SDOperand Res);
    SDOperand CombineTo(SDNode *N, SDOperand Res0, SDOperand Res1);
  };
677
  /// SimplifySetCC - Try to simplify a setcc built with the specified operands
  /// and cc. If it is unable to simplify it, return a null SDOperand.
  SDOperand SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1,
                          ISD::CondCode Cond, bool foldBooleans,
                          DAGCombinerInfo &DCI) const;

  /// PerformDAGCombine - This method will be invoked for all target nodes and
  /// for any target-independent nodes that the target has registered with
  /// invoke it for.
  ///
  /// The semantics are as follows:
  /// Return Value:
  ///   SDOperand.Val == 0   - No change was made
  ///   SDOperand.Val == N   - N was replaced, is dead, and is already handled.
  ///   otherwise            - N should be replaced by the returned Operand.
  ///
  /// In addition, methods provided by DAGCombinerInfo may be used to perform
  /// more complex transformations.
  ///
  virtual SDOperand PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
698
699  //===--------------------------------------------------------------------===//
700  // TargetLowering Configuration Methods - These methods should be invoked by
701  // the derived class constructor to configure this object for the target.
702  //
703
protected:
  /// setUsesGlobalOffsetTable - Specify that this target does or doesn't use a
  /// GOT for PC-relative code.  Records V in UsesGlobalOffsetTable.
  void setUsesGlobalOffsetTable(bool V) { UsesGlobalOffsetTable = V; }
708
  /// setShiftAmountType - Describe the type that should be used for shift
  /// amounts (recorded in ShiftAmountTy).  This type defaults to the pointer
  /// type.
  void setShiftAmountType(MVT::ValueType VT) { ShiftAmountTy = VT; }
712
  /// setSetCCResultContents - Specify how the target extends the result of a
  /// setcc operation in a register (recorded in SetCCResultContents).
  void setSetCCResultContents(SetCCResultValue Ty) { SetCCResultContents = Ty; }
716
  /// setSchedulingPreference - Specify the target scheduling preference
  /// (recorded in SchedPreferenceInfo).
  void setSchedulingPreference(SchedPreference Pref) {
    SchedPreferenceInfo = Pref;
  }
721
  /// setShiftAmountFlavor - Describe how the target handles out of range shift
  /// amounts (recorded in ShiftAmtHandling).
  void setShiftAmountFlavor(OutOfRangeShiftAmount OORSA) {
    ShiftAmtHandling = OORSA;
  }
727
  /// setUseUnderscoreSetJmp - Indicate whether this target prefers to
  /// use _setjmp to implement llvm.setjmp rather than the non-underscore
  /// version.  Defaults to false.
  void setUseUnderscoreSetJmp(bool Val) {
    UseUnderscoreSetJmp = Val;
  }
734
  /// setUseUnderscoreLongJmp - Indicate whether this target prefers to
  /// use _longjmp to implement llvm.longjmp rather than the non-underscore
  /// version.  Defaults to false.
  void setUseUnderscoreLongJmp(bool Val) {
    UseUnderscoreLongJmp = Val;
  }
741
  /// setStackPointerRegisterToSaveRestore - If set to a physical register, this
  /// specifies the register that llvm.stacksave/llvm.stackrestore should save
  /// and restore.
  void setStackPointerRegisterToSaveRestore(unsigned R) {
    StackPointerRegisterToSaveRestore = R;
  }
748
  /// setExceptionPointerRegister - If set to a physical register, this sets
  /// the register that receives the exception address on entry to a landing
  /// pad.
  void setExceptionPointerRegister(unsigned R) {
    ExceptionPointerRegister = R;
  }
755
  /// setExceptionSelectorRegister - If set to a physical register, this sets
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
  void setExceptionSelectorRegister(unsigned R) {
    ExceptionSelectorRegister = R;
  }
762
  /// setSelectIsExpensive - Tells the code generator not to expand operations
  /// into sequences that use the select operations if possible.  Note there is
  /// no parameter: this can only set the flag, never clear it.
  void setSelectIsExpensive() { SelectIsExpensive = true; }
766
  /// setIntDivIsCheap - Tells the code generator whether integer divide is
  /// cheap on this target.  When it is *not* cheap, the divide should, if
  /// possible, be replaced by an alternate sequence of instructions not
  /// containing an integer divide.  (The original comment stated the inverse
  /// of the flag's meaning; see the IntDivIsCheap member.)
  void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
771
  /// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
  /// srl/add/sra for a signed divide by a power of two, and should instead let
  /// the target handle it.
  void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }
776
777  /// addRegisterClass - Add the specified register class as an available
778  /// regclass for the specified value type.  This indicates the selector can
779  /// handle values of that class natively.
780  void addRegisterClass(MVT::ValueType VT, TargetRegisterClass *RC) {
781    assert(VT < array_lengthof(RegClassForVT));
782    AvailableRegClasses.push_back(std::make_pair(VT, RC));
783    RegClassForVT[VT] = RC;
784  }
785
  /// computeRegisterProperties - Once all of the register classes are added
  /// (via addRegisterClass), this allows us to compute derived properties we
  /// expose.
  void computeRegisterProperties();
789
790  /// setOperationAction - Indicate that the specified operation does not work
791  /// with the specified type and indicate what to do about it.
792  void setOperationAction(unsigned Op, MVT::ValueType VT,
793                          LegalizeAction Action) {
794    assert(VT < sizeof(OpActions[0])*4 && Op < array_lengthof(OpActions) &&
795           "Table isn't big enough!");
796    OpActions[Op] &= ~(uint64_t(3UL) << VT*2);
797    OpActions[Op] |= (uint64_t)Action << VT*2;
798  }
799
800  /// setLoadXAction - Indicate that the specified load with extension does not
801  /// work with the with specified type and indicate what to do about it.
802  void setLoadXAction(unsigned ExtType, MVT::ValueType VT,
803                      LegalizeAction Action) {
804    assert(VT < sizeof(LoadXActions[0])*4 &&
805           ExtType < array_lengthof(LoadXActions) &&
806           "Table isn't big enough!");
807    LoadXActions[ExtType] &= ~(uint64_t(3UL) << VT*2);
808    LoadXActions[ExtType] |= (uint64_t)Action << VT*2;
809  }
810
811  /// setTruncStoreAction - Indicate that the specified truncating store does
812  /// not work with the with specified type and indicate what to do about it.
813  void setTruncStoreAction(MVT::ValueType ValVT, MVT::ValueType MemVT,
814                           LegalizeAction Action) {
815    assert(ValVT < array_lengthof(TruncStoreActions) &&
816           MemVT < sizeof(TruncStoreActions[0])*4 && "Table isn't big enough!");
817    TruncStoreActions[ValVT] &= ~(uint64_t(3UL) << MemVT*2);
818    TruncStoreActions[ValVT] |= (uint64_t)Action << MemVT*2;
819  }
820
821  /// setIndexedLoadAction - Indicate that the specified indexed load does or
822  /// does not work with the with specified type and indicate what to do abort
823  /// it. NOTE: All indexed mode loads are initialized to Expand in
824  /// TargetLowering.cpp
825  void setIndexedLoadAction(unsigned IdxMode, MVT::ValueType VT,
826                            LegalizeAction Action) {
827    assert(VT < sizeof(IndexedModeActions[0])*4 && IdxMode <
828           array_lengthof(IndexedModeActions[0]) &&
829           "Table isn't big enough!");
830    IndexedModeActions[0][IdxMode] &= ~(uint64_t(3UL) << VT*2);
831    IndexedModeActions[0][IdxMode] |= (uint64_t)Action << VT*2;
832  }
833
834  /// setIndexedStoreAction - Indicate that the specified indexed store does or
835  /// does not work with the with specified type and indicate what to do about
836  /// it. NOTE: All indexed mode stores are initialized to Expand in
837  /// TargetLowering.cpp
838  void setIndexedStoreAction(unsigned IdxMode, MVT::ValueType VT,
839                             LegalizeAction Action) {
840    assert(VT < sizeof(IndexedModeActions[1][0])*4 &&
841           IdxMode < array_lengthof(IndexedModeActions[1]) &&
842           "Table isn't big enough!");
843    IndexedModeActions[1][IdxMode] &= ~(uint64_t(3UL) << VT*2);
844    IndexedModeActions[1][IdxMode] |= (uint64_t)Action << VT*2;
845  }
846
847  /// setConvertAction - Indicate that the specified conversion does or does
848  /// not work with the with specified type and indicate what to do about it.
849  void setConvertAction(MVT::ValueType FromVT, MVT::ValueType ToVT,
850                        LegalizeAction Action) {
851    assert(FromVT < array_lengthof(ConvertActions) &&
852           ToVT < sizeof(ConvertActions[0])*4 && "Table isn't big enough!");
853    ConvertActions[FromVT] &= ~(uint64_t(3UL) << ToVT*2);
854    ConvertActions[FromVT] |= (uint64_t)Action << ToVT*2;
855  }
856
  /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the
  /// promotion code defaults to trying a larger integer/fp until it can find
  /// one that works.  If that default is insufficient, this method can be used
  /// by the target to override the default.  The entry is keyed on the
  /// (Opc, OrigVT) pair in the PromoteToType map.
  void AddPromotedToType(unsigned Opc, MVT::ValueType OrigVT,
                         MVT::ValueType DestVT) {
    PromoteToType[std::make_pair(Opc, OrigVT)] = DestVT;
  }
865
  /// addLegalFPImmediate - Indicate that this target can instruction select
  /// the specified FP immediate natively.  The immediate is appended to
  /// LegalFPImmediates.
  void addLegalFPImmediate(const APFloat& Imm) {
    LegalFPImmediates.push_back(Imm);
  }
871
872  /// setTargetDAGCombine - Targets should invoke this method for each target
873  /// independent node that they want to provide a custom DAG combiner for by
874  /// implementing the PerformDAGCombine virtual method.
875  void setTargetDAGCombine(ISD::NodeType NT) {
876    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
877    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
878  }
879
  /// setJumpBufSize - Set the target's required jmp_buf buffer size (in
  /// bytes); default is 200.
  void setJumpBufSize(unsigned Size) {
    JumpBufSize = Size;
  }
885
  /// setJumpBufAlignment - Set the target's required jmp_buf buffer
  /// alignment (in bytes); default is 0.
  void setJumpBufAlignment(unsigned Align) {
    JumpBufAlignment = Align;
  }
891
  /// setIfCvtBlockSizeLimit - Set the target's if-conversion block size
  /// limit (in number of instructions); default is 2.
  void setIfCvtBlockSizeLimit(unsigned Limit) {
    IfCvtBlockSizeLimit = Limit;
  }
897
  /// setIfCvtDupBlockSizeLimit - Set the target's block size limit (in number
  /// of instructions) for a block to be considered for code duplication during
  /// if-conversion; default is 2.
  void setIfCvtDupBlockSizeLimit(unsigned Limit) {
    IfCvtDupBlockSizeLimit = Limit;
  }
904
  /// setPrefLoopAlignment - Set the target's preferred loop alignment.  The
  /// default alignment is zero, which means the target does not care about
  /// loop alignment.
  void setPrefLoopAlignment(unsigned Align) {
    PrefLoopAlignment = Align;
  }
910
public:

  /// getSubtarget - Return the TargetSubtarget for this target.  The default
  /// implementation asserts; targets that have a subtarget must override it.
  virtual const TargetSubtarget *getSubtarget() {
    assert(0 && "Not Implemented");
    return NULL;    // this is here to silence compiler warnings about a
                    // missing return when asserts are compiled out
  }
917  //===--------------------------------------------------------------------===//
918  // Lowering methods - These methods must be implemented by targets so that
919  // the SelectionDAGLowering code knows how to lower these.
920  //
921
  /// LowerArguments - This hook must be implemented to indicate how we should
  /// lower the arguments for the specified function into the specified DAG.
  virtual std::vector<SDOperand>
  LowerArguments(Function &F, SelectionDAG &DAG);
926
  /// LowerCallTo - This hook lowers an abstract call to a function into an
  /// actual call.  This returns a pair of operands.  The first element is the
  /// return value for the function (if RetTy is not VoidTy).  The second
  /// element is the outgoing token chain.
  struct ArgListEntry {
    SDOperand Node;      // The argument value.
    const Type* Ty;      // The argument's IR type.
    // NOTE(review): the flags below presumably mirror the like-named IR
    // parameter attributes (sext/zext/inreg/sret/nest/byval) -- confirm
    // against the LowerCallTo callers.
    bool isSExt;
    bool isZExt;
    bool isInReg;
    bool isSRet;
    bool isNest;
    bool isByVal;
    uint16_t Alignment;  // Defaulted to 0 by the constructor.

    ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
      isSRet(false), isNest(false), isByVal(false), Alignment(0) { }
  };
  typedef std::vector<ArgListEntry> ArgListTy;
  virtual std::pair<SDOperand, SDOperand>
  LowerCallTo(SDOperand Chain, const Type *RetTy, bool RetSExt, bool RetZExt,
              bool isVarArg, unsigned CallingConv, bool isTailCall,
              SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);
950
951
  /// EmitTargetCodeForMemcpy - Emit target-specific code that performs a
  /// memcpy. This can be used by targets to provide code sequences for cases
  /// that don't fit the target's parameters for simple loads/stores and can be
  /// more efficient than using a library call. This function can return a null
  /// SDOperand if the target declines to use custom code and a different
  /// lowering strategy should be used.
  ///
  /// If AlwaysInline is true, the size is constant and the target should not
  /// emit any calls and is strongly encouraged to attempt to emit inline code
  /// even if it is beyond the usual threshold because this intrinsic is being
  /// expanded in a place where calls are not feasible (e.g. within the prologue
  /// for another call). If the target chooses to decline an AlwaysInline
  /// request here, legalize will resort to using simple loads and stores.
  ///
  /// The default implementation always declines.
  virtual SDOperand
  EmitTargetCodeForMemcpy(SelectionDAG &DAG,
                          SDOperand Chain,
                          SDOperand Op1, SDOperand Op2,
                          SDOperand Op3, unsigned Align,
                          bool AlwaysInline,
                          const Value *DstSV, uint64_t DstOff,
                          const Value *SrcSV, uint64_t SrcOff) {
    return SDOperand();  // Null: use the generic lowering.
  }
975
  /// EmitTargetCodeForMemmove - Emit target-specific code that performs a
  /// memmove. This can be used by targets to provide code sequences for cases
  /// that don't fit the target's parameters for simple loads/stores and can be
  /// more efficient than using a library call. This function can return a null
  /// SDOperand if the target declines to use custom code and a different
  /// lowering strategy should be used.
  ///
  /// The default implementation always declines.
  virtual SDOperand
  EmitTargetCodeForMemmove(SelectionDAG &DAG,
                           SDOperand Chain,
                           SDOperand Op1, SDOperand Op2,
                           SDOperand Op3, unsigned Align,
                           const Value *DstSV, uint64_t DstOff,
                           const Value *SrcSV, uint64_t SrcOff) {
    return SDOperand();  // Null: use the generic lowering.
  }
991
  /// EmitTargetCodeForMemset - Emit target-specific code that performs a
  /// memset. This can be used by targets to provide code sequences for cases
  /// that don't fit the target's parameters for simple stores and can be more
  /// efficient than using a library call. This function can return a null
  /// SDOperand if the target declines to use custom code and a different
  /// lowering strategy should be used.
  ///
  /// The default implementation always declines.
  virtual SDOperand
  EmitTargetCodeForMemset(SelectionDAG &DAG,
                          SDOperand Chain,
                          SDOperand Op1, SDOperand Op2,
                          SDOperand Op3, unsigned Align,
                          const Value *DstSV, uint64_t DstOff) {
    return SDOperand();  // Null: use the generic lowering.
  }
1006
  /// LowerOperation - This callback is invoked for operations that are
  /// unsupported by the target, which are registered to use 'custom' lowering,
  /// and whose defined values are all legal.
  /// If the target has no operations that require custom lowering, it need not
  /// implement this.  The default implementation aborts.
  virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);
1013
  /// ExpandOperationResult - This callback is invoked for operations that are
  /// unsupported by the target, which are registered to use 'custom' lowering,
  /// and whose result type needs to be expanded.  This must return a node whose
  /// results precisely match the results of the input node.  This typically
  /// involves a MERGE_VALUES node and/or BUILD_PAIR.
  ///
  /// If the target has no operations that require custom lowering, it need not
  /// implement this.  The default implementation of this aborts.
  virtual SDNode *ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
    assert(0 && "ExpandOperationResult not implemented for this target!");
    return 0;  // Quiet the compiler when asserts are compiled out.
  }
1026
  /// IsEligibleForTailCallOptimization - Check whether the call is eligible for
  /// tail call optimization. Targets which want to do tail call optimization
  /// should override this function.  The default implementation never allows
  /// the optimization.
  virtual bool IsEligibleForTailCallOptimization(SDOperand Call,
                                                 SDOperand Ret,
                                                 SelectionDAG &DAG) const {
    return false;
  }
1035
1036  /// CheckTailCallReturnConstraints - Check whether CALL node immediatly
1037  /// preceeds the RET node and whether the return uses the result of the node
1038  /// or is a void return. This function can be used by the target to determine
1039  /// eligiblity of tail call optimization.
1040  static bool CheckTailCallReturnConstraints(SDOperand Call, SDOperand Ret) {
1041    unsigned NumOps = Ret.getNumOperands();
1042    if ((NumOps == 1 &&
1043       (Ret.getOperand(0) == SDOperand(Call.Val,1) ||
1044        Ret.getOperand(0) == SDOperand(Call.Val,0))) ||
1045      (NumOps > 1 &&
1046       Ret.getOperand(0) == SDOperand(Call.Val,Call.Val->getNumValues()-1) &&
1047       Ret.getOperand(1) == SDOperand(Call.Val,0)))
1048      return true;
1049    return false;
1050  }
1051
1052  /// GetPossiblePreceedingTailCall - Get preceeding TailCallNodeOpCode node if
1053  /// it exists skip possible ISD:TokenFactor.
1054  static SDOperand GetPossiblePreceedingTailCall(SDOperand Chain,
1055                                                 unsigned TailCallNodeOpCode) {
1056    if (Chain.getOpcode() == TailCallNodeOpCode) {
1057      return Chain;
1058    } else if (Chain.getOpcode() == ISD::TokenFactor) {
1059      if (Chain.getNumOperands() &&
1060          Chain.getOperand(0).getOpcode() == TailCallNodeOpCode)
1061        return Chain.getOperand(0);
1062    }
1063    return Chain;
1064  }
1065
  /// CustomPromoteOperation - This callback is invoked for operations that are
  /// unsupported by the target, are registered to use 'custom' lowering, and
  /// whose type needs to be promoted.
  virtual SDOperand CustomPromoteOperation(SDOperand Op, SelectionDAG &DAG);
1070
  /// getTargetNodeName - This method returns the name of a target specific
  /// DAG node identified by its opcode.
  virtual const char *getTargetNodeName(unsigned Opcode) const;
1074
1075  //===--------------------------------------------------------------------===//
1076  // Inline Asm Support hooks
1077  //
1078
  /// ConstraintType - The classification of an inline asm constraint, as
  /// returned by getConstraintType().
  enum ConstraintType {
    C_Register,            // Constraint represents a single register.
    C_RegisterClass,       // Constraint represents one or more registers.
    C_Memory,              // Memory constraint.
    C_Other,               // Something else.
    C_Unknown              // Unsupported constraint.
  };
1086
  /// AsmOperandInfo - This contains information for each constraint that we are
  /// lowering.
  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
    /// ConstraintCode - This contains the actual string for the code, like "m".
    std::string ConstraintCode;

    /// ConstraintType - Information about the constraint code, e.g. Register,
    /// RegisterClass, Memory, Other, Unknown.
    TargetLowering::ConstraintType ConstraintType;

    /// CallOperandVal - If this is the result output operand or a
    /// clobber, this is null, otherwise it is the incoming operand to the
    /// CallInst.  This gets modified as the asm is processed.
    Value *CallOperandVal;

    /// ConstraintVT - The ValueType for the operand value.
    MVT::ValueType ConstraintVT;

    // Start out with the constraint unclassified, no operand value, and an
    // unknown value type; the lowering code fills these in later.
    AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
      : InlineAsm::ConstraintInfo(info),
        ConstraintType(TargetLowering::C_Unknown),
        CallOperandVal(0), ConstraintVT(MVT::Other) {
    }
  };
1111
  /// ComputeConstraintToUse - Determines the constraint code and constraint
  /// type to use for the specific AsmOperandInfo, setting
  /// OpInfo.ConstraintCode and OpInfo.ConstraintType.  If the actual operand
  /// being passed in is available, it can be passed in as Op, otherwise an
  /// empty SDOperand can be passed.  DAG is optional and may be null.
  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                      SDOperand Op,
                                      SelectionDAG *DAG = 0) const;
1120
  /// getConstraintType - Given a constraint string, return the type of
  /// constraint it is for this target (see ConstraintType).
  virtual ConstraintType getConstraintType(const std::string &Constraint) const;
1124
  /// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"),
  /// return a list of registers that can be used to satisfy the constraint.
  /// This should only be used for C_RegisterClass constraints.
  virtual std::vector<unsigned>
  getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                    MVT::ValueType VT) const;
1131
  /// getRegForInlineAsmConstraint - Given a physical register constraint (e.g.
  /// {edx}), return the register number and the register class for the
  /// register.
  ///
  /// Given a register class constraint, like 'r', if this corresponds directly
  /// to an LLVM register class, return a register number of 0 and the register
  /// class pointer.
  ///
  /// This should only be used for C_Register constraints.  On error,
  /// this returns a register number of 0 and a null register class pointer.
  virtual std::pair<unsigned, const TargetRegisterClass*>
    getRegForInlineAsmConstraint(const std::string &Constraint,
                                 MVT::ValueType VT) const;
1145
  /// LowerXConstraint - Try to replace an X constraint, which matches anything,
  /// with another that has more specific requirements based on the type of the
  /// corresponding operand.  This returns null if there is no replacement to
  /// make.
  virtual const char *LowerXConstraint(MVT::ValueType ConstraintVT) const;
1151
  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
  /// vector.  ConstraintLetter is the single-letter constraint code being
  /// lowered.  If the operand is invalid for the constraint, don't add
  /// anything to Ops.
  virtual void LowerAsmOperandForConstraint(SDOperand Op, char ConstraintLetter,
                                            std::vector<SDOperand> &Ops,
                                            SelectionDAG &DAG) const;
1157
1158  //===--------------------------------------------------------------------===//
1159  // Scheduler hooks
1160  //
1161
  /// EmitInstrWithCustomInserter - This method should be implemented by targets
  /// that mark instructions with the 'usesCustomDAGSchedInserter' flag.  These
  /// instructions are special in various ways, which require special support to
  /// insert.  The specified MachineInstr is created but not inserted into any
  /// basic blocks, and the scheduler passes ownership of it to this method.
  virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
                                                        MachineBasicBlock *MBB);
1169
1170  //===--------------------------------------------------------------------===//
1171  // Addressing mode description hooks (used by LSR etc).
1172  //
1173
  /// AddrMode - This represents an addressing mode of:
  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
  /// If BaseGV is null,  there is no BaseGV.
  /// If BaseOffs is zero, there is no base offset.
  /// If HasBaseReg is false, there is no base register.
  /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
  /// no scale.
  ///
  struct AddrMode {
    GlobalValue *BaseGV;       // Null means no BaseGV.
    int64_t      BaseOffs;     // Zero means no base offset.
    bool         HasBaseReg;   // False means no base register.
    int64_t      Scale;        // 0 = no ScaleReg; 1 = unscaled register.
    AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
  };
1189
  /// isLegalAddressingMode - Return true if the addressing mode represented by
  /// AM is legal for this target, for a load/store of the specified type.
  /// TODO: Handle pre/postinc as well.
  virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;
1194
  /// isTruncateFree - Return true if it's free to truncate a value of
  /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
  /// register EAX to i16 by referencing its sub-register AX.
  /// The default implementation assumes no truncation is free.
  virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const {
    return false;
  }

  /// isTruncateFree - Value-type variant of the above; return true if
  /// truncating from VT1 to VT2 is free.  Defaults to false.
  virtual bool isTruncateFree(MVT::ValueType VT1, MVT::ValueType VT2) const {
    return false;
  }
1205
1206  //===--------------------------------------------------------------------===//
1207  // Div utility functions
1208  //
  /// BuildSDIV, BuildUDIV - Build a replacement DAG for the signed/unsigned
  /// division node N.  Created, if non-null, presumably collects the nodes
  /// built -- confirm against TargetLowering.cpp.
  SDOperand BuildSDIV(SDNode *N, SelectionDAG &DAG,
                      std::vector<SDNode*>* Created) const;
  SDOperand BuildUDIV(SDNode *N, SelectionDAG &DAG,
                      std::vector<SDNode*>* Created) const;
1213
1214
1215  //===--------------------------------------------------------------------===//
1216  // Runtime Library hooks
1217  //
1218
  /// setLibcallName - Rename the default libcall routine name for the specified
  /// libcall (recorded in LibcallRoutineNames).
  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
    LibcallRoutineNames[Call] = Name;
  }
1224
  /// getLibcallName - Get the libcall routine name for the specified libcall.
  ///
  const char *getLibcallName(RTLIB::Libcall Call) const {
    return LibcallRoutineNames[Call];
  }
1230
  /// setCmpLibcallCC - Override the default CondCode to be used to test the
  /// result of the comparison libcall against zero (recorded in CmpLibcallCCs).
  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
    CmpLibcallCCs[Call] = CC;
  }
1236
  /// getCmpLibcallCC - Get the CondCode that's to be used to test the result of
  /// the comparison libcall against zero.
  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
    return CmpLibcallCCs[Call];
  }
1242
1243private:
1244  TargetMachine &TM;
1245  const TargetData *TD;
1246
1247  /// IsLittleEndian - True if this is a little endian target.
1248  ///
1249  bool IsLittleEndian;
1250
1251  /// PointerTy - The type to use for pointers, usually i32 or i64.
1252  ///
1253  MVT::ValueType PointerTy;
1254
1255  /// UsesGlobalOffsetTable - True if this target uses a GOT for PIC codegen.
1256  ///
1257  bool UsesGlobalOffsetTable;
1258
1259  /// ShiftAmountTy - The type to use for shift amounts, usually i8 or whatever
1260  /// PointerTy is.
1261  MVT::ValueType ShiftAmountTy;
1262
1263  OutOfRangeShiftAmount ShiftAmtHandling;
1264
1265  /// SelectIsExpensive - Tells the code generator not to expand operations
1266  /// into sequences that use the select operations if possible.
1267  bool SelectIsExpensive;
1268
1269  /// IntDivIsCheap - Tells the code generator not to expand integer divides by
1270  /// constants into a sequence of muls, adds, and shifts.  This is a hack until
1271  /// a real cost model is in place.  If we ever optimize for size, this will be
1272  /// set to true unconditionally.
1273  bool IntDivIsCheap;
1274
1275  /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
1276  /// srl/add/sra for a signed divide by power of two, and let the target handle
1277  /// it.
1278  bool Pow2DivIsCheap;
1279
1280  /// SetCCResultContents - Information about the contents of the high-bits in
1281  /// the result of a setcc comparison operation.
1282  SetCCResultValue SetCCResultContents;
1283
1284  /// SchedPreferenceInfo - The target scheduling preference: shortest possible
1285  /// total cycles or lowest register usage.
1286  SchedPreference SchedPreferenceInfo;
1287
1288  /// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
1289  /// llvm.setjmp.  Defaults to false.
1290  bool UseUnderscoreSetJmp;
1291
1292  /// UseUnderscoreLongJmp - This target prefers to use _longjmp to implement
1293  /// llvm.longjmp.  Defaults to false.
1294  bool UseUnderscoreLongJmp;
1295
1296  /// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers
1297  unsigned JumpBufSize;
1298
1299  /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
1300  /// buffers
1301  unsigned JumpBufAlignment;
1302
1303  /// IfCvtBlockSizeLimit - The maximum allowed size for a block to be
1304  /// if-converted.
1305  unsigned IfCvtBlockSizeLimit;
1306
1307  /// IfCvtDupBlockSizeLimit - The maximum allowed size for a block to be
1308  /// duplicated during if-conversion.
1309  unsigned IfCvtDupBlockSizeLimit;
1310
1311  /// PrefLoopAlignment - The perferred loop alignment.
1312  ///
1313  unsigned PrefLoopAlignment;
1314
1315  /// StackPointerRegisterToSaveRestore - If set to a physical register, this
1316  /// specifies the register that llvm.savestack/llvm.restorestack should save
1317  /// and restore.
1318  unsigned StackPointerRegisterToSaveRestore;
1319
1320  /// ExceptionPointerRegister - If set to a physical register, this specifies
1321  /// the register that receives the exception address on entry to a landing
1322  /// pad.
1323  unsigned ExceptionPointerRegister;
1324
1325  /// ExceptionSelectorRegister - If set to a physical register, this specifies
1326  /// the register that receives the exception typeid on entry to a landing
1327  /// pad.
1328  unsigned ExceptionSelectorRegister;
1329
1330  /// RegClassForVT - This indicates the default register class to use for
1331  /// each ValueType the target supports natively.
1332  TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
1333  unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
1334  MVT::ValueType RegisterTypeForVT[MVT::LAST_VALUETYPE];
1335
1336  /// TransformToType - For any value types we are promoting or expanding, this
1337  /// contains the value type that we are changing to.  For Expanded types, this
1338  /// contains one step of the expand (e.g. i64 -> i32), even if there are
1339  /// multiple steps required (e.g. i64 -> i16).  For types natively supported
1340  /// by the system, this holds the same type (e.g. i32 -> i32).
1341  MVT::ValueType TransformToType[MVT::LAST_VALUETYPE];
1342
1343  /// OpActions - For each operation and each value type, keep a LegalizeAction
1344  /// that indicates how instruction selection should deal with the operation.
1345  /// Most operations are Legal (aka, supported natively by the target), but
1346  /// operations that are not should be described.  Note that operations on
1347  /// non-legal value types are not described here.
1348  uint64_t OpActions[156];
1349
1350  /// LoadXActions - For each load of load extension type and each value type,
1351  /// keep a LegalizeAction that indicates how instruction selection should deal
1352  /// with the load.
1353  uint64_t LoadXActions[ISD::LAST_LOADX_TYPE];
1354
1355  /// TruncStoreActions - For each truncating store, keep a LegalizeAction that
1356  /// indicates how instruction selection should deal with the store.
1357  uint64_t TruncStoreActions[MVT::LAST_VALUETYPE];
1358
1359  /// IndexedModeActions - For each indexed mode and each value type, keep a
1360  /// pair of LegalizeAction that indicates how instruction selection should
1361  /// deal with the load / store.
1362  uint64_t IndexedModeActions[2][ISD::LAST_INDEXED_MODE];
1363
1364  /// ConvertActions - For each conversion from source type to destination type,
1365  /// keep a LegalizeAction that indicates how instruction selection should
1366  /// deal with the conversion.
1367  /// Currently, this is used only for floating->floating conversions
1368  /// (FP_EXTEND and FP_ROUND).
1369  uint64_t ConvertActions[MVT::LAST_VALUETYPE];
1370
      /// ValueTypeActions - For each value type, the action to take when
      /// legalizing it (presumably Legal / Promote / Expand; see
      /// ValueTypeActionImpl, declared earlier in this file).
1371  ValueTypeActionImpl ValueTypeActions;
1372
      /// LegalFPImmediates - Floating-point immediates the target can
      /// materialize natively.  NOTE(review): presumably populated by an
      /// addLegalFPImmediate()-style hook -- confirm against the rest of this
      /// header.
1373  std::vector<APFloat> LegalFPImmediates;
1374
      /// AvailableRegClasses - (value type, register class) pairs describing
      /// which register class holds each legal value type.  NOTE(review):
      /// presumably filled in by addRegisterClass() -- confirm.
1375  std::vector<std::pair<MVT::ValueType,
1376                        TargetRegisterClass*> > AvailableRegClasses;
1377
1378  /// TargetDAGCombineArray - Targets can specify ISD nodes that they would
1379  /// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(),
1380  /// which sets a bit in this array.
      ///
      /// NOTE(review): one bit per ISD opcode; 160 is another hard-coded
      /// opcode-count bound, and it disagrees with the 156 used to size
      /// OpActions above -- both should come from a single named constant
      /// kept in sync with ISD::NodeType.
1381  unsigned char TargetDAGCombineArray[160/(sizeof(unsigned char)*8)];
1382
1383  /// PromoteToType - For operations that must be promoted to a specific type,
1384  /// this holds the destination type.  This map should be sparse, so don't hold
1385  /// it as an array.
1386  ///
1387  /// Targets add entries to this map with AddPromotedToType(..), clients access
1388  /// this with getTypeToPromoteTo(..).
1389  std::map<std::pair<unsigned, MVT::ValueType>, MVT::ValueType> PromoteToType;
1390
1391  /// LibcallRoutineNames - Stores the name of each libcall, indexed by
1392  /// RTLIB::Libcall.
1393  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];
1394
1395  /// CmpLibcallCCs - The ISD::CondCode that should be used to test the result
1396  /// of each of the comparison libcalls against zero.
1397  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
1398
1399protected:
1400  /// When lowering %llvm.memset this field specifies the maximum number of
1401  /// store operations that may be substituted for the call to memset. Targets
1402  /// must set this value based on the cost threshold for that target. Targets
1403  /// should assume that the memset will be done using as many of the largest
1404  /// store operations first, followed by smaller ones, if necessary, per
1405  /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
1406  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
1407  /// store.  This only applies to setting a constant array of a constant size.
1408  /// @brief Specify maximum number of store instructions per memset call.
1409  unsigned maxStoresPerMemset;
1410
1411  /// When lowering %llvm.memcpy this field specifies the maximum number of
1412  /// store operations that may be substituted for a call to memcpy. Targets
1413  /// must set this value based on the cost threshold for that target. Targets
1414  /// should assume that the memcpy will be done using as many of the largest
1415  /// store operations first, followed by smaller ones, if necessary, per
1416  /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
1417  /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
1418  /// and one 1-byte store. This only applies to copying a constant array of
1419  /// constant size.
1420  /// @brief Specify maximum number of store instructions per memcpy call.
1421  unsigned maxStoresPerMemcpy;
1422
1423  /// When lowering %llvm.memmove this field specifies the maximum number of
1424  /// store instructions that may be substituted for a call to memmove. Targets
1425  /// must set this value based on the cost threshold for that target. Targets
1426  /// should assume that the memmove will be done using as many of the largest
1427  /// store operations first, followed by smaller ones, if necessary, per
1428  /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
1429  /// with 8-bit alignment would result in nine 1-byte stores.  This only
1430  /// applies to copying a constant array of constant size.
1431  /// @brief Specify maximum number of store instructions per memmove call.
1432  unsigned maxStoresPerMemmove;
1433
1434  /// This field specifies whether the target machine permits unaligned memory
1435  /// accesses.  This is used, for example, to determine the size of store
1436  /// operations when copying small arrays and other similar tasks.
1437  /// @brief Indicate whether the target permits unaligned memory accesses.
1438  bool allowUnalignedMemoryAccesses;
1439};
1440} // end llvm namespace
1441
1442#endif
1443