TargetLowering.h revision 6fd599fa6916bd9438dbea7994cf2437bdf4ab8c
1//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file describes how to lower LLVM code to machine code.  This has three
11// main components:
12//
13//  1. Which ValueTypes are natively supported by the target.
14//  2. Which operations are supported for supported ValueTypes.
15//  3. Cost thresholds for alternative implementations of certain operations.
16//
17// In addition it has a few other components, like information about FP
18// immediates.
19//
20//===----------------------------------------------------------------------===//
21
22#ifndef LLVM_TARGET_TARGETLOWERING_H
23#define LLVM_TARGET_TARGETLOWERING_H
24
25#include "llvm/Constants.h"
26#include "llvm/InlineAsm.h"
27#include "llvm/CodeGen/SelectionDAGNodes.h"
28#include "llvm/CodeGen/RuntimeLibcalls.h"
29#include "llvm/ADT/APFloat.h"
30#include "llvm/ADT/STLExtras.h"
31#include <map>
32#include <vector>
33
34namespace llvm {
35  class Value;
36  class Function;
37  class TargetMachine;
38  class TargetData;
39  class TargetRegisterClass;
40  class SDNode;
41  class SDOperand;
42  class SelectionDAG;
43  class MachineBasicBlock;
44  class MachineInstr;
45  class VectorType;
46  class TargetSubtarget;
47
48//===----------------------------------------------------------------------===//
49/// TargetLowering - This class defines information used to lower LLVM code to
50/// legal SelectionDAG operators that the target instruction selector can accept
51/// natively.
52///
53/// This class also defines callbacks that targets must implement to lower
54/// target-specific constructs to SelectionDAG operators.
55///
56class TargetLowering {
57public:
58  /// LegalizeAction - This enum indicates whether operations are valid for a
59  /// target, and if not, what action should be used to make them valid.
60  enum LegalizeAction {
61    Legal,      // The target natively supports this operation.
62    Promote,    // This operation should be executed in a larger type.
63    Expand,     // Try to expand this to other ops, otherwise use a libcall.
64    Custom      // Use the LowerOperation hook to implement custom lowering.
65  };
66
67  enum OutOfRangeShiftAmount {
68    Undefined,  // Oversized shift amounts are undefined (default).
69    Mask,       // Shift amounts are auto masked (anded) to value size.
70    Extend      // Oversized shift pulls in zeros or sign bits.
71  };
72
73  enum SetCCResultValue {
74    UndefinedSetCCResult,          // SetCC returns a garbage/unknown extend.
75    ZeroOrOneSetCCResult,          // SetCC returns a zero extended result.
76    ZeroOrNegativeOneSetCCResult   // SetCC returns a sign extended result.
77  };
78
79  enum SchedPreference {
80    SchedulingForLatency,          // Scheduling for shortest total latency.
81    SchedulingForRegPressure       // Scheduling for lowest register pressure.
82  };
83
84  explicit TargetLowering(TargetMachine &TM);
85  virtual ~TargetLowering();
86
87  TargetMachine &getTargetMachine() const { return TM; }
88  const TargetData *getTargetData() const { return TD; }
89
90  bool isBigEndian() const { return !IsLittleEndian; }
91  bool isLittleEndian() const { return IsLittleEndian; }
92  MVT::ValueType getPointerTy() const { return PointerTy; }
93  MVT::ValueType getShiftAmountTy() const { return ShiftAmountTy; }
94  OutOfRangeShiftAmount getShiftAmountFlavor() const {return ShiftAmtHandling; }
95
96  /// usesGlobalOffsetTable - Return true if this target uses a GOT for PIC
97  /// codegen.
98  bool usesGlobalOffsetTable() const { return UsesGlobalOffsetTable; }
99
100  /// isSelectExpensive - Return true if the select operation is expensive for
101  /// this target.
102  bool isSelectExpensive() const { return SelectIsExpensive; }
103
104  /// isIntDivCheap() - Return true if integer divide is usually cheaper than
105  /// a sequence of several shifts, adds, and multiplies for this target.
106  bool isIntDivCheap() const { return IntDivIsCheap; }
107
108  /// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of
109  /// srl/add/sra.
110  bool isPow2DivCheap() const { return Pow2DivIsCheap; }
111
112  /// getSetCCResultTy - Return the ValueType of the result of setcc operations.
113  ///
114  MVT::ValueType getSetCCResultTy() const { return SetCCResultTy; }
115
116  /// getSetCCResultContents - For targets without boolean registers, this flag
117  /// returns information about the contents of the high-bits in the setcc
118  /// result register.
119  SetCCResultValue getSetCCResultContents() const { return SetCCResultContents;}
120
121  /// getSchedulingPreference - Return target scheduling preference.
122  SchedPreference getSchedulingPreference() const {
123    return SchedPreferenceInfo;
124  }
125
126  /// getRegClassFor - Return the register class that should be used for the
127  /// specified value type.  This may only be called on legal types.
128  TargetRegisterClass *getRegClassFor(MVT::ValueType VT) const {
129    assert(VT < array_lengthof(RegClassForVT));
130    TargetRegisterClass *RC = RegClassForVT[VT];
131    assert(RC && "This value type is not natively supported!");
132    return RC;
133  }
134
135  /// isTypeLegal - Return true if the target has native support for the
136  /// specified value type.  This means that it has a register that directly
137  /// holds it without promotions or expansions.
138  bool isTypeLegal(MVT::ValueType VT) const {
139    assert(MVT::isExtendedVT(VT) || VT < array_lengthof(RegClassForVT));
140    return !MVT::isExtendedVT(VT) && RegClassForVT[VT] != 0;
141  }
142
143  class ValueTypeActionImpl {
144    /// ValueTypeActions - This is a bitvector that contains two bits for each
145    /// value type, where the two bits correspond to the LegalizeAction enum.
146    /// This can be queried with "getTypeAction(VT)".
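    /// Each 32-bit word packs the two-bit actions for 16 value types: the
    /// action for VT is stored in word VT>>4 at bit offset (2*VT)&31.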
147    uint32_t ValueTypeActions[2];
148  public:
149    ValueTypeActionImpl() {
150      ValueTypeActions[0] = ValueTypeActions[1] = 0;
151    }
152    ValueTypeActionImpl(const ValueTypeActionImpl &RHS) {
153      ValueTypeActions[0] = RHS.ValueTypeActions[0];
154      ValueTypeActions[1] = RHS.ValueTypeActions[1];
155    }
156
157    LegalizeAction getTypeAction(MVT::ValueType VT) const {
158      if (MVT::isExtendedVT(VT)) {
159        if (MVT::isVector(VT)) return Expand;
160        if (MVT::isInteger(VT))
161          // First promote to a power-of-two size, then expand if necessary.
162          return VT == MVT::RoundIntegerType(VT) ? Expand : Promote;
163        assert(0 && "Unsupported extended type!");
164      }
165      assert(VT<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
166      return (LegalizeAction)((ValueTypeActions[VT>>4] >> ((2*VT) & 31)) & 3);
167    }
168    void setTypeAction(MVT::ValueType VT, LegalizeAction Action) {
169      assert(VT<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
170      ValueTypeActions[VT>>4] |= Action << ((VT*2) & 31);
171    }
172  };
173
174  const ValueTypeActionImpl &getValueTypeActions() const {
175    return ValueTypeActions;
176  }
177
178  /// getTypeAction - Return how we should legalize values of this type, either
179  /// it is already legal (return 'Legal') or we need to promote it to a larger
180  /// type (return 'Promote'), or we need to expand it into multiple registers
181  /// of smaller integer type (return 'Expand').  'Custom' is not an option.
182  LegalizeAction getTypeAction(MVT::ValueType VT) const {
183    return ValueTypeActions.getTypeAction(VT);
184  }
185
186  /// getTypeToTransformTo - For types supported by the target, this is an
187  /// identity function.  For types that must be promoted to larger types, this
188  /// returns the larger type to promote to.  For integer types that are larger
189  /// than the largest integer register, this contains one step in the expansion
190  /// to get to the smaller register. For illegal floating point types, this
191  /// returns the integer type to transform to.
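  /// For illustration only: on a target whose only legal integer type is i32,
  /// i16 would map to i32 (promotion), i64 to i32 (one step of expansion), and
  /// v8f32 to v4f32 (splitting in half); the actual mapping is whatever
  /// computeRegisterProperties derives for the target.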
192  MVT::ValueType getTypeToTransformTo(MVT::ValueType VT) const {
193    if (!MVT::isExtendedVT(VT)) {
194      assert(VT < array_lengthof(TransformToType));
195      MVT::ValueType NVT = TransformToType[VT];
196      assert(getTypeAction(NVT) != Promote &&
197             "Promote may not follow Expand or Promote");
198      return NVT;
199    }
200
201    if (MVT::isVector(VT))
202      return MVT::getVectorType(MVT::getVectorElementType(VT),
203                                MVT::getVectorNumElements(VT) / 2);
204    if (MVT::isInteger(VT)) {
205      MVT::ValueType NVT = MVT::RoundIntegerType(VT);
206      if (NVT == VT)
207        // Size is a power of two - expand to half the size.
208        return MVT::getIntegerType(MVT::getSizeInBits(VT) / 2);
209      else
210        // Promote to a power of two size, avoiding multi-step promotion.
211        return getTypeAction(NVT) == Promote ? getTypeToTransformTo(NVT) : NVT;
212    }
213    assert(0 && "Unsupported extended type!");
214  }
215
216  /// getTypeToExpandTo - For types supported by the target, this is an
217  /// identity function.  For types that must be expanded (i.e. integer types
218  /// that are larger than the largest integer register or illegal floating
219  /// point types), this returns the largest legal type it will be expanded to.
220  MVT::ValueType getTypeToExpandTo(MVT::ValueType VT) const {
221    assert(!MVT::isVector(VT));
222    while (true) {
223      switch (getTypeAction(VT)) {
224      case Legal:
225        return VT;
226      case Expand:
227        VT = getTypeToTransformTo(VT);
228        break;
229      default:
230        assert(false && "Type is not legal nor is it to be expanded!");
231        return VT;
232      }
233    }
234    return VT;
235  }
236
237  /// getVectorTypeBreakdown - Vector types are broken down into some number of
238  /// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
239  /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
240  /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
241  ///
242  /// This method returns the number of registers needed, and the VT for each
243  /// register.  It also returns the VT and quantity of the intermediate values
244  /// before they are promoted/expanded.
245  ///
246  unsigned getVectorTypeBreakdown(MVT::ValueType VT,
247                                  MVT::ValueType &IntermediateVT,
248                                  unsigned &NumIntermediates,
249                                  MVT::ValueType &RegisterVT) const;
250
251  typedef std::vector<APFloat>::const_iterator legal_fpimm_iterator;
252  legal_fpimm_iterator legal_fpimm_begin() const {
253    return LegalFPImmediates.begin();
254  }
255  legal_fpimm_iterator legal_fpimm_end() const {
256    return LegalFPImmediates.end();
257  }
258
259  /// isShuffleMaskLegal - Targets can use this to indicate that they only
260  /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
261  /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
262  /// are assumed to be legal.
263  virtual bool isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
264    return true;
265  }
266
267  /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can use
268  /// this to indicate whether there is a suitable VECTOR_SHUFFLE that can be
269  /// used to replace a VAND with a constant
270  /// pool entry.
271  virtual bool isVectorClearMaskLegal(std::vector<SDOperand> &BVOps,
272                                      MVT::ValueType EVT,
273                                      SelectionDAG &DAG) const {
274    return false;
275  }
276
277  /// getOperationAction - Return how this operation should be treated: either
278  /// it is legal, needs to be promoted to a larger size, needs to be
279  /// expanded to some other code sequence, or the target has a custom expander
280  /// for it.
281  LegalizeAction getOperationAction(unsigned Op, MVT::ValueType VT) const {
282    if (MVT::isExtendedVT(VT)) return Expand;
283    assert(Op < array_lengthof(OpActions) &&
284           VT < sizeof(OpActions[0])*4 && "Table isn't big enough!");
285    return (LegalizeAction)((OpActions[Op] >> (2*VT)) & 3);
286  }
287
288  /// isOperationLegal - Return true if the specified operation is legal on this
289  /// target.
290  bool isOperationLegal(unsigned Op, MVT::ValueType VT) const {
291    return getOperationAction(Op, VT) == Legal ||
292           getOperationAction(Op, VT) == Custom;
293  }
294
295  /// getLoadXAction - Return how this load with extension should be treated:
296  /// either it is legal, needs to be promoted to a larger size, needs to be
297  /// expanded to some other code sequence, or the target has a custom expander
298  /// for it.
299  LegalizeAction getLoadXAction(unsigned LType, MVT::ValueType VT) const {
300    assert(LType < array_lengthof(LoadXActions) &&
301           VT < sizeof(LoadXActions[0])*4 && "Table isn't big enough!");
302    return (LegalizeAction)((LoadXActions[LType] >> (2*VT)) & 3);
303  }
304
305  /// isLoadXLegal - Return true if the specified load with extension is legal
306  /// on this target.
307  bool isLoadXLegal(unsigned LType, MVT::ValueType VT) const {
308    return !MVT::isExtendedVT(VT) &&
309      (getLoadXAction(LType, VT) == Legal ||
310       getLoadXAction(LType, VT) == Custom);
311  }
312
313  /// getTruncStoreAction - Return how this store with truncation should be
314  /// treated: either it is legal, needs to be promoted to a larger size, needs
315  /// to be expanded to some other code sequence, or the target has a custom
316  /// expander for it.
317  LegalizeAction getTruncStoreAction(MVT::ValueType ValVT,
318                                     MVT::ValueType MemVT) const {
319    assert(ValVT < array_lengthof(TruncStoreActions) &&
320           MemVT < sizeof(TruncStoreActions[0])*4 && "Table isn't big enough!");
321    return (LegalizeAction)((TruncStoreActions[ValVT] >> (2*MemVT)) & 3);
322  }
323
324  /// isTruncStoreLegal - Return true if the specified store with truncation is
325  /// legal on this target.
326  bool isTruncStoreLegal(MVT::ValueType ValVT, MVT::ValueType MemVT) const {
327    return !MVT::isExtendedVT(MemVT) &&
328      (getTruncStoreAction(ValVT, MemVT) == Legal ||
329       getTruncStoreAction(ValVT, MemVT) == Custom);
330  }
331
332  /// getIndexedLoadAction - Return how the indexed load should be treated:
333  /// either it is legal, needs to be promoted to a larger size, needs to be
334  /// expanded to some other code sequence, or the target has a custom expander
335  /// for it.
336  LegalizeAction
337  getIndexedLoadAction(unsigned IdxMode, MVT::ValueType VT) const {
338    assert(IdxMode < array_lengthof(IndexedModeActions[0]) &&
339           VT < sizeof(IndexedModeActions[0][0])*4 &&
340           "Table isn't big enough!");
341    return (LegalizeAction)((IndexedModeActions[0][IdxMode] >> (2*VT)) & 3);
342  }
343
344  /// isIndexedLoadLegal - Return true if the specified indexed load is legal
345  /// on this target.
346  bool isIndexedLoadLegal(unsigned IdxMode, MVT::ValueType VT) const {
347    return getIndexedLoadAction(IdxMode, VT) == Legal ||
348           getIndexedLoadAction(IdxMode, VT) == Custom;
349  }
350
351  /// getIndexedStoreAction - Return how the indexed store should be treated:
352  /// either it is legal, needs to be promoted to a larger size, needs to be
353  /// expanded to some other code sequence, or the target has a custom expander
354  /// for it.
355  LegalizeAction
356  getIndexedStoreAction(unsigned IdxMode, MVT::ValueType VT) const {
357    assert(IdxMode < array_lengthof(IndexedModeActions[1]) &&
358           VT < sizeof(IndexedModeActions[1][0])*4 &&
359           "Table isn't big enough!");
360    return (LegalizeAction)((IndexedModeActions[1][IdxMode] >> (2*VT)) & 3);
361  }
362
363  /// isIndexedStoreLegal - Return true if the specified indexed store is legal
364  /// on this target.
365  bool isIndexedStoreLegal(unsigned IdxMode, MVT::ValueType VT) const {
366    return getIndexedStoreAction(IdxMode, VT) == Legal ||
367           getIndexedStoreAction(IdxMode, VT) == Custom;
368  }
369
370  /// getConvertAction - Return how the conversion should be treated:
371  /// either it is legal, needs to be promoted to a larger size, needs to be
372  /// expanded to some other code sequence, or the target has a custom expander
373  /// for it.
374  LegalizeAction
375  getConvertAction(MVT::ValueType FromVT, MVT::ValueType ToVT) const {
376    assert(FromVT < array_lengthof(ConvertActions) &&
377           ToVT < sizeof(ConvertActions[0])*4 && "Table isn't big enough!");
378    return (LegalizeAction)((ConvertActions[FromVT] >> (2*ToVT)) & 3);
379  }
380
381  /// isConvertLegal - Return true if the specified conversion is legal
382  /// on this target.
383  bool isConvertLegal(MVT::ValueType FromVT, MVT::ValueType ToVT) const {
384    return getConvertAction(FromVT, ToVT) == Legal ||
385           getConvertAction(FromVT, ToVT) == Custom;
386  }
387
388  /// getTypeToPromoteTo - If the action for this operation is to promote, this
389  /// method returns the ValueType to promote to.
390  MVT::ValueType getTypeToPromoteTo(unsigned Op, MVT::ValueType VT) const {
391    assert(getOperationAction(Op, VT) == Promote &&
392           "This operation isn't promoted!");
393
394    // See if this has an explicit type specified.
395    std::map<std::pair<unsigned, MVT::ValueType>,
396             MVT::ValueType>::const_iterator PTTI =
397      PromoteToType.find(std::make_pair(Op, VT));
398    if (PTTI != PromoteToType.end()) return PTTI->second;
399
400    assert((MVT::isInteger(VT) || MVT::isFloatingPoint(VT)) &&
401           "Cannot autopromote this type, add it with AddPromotedToType.");
402
403    MVT::ValueType NVT = VT;
404    do {
405      NVT = (MVT::ValueType)(NVT+1);
406      assert(MVT::isInteger(NVT) == MVT::isInteger(VT) && NVT != MVT::isVoid &&
407             "Didn't find type to promote to!");
408    } while (!isTypeLegal(NVT) ||
409              getOperationAction(Op, NVT) == Promote);
410    return NVT;
411  }
412
413  /// getValueType - Return the MVT::ValueType corresponding to this LLVM type.
414  /// This is fixed by the LLVM operations except for the pointer size.  If
415  /// AllowUnknown is true, this will return MVT::Other for types with no MVT
416  /// counterpart (e.g. structs), otherwise it will assert.
417  MVT::ValueType getValueType(const Type *Ty, bool AllowUnknown = false) const {
418    MVT::ValueType VT = MVT::getValueType(Ty, AllowUnknown);
419    return VT == MVT::iPTR ? PointerTy : VT;
420  }
421
422  /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
423  /// function arguments in the caller parameter area.  This is the actual
424  /// alignment, not its logarithm.
425  virtual unsigned getByValTypeAlignment(const Type *Ty) const;
426
427  /// getRegisterType - Return the type of registers that this ValueType will
428  /// eventually require.
429  MVT::ValueType getRegisterType(MVT::ValueType VT) const {
430    if (!MVT::isExtendedVT(VT)) {
431      assert(VT < array_lengthof(RegisterTypeForVT));
432      return RegisterTypeForVT[VT];
433    }
434    if (MVT::isVector(VT)) {
435      MVT::ValueType VT1, RegisterVT;
436      unsigned NumIntermediates;
437      (void)getVectorTypeBreakdown(VT, VT1, NumIntermediates, RegisterVT);
438      return RegisterVT;
439    }
440    if (MVT::isInteger(VT)) {
441      return getRegisterType(getTypeToTransformTo(VT));
442    }
443    assert(0 && "Unsupported extended type!");
444  }
445
446  /// getNumRegisters - Return the number of registers that this ValueType will
447  /// eventually require.  This is one for any types promoted to live in larger
448  /// registers, but may be more than one for types (like i64) that are split
449  /// into pieces.  For types like i140, which are first promoted then expanded,
450  /// it is the number of registers needed to hold all the bits of the original
451  /// type.  For an i140 on a 32 bit machine this means 5 registers.
452  unsigned getNumRegisters(MVT::ValueType VT) const {
453    if (!MVT::isExtendedVT(VT)) {
454      assert(VT < array_lengthof(NumRegistersForVT));
455      return NumRegistersForVT[VT];
456    }
457    if (MVT::isVector(VT)) {
458      MVT::ValueType VT1, VT2;
459      unsigned NumIntermediates;
460      return getVectorTypeBreakdown(VT, VT1, NumIntermediates, VT2);
461    }
462    if (MVT::isInteger(VT)) {
463      unsigned BitWidth = MVT::getSizeInBits(VT);
464      unsigned RegWidth = MVT::getSizeInBits(getRegisterType(VT));
465      return (BitWidth + RegWidth - 1) / RegWidth;
466    }
467    assert(0 && "Unsupported extended type!");
468  }
469
470  /// ShouldShrinkFPConstant - If true, then instruction selection should
471  /// seek to shrink the FP constant of the specified type to a smaller type
472  /// in order to save space and / or reduce runtime.
473  virtual bool ShouldShrinkFPConstant(MVT::ValueType VT) const { return true; }
474
475  /// hasTargetDAGCombine - If true, the target has custom DAG combine
476  /// transformations that it can perform for the specified node.
477  bool hasTargetDAGCombine(ISD::NodeType NT) const {
478    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
479    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
480  }
481
482  /// This function returns the maximum number of store operations permitted
483  /// to replace a call to llvm.memset. The value is set by the target at the
484  /// performance threshold for such a replacement.
485  /// @brief Get maximum # of store operations permitted for llvm.memset
486  unsigned getMaxStoresPerMemset() const { return maxStoresPerMemset; }
487
488  /// This function returns the maximum number of store operations permitted
489  /// to replace a call to llvm.memcpy. The value is set by the target at the
490  /// performance threshold for such a replacement.
491  /// @brief Get maximum # of store operations permitted for llvm.memcpy
492  unsigned getMaxStoresPerMemcpy() const { return maxStoresPerMemcpy; }
493
494  /// This function returns the maximum number of store operations permitted
495  /// to replace a call to llvm.memmove. The value is set by the target at the
496  /// performance threshold for such a replacement.
497  /// @brief Get maximum # of store operations permitted for llvm.memmove
498  unsigned getMaxStoresPerMemmove() const { return maxStoresPerMemmove; }
499
500  /// This function returns true if the target allows unaligned memory accesses.
501  /// This is used, for example, in situations where an array copy/move/set is
502  /// converted to a sequence of store operations. Its use helps to ensure that
503  /// such replacements don't generate code that causes an alignment error
504  /// (trap) on the target machine.
505  /// @brief Determine if the target supports unaligned memory accesses.
506  bool allowsUnalignedMemoryAccesses() const {
507    return allowUnalignedMemoryAccesses;
508  }
509
510  /// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
511  /// to implement llvm.setjmp.
512  bool usesUnderscoreSetJmp() const {
513    return UseUnderscoreSetJmp;
514  }
515
516  /// usesUnderscoreLongJmp - Determine if we should use _longjmp or longjmp
517  /// to implement llvm.longjmp.
518  bool usesUnderscoreLongJmp() const {
519    return UseUnderscoreLongJmp;
520  }
521
522  /// getStackPointerRegisterToSaveRestore - If a physical register, this
523  /// specifies the register that llvm.stacksave/llvm.stackrestore should save
524  /// and restore.
525  unsigned getStackPointerRegisterToSaveRestore() const {
526    return StackPointerRegisterToSaveRestore;
527  }
528
529  /// getExceptionAddressRegister - If a physical register, this returns
530  /// the register that receives the exception address on entry to a landing
531  /// pad.
532  unsigned getExceptionAddressRegister() const {
533    return ExceptionPointerRegister;
534  }
535
536  /// getExceptionSelectorRegister - If a physical register, this returns
537  /// the register that receives the exception typeid on entry to a landing
538  /// pad.
539  unsigned getExceptionSelectorRegister() const {
540    return ExceptionSelectorRegister;
541  }
542
543  /// getJumpBufSize - returns the target's jmp_buf size in bytes (if never
544  /// set, the default is 200)
545  unsigned getJumpBufSize() const {
546    return JumpBufSize;
547  }
548
549  /// getJumpBufAlignment - returns the target's jmp_buf alignment in bytes
550  /// (if never set, the default is 0)
551  unsigned getJumpBufAlignment() const {
552    return JumpBufAlignment;
553  }
554
555  /// getIfCvtBlockSizeLimit - returns the target specific if-conversion block size
556  /// limit. Any block whose size is greater should not be predicated.
557  unsigned getIfCvtBlockSizeLimit() const {
558    return IfCvtBlockSizeLimit;
559  }
560
561  /// getIfCvtDupBlockSizeLimit - returns the target specific size limit for a
562  /// block to be considered for duplication. Any block whose size is greater
563  /// should not be duplicated to facilitate its predication.
564  unsigned getIfCvtDupBlockSizeLimit() const {
565    return IfCvtDupBlockSizeLimit;
566  }
567
568  /// getPrefLoopAlignment - return the preferred loop alignment.
569  ///
570  unsigned getPrefLoopAlignment() const {
571    return PrefLoopAlignment;
572  }
573
574  /// getPreIndexedAddressParts - returns true by value, and sets the base
575  /// pointer, offset, and addressing mode by reference, if the node's address
576  /// can be legally represented as a pre-indexed load / store address.
577  virtual bool getPreIndexedAddressParts(SDNode *N, SDOperand &Base,
578                                         SDOperand &Offset,
579                                         ISD::MemIndexedMode &AM,
580                                         SelectionDAG &DAG) {
581    return false;
582  }
583
584  /// getPostIndexedAddressParts - returns true by value, and sets the base
585  /// pointer, offset, and addressing mode by reference, if this node can be
586  /// combined with a load / store to form a post-indexed load / store.
587  virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
588                                          SDOperand &Base, SDOperand &Offset,
589                                          ISD::MemIndexedMode &AM,
590                                          SelectionDAG &DAG) {
591    return false;
592  }
593
594  /// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
595  /// jumptable.
596  virtual SDOperand getPICJumpTableRelocBase(SDOperand Table,
597                                             SelectionDAG &DAG) const;
598
599  //===--------------------------------------------------------------------===//
600  // TargetLowering Optimization Methods
601  //
602
603  /// TargetLoweringOpt - A convenience struct that encapsulates a DAG and two
604  /// SDOperands for returning information from TargetLowering to its clients
605  /// that want to combine.
606  struct TargetLoweringOpt {
607    SelectionDAG &DAG;
608    bool AfterLegalize;
609    SDOperand Old;
610    SDOperand New;
611
612    explicit TargetLoweringOpt(SelectionDAG &InDAG, bool afterLegalize)
613      : DAG(InDAG), AfterLegalize(afterLegalize) {}
614
615    bool CombineTo(SDOperand O, SDOperand N) {
616      Old = O;
617      New = N;
618      return true;
619    }
620
621    /// ShrinkDemandedConstant - Check to see if the specified operand of the
622    /// specified instruction is a constant integer.  If so, check to see if
623    /// there are any bits set in the constant that are not demanded.  If so,
624    /// shrink the constant and return true.
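    /// For example, if Op uses the constant 0x1FF but Demanded is only 0xFF,
    /// the constant can be shrunk to 0xFF.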
625    bool ShrinkDemandedConstant(SDOperand Op, const APInt &Demanded);
626  };
627
628  /// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
629  /// DemandedMask bits of the result of Op are ever used downstream.  If we can
630  /// use this information to simplify Op, create a new simplified DAG node and
631  /// return true, returning the original and new nodes in Old and New.
632  /// Otherwise, analyze the expression and return a mask of KnownOne and
633  /// KnownZero bits for the expression (used to simplify the caller).
634  /// The KnownZero/One bits may only be accurate for those bits in the
635  /// DemandedMask.
636  bool SimplifyDemandedBits(SDOperand Op, const APInt &DemandedMask,
637                            APInt &KnownZero, APInt &KnownOne,
638                            TargetLoweringOpt &TLO, unsigned Depth = 0) const;
639
640  /// computeMaskedBitsForTargetNode - Determine which of the bits specified in
641  /// Mask are known to be either zero or one and return them in the
642  /// KnownZero/KnownOne bitsets.
643  virtual void computeMaskedBitsForTargetNode(const SDOperand Op,
644                                              const APInt &Mask,
645                                              APInt &KnownZero,
646                                              APInt &KnownOne,
647                                              const SelectionDAG &DAG,
648                                              unsigned Depth = 0) const;
649
650  /// ComputeNumSignBitsForTargetNode - This method can be implemented by
651  /// targets that want to expose additional information about sign bits to the
652  /// DAG Combiner.
653  virtual unsigned ComputeNumSignBitsForTargetNode(SDOperand Op,
654                                                   unsigned Depth = 0) const;
655
656  struct DAGCombinerInfo {
657    void *DC;  // The DAG Combiner object.
658    bool BeforeLegalize;
659    bool CalledByLegalizer;
660  public:
661    SelectionDAG &DAG;
662
663    DAGCombinerInfo(SelectionDAG &dag, bool bl, bool cl, void *dc)
664      : DC(dc), BeforeLegalize(bl), CalledByLegalizer(cl), DAG(dag) {}
665
666    bool isBeforeLegalize() const { return BeforeLegalize; }
667    bool isCalledByLegalizer() const { return CalledByLegalizer; }
668
669    void AddToWorklist(SDNode *N);
670    SDOperand CombineTo(SDNode *N, const std::vector<SDOperand> &To);
671    SDOperand CombineTo(SDNode *N, SDOperand Res);
672    SDOperand CombineTo(SDNode *N, SDOperand Res0, SDOperand Res1);
673  };
674
675  /// SimplifySetCC - Try to simplify a setcc built with the specified operands
676  /// and cc. If it is unable to simplify it, return a null SDOperand.
677  SDOperand SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1,
678                          ISD::CondCode Cond, bool foldBooleans,
679                          DAGCombinerInfo &DCI) const;
680
681  /// PerformDAGCombine - This method will be invoked for all target nodes and
682  /// for any target-independent nodes that the target has registered, via
683  /// setTargetDAGCombine, to have it invoked for.
684  ///
685  /// The semantics are as follows:
686  /// Return Value:
687  ///   SDOperand.Val == 0   - No change was made
688  ///   SDOperand.Val == N   - N was replaced, is dead, and is already handled.
689  ///   otherwise            - N should be replaced by the returned Operand.
690  ///
691  /// In addition, methods provided by DAGCombinerInfo may be used to perform
692  /// more complex transformations.
693  ///
694  virtual SDOperand PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
695
696  //===--------------------------------------------------------------------===//
697  // TargetLowering Configuration Methods - These methods should be invoked by
698  // the derived class constructor to configure this object for the target.
699  //
700
701protected:
702  /// setUsesGlobalOffsetTable - Specify that this target does or doesn't use a
703  /// GOT for PC-relative code.
704  void setUsesGlobalOffsetTable(bool V) { UsesGlobalOffsetTable = V; }
705
706  /// setShiftAmountType - Describe the type that should be used for shift
707  /// amounts.  This type defaults to the pointer type.
708  void setShiftAmountType(MVT::ValueType VT) { ShiftAmountTy = VT; }
709
710  /// setSetCCResultType - Describe the type that should be used as the result
711  /// of a setcc operation.  This defaults to the pointer type.
712  void setSetCCResultType(MVT::ValueType VT) { SetCCResultTy = VT; }
713
714  /// setSetCCResultContents - Specify how the target extends the result of a
715  /// setcc operation in a register.
716  void setSetCCResultContents(SetCCResultValue Ty) { SetCCResultContents = Ty; }
717
718  /// setSchedulingPreference - Specify the target scheduling preference.
719  void setSchedulingPreference(SchedPreference Pref) {
720    SchedPreferenceInfo = Pref;
721  }
722
723  /// setShiftAmountFlavor - Describe how the target handles out of range shift
724  /// amounts.
725  void setShiftAmountFlavor(OutOfRangeShiftAmount OORSA) {
726    ShiftAmtHandling = OORSA;
727  }
728
729  /// setUseUnderscoreSetJmp - Indicate whether this target prefers to
730  /// use _setjmp rather than setjmp to implement llvm.setjmp.
731  /// Defaults to false.
732  void setUseUnderscoreSetJmp(bool Val) {
733    UseUnderscoreSetJmp = Val;
734  }
735
736  /// setUseUnderscoreLongJmp - Indicate whether this target prefers to
737  /// use _longjmp rather than longjmp to implement llvm.longjmp.
738  /// Defaults to false.
739  void setUseUnderscoreLongJmp(bool Val) {
740    UseUnderscoreLongJmp = Val;
741  }
742
743  /// setStackPointerRegisterToSaveRestore - If set to a physical register, this
744  /// specifies the register that llvm.stacksave/llvm.stackrestore should save
745  /// and restore.
746  void setStackPointerRegisterToSaveRestore(unsigned R) {
747    StackPointerRegisterToSaveRestore = R;
748  }
749
750  /// setExceptionPointerRegister - If set to a physical register, this sets
751  /// the register that receives the exception address on entry to a landing
752  /// pad.
753  void setExceptionPointerRegister(unsigned R) {
754    ExceptionPointerRegister = R;
755  }
756
757  /// setExceptionSelectorRegister - If set to a physical register, this sets
758  /// the register that receives the exception typeid on entry to a landing
759  /// pad.
760  void setExceptionSelectorRegister(unsigned R) {
761    ExceptionSelectorRegister = R;
762  }
763
764  /// setSelectIsExpensive - Tells the code generator not to expand operations
765  /// into sequences that use the select operation if possible.
766  void setSelectIsExpensive() { SelectIsExpensive = true; }
767
768  /// setIntDivIsCheap - Tells the code generator whether integer divide is
769  /// cheap on this target; if it is not, divides should be replaced where
770  /// possible by an alternate sequence of instructions not containing a divide.
771  void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
772
773  /// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
774  /// srl/add/sra for a signed divide by power of two, and let the target handle
775  /// it.
776  void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }
777
778  /// addRegisterClass - Add the specified register class as an available
779  /// regclass for the specified value type.  This indicates the selector can
780  /// handle values of that class natively.
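  /// A typical use from a target's constructor (the register class name here
  /// is only illustrative) is:
  ///   addRegisterClass(MVT::i32, X86::GR32RegisterClass);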
781  void addRegisterClass(MVT::ValueType VT, TargetRegisterClass *RC) {
782    assert(VT < array_lengthof(RegClassForVT));
783    AvailableRegClasses.push_back(std::make_pair(VT, RC));
784    RegClassForVT[VT] = RC;
785  }
786
787  /// computeRegisterProperties - Once all of the register classes are added,
788  /// this allows us to compute derived properties we expose.
789  void computeRegisterProperties();
790
791  /// setOperationAction - Indicate that the specified operation does not work
792  /// with the specified type and indicate what to do about it.
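  /// For example, a target without a native f64 sine instruction could mark it
  /// for expansion in its constructor (the opcode/type pair is merely
  /// illustrative):
  ///   setOperationAction(ISD::FSIN, MVT::f64, Expand);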
793  void setOperationAction(unsigned Op, MVT::ValueType VT,
794                          LegalizeAction Action) {
795    assert(VT < sizeof(OpActions[0])*4 && Op < array_lengthof(OpActions) &&
796           "Table isn't big enough!");
797    OpActions[Op] &= ~(uint64_t(3UL) << VT*2);
798    OpActions[Op] |= (uint64_t)Action << VT*2;
799  }
800
801  /// setLoadXAction - Indicate that the specified load with extension does not
802  /// work with the specified type and indicate what to do about it.
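  /// For example (illustrative choice of extension kind and type):
  ///   setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);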
803  void setLoadXAction(unsigned ExtType, MVT::ValueType VT,
804                      LegalizeAction Action) {
805    assert(VT < sizeof(LoadXActions[0])*4 &&
806           ExtType < array_lengthof(LoadXActions) &&
807           "Table isn't big enough!");
808    LoadXActions[ExtType] &= ~(uint64_t(3UL) << VT*2);
809    LoadXActions[ExtType] |= (uint64_t)Action << VT*2;
810  }
811
812  /// setTruncStoreAction - Indicate that the specified truncating store does
813  /// not work with the specified type and indicate what to do about it.
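  /// For example, a target with no native f64 -> f32 truncating store could
  /// use (illustrative):
  ///   setTruncStoreAction(MVT::f64, MVT::f32, Expand);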
814  void setTruncStoreAction(MVT::ValueType ValVT, MVT::ValueType MemVT,
815                           LegalizeAction Action) {
816    assert(ValVT < array_lengthof(TruncStoreActions) &&
817           MemVT < sizeof(TruncStoreActions[0])*4 && "Table isn't big enough!");
818    TruncStoreActions[ValVT] &= ~(uint64_t(3UL) << MemVT*2);
819    TruncStoreActions[ValVT] |= (uint64_t)Action << MemVT*2;
820  }
821
822  /// setIndexedLoadAction - Indicate that the specified indexed load does or
823  /// does not work with the specified type and indicate what to do about
824  /// it. NOTE: All indexed mode loads are initialized to Expand in
825  /// TargetLowering.cpp.
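  /// For example, a target with pre-increment i32 loads could mark them legal
  /// (illustrative mode/type choice):
  ///   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);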
826  void setIndexedLoadAction(unsigned IdxMode, MVT::ValueType VT,
827                            LegalizeAction Action) {
828    assert(VT < sizeof(IndexedModeActions[0])*4 && IdxMode <
829           array_lengthof(IndexedModeActions[0]) &&
830           "Table isn't big enough!");
831    IndexedModeActions[0][IdxMode] &= ~(uint64_t(3UL) << VT*2);
832    IndexedModeActions[0][IdxMode] |= (uint64_t)Action << VT*2;
833  }
834
835  /// setIndexedStoreAction - Indicate that the specified indexed store does or
836  /// does not work with the specified type and indicate what to do about
837  /// it. NOTE: All indexed mode stores are initialized to Expand in
838  /// TargetLowering.cpp.
839  void setIndexedStoreAction(unsigned IdxMode, MVT::ValueType VT,
840                             LegalizeAction Action) {
841    assert(VT < sizeof(IndexedModeActions[1][0])*4 &&
842           IdxMode < array_lengthof(IndexedModeActions[1]) &&
843           "Table isn't big enough!");
844    IndexedModeActions[1][IdxMode] &= ~(uint64_t(3UL) << VT*2);
845    IndexedModeActions[1][IdxMode] |= (uint64_t)Action << VT*2;
846  }
847
848  /// setConvertAction - Indicate that the specified conversion does or does
849  /// not work with the specified type and indicate what to do about it.
850  void setConvertAction(MVT::ValueType FromVT, MVT::ValueType ToVT,
851                        LegalizeAction Action) {
852    assert(FromVT < array_lengthof(ConvertActions) &&
853           ToVT < sizeof(ConvertActions[0])*4 && "Table isn't big enough!");
854    ConvertActions[FromVT] &= ~(uint64_t(3UL) << ToVT*2);
855    ConvertActions[FromVT] |= (uint64_t)Action << ToVT*2;
856  }
857
858  /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the
859  /// promotion code defaults to trying a larger integer/fp until it can find
860  /// one that works.  If that default is insufficient, this method can be used
861  /// by the target to override the default.
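  /// For example, a target that wants an i1 operation promoted all the way to
  /// i8, rather than to the next larger legal type, could write (illustrative):
  ///   AddPromotedToType(ISD::AND, MVT::i1, MVT::i8);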
862  void AddPromotedToType(unsigned Opc, MVT::ValueType OrigVT,
863                         MVT::ValueType DestVT) {
864    PromoteToType[std::make_pair(Opc, OrigVT)] = DestVT;
865  }
866
867  /// addLegalFPImmediate - Indicate that this target can instruction select
868  /// the specified FP immediate natively.
869  void addLegalFPImmediate(const APFloat& Imm) {
870    LegalFPImmediates.push_back(Imm);
871  }
872
873  /// setTargetDAGCombine - Targets should invoke this method for each target
874  /// independent node that they want to provide a custom DAG combiner for by
875  /// implementing the PerformDAGCombine virtual method.
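  /// For example (illustrative node choice):
  ///   setTargetDAGCombine(ISD::ADD);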
876  void setTargetDAGCombine(ISD::NodeType NT) {
877    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
878    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
879  }
880
881  /// setJumpBufSize - Set the target's required jmp_buf buffer size (in
882  /// bytes); default is 200
883  void setJumpBufSize(unsigned Size) {
884    JumpBufSize = Size;
885  }
886
887  /// setJumpBufAlignment - Set the target's required jmp_buf buffer
888  /// alignment (in bytes); default is 0
889  void setJumpBufAlignment(unsigned Align) {
890    JumpBufAlignment = Align;
891  }
892
893  /// setIfCvtBlockSizeLimit - Set the target's if-conversion block size
894  /// limit (in number of instructions); default is 2.
895  void setIfCvtBlockSizeLimit(unsigned Limit) {
896    IfCvtBlockSizeLimit = Limit;
897  }
898
899  /// setIfCvtDupBlockSizeLimit - Set the target's block size limit (in number
900  /// of instructions) to be considered for code duplication during
901  /// if-conversion; default is 2.
902  void setIfCvtDupBlockSizeLimit(unsigned Limit) {
903    IfCvtDupBlockSizeLimit = Limit;
904  }
905
906  /// setPrefLoopAlignment - Set the target's preferred loop alignment. Default
907  /// alignment is zero, meaning the target does not care about loop alignment.
908  void setPrefLoopAlignment(unsigned Align) {
909    PrefLoopAlignment = Align;
910  }
911
912public:
913
914  virtual const TargetSubtarget *getSubtarget() {
915    assert(0 && "Not Implemented");
916    return NULL;    // this is here to silence compiler errors
917  }
918  //===--------------------------------------------------------------------===//
919  // Lowering methods - These methods must be implemented by targets so that
920  // the SelectionDAGLowering code knows how to lower these.
921  //
922
923  /// LowerArguments - This hook must be implemented to indicate how we should
924  /// lower the arguments for the specified function, into the specified DAG.
925  virtual std::vector<SDOperand>
926  LowerArguments(Function &F, SelectionDAG &DAG);
927
928  /// LowerCallTo - This hook lowers an abstract call to a function into an
929  /// actual call.  This returns a pair of operands.  The first element is the
930  /// return value for the function (if RetTy is not VoidTy).  The second
931  /// element is the outgoing token chain.
932  struct ArgListEntry {
933    SDOperand Node;
934    const Type* Ty;
935    bool isSExt;
936    bool isZExt;
937    bool isInReg;
938    bool isSRet;
939    bool isNest;
940    bool isByVal;
941    uint16_t Alignment;
942
943    ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
944      isSRet(false), isNest(false), isByVal(false), Alignment(0) { }
945  };
946  typedef std::vector<ArgListEntry> ArgListTy;
947  virtual std::pair<SDOperand, SDOperand>
948  LowerCallTo(SDOperand Chain, const Type *RetTy, bool RetSExt, bool RetZExt,
949              bool isVarArg, unsigned CallingConv, bool isTailCall,
950              SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);
951
952
953  virtual SDOperand LowerMEMCPY(SDOperand Op, SelectionDAG &DAG);
954  virtual SDOperand LowerMEMCPYCall(SDOperand Chain, SDOperand Dest,
955                                    SDOperand Source, SDOperand Count,
956                                    SelectionDAG &DAG);
957  virtual SDOperand LowerMEMCPYInline(SDOperand Chain, SDOperand Dest,
958                                      SDOperand Source, unsigned Size,
959                                      unsigned Align, SelectionDAG &DAG) {
960    assert(0 && "Not Implemented");
961    return SDOperand();   // this is here to silence compiler errors
962  }
963
964
965  /// LowerOperation - This callback is invoked for operations that are
966  /// unsupported by the target, which are registered to use 'custom' lowering,
967  /// and whose defined values are all legal.
968  /// If the target has no operations that require custom lowering, it need not
969  /// implement this.  The default implementation of this aborts.
970  virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);
971
972  /// ExpandOperationResult - This callback is invoked for operations that are
973  /// unsupported by the target, which are registered to use 'custom' lowering,
974  /// and whose result type needs to be expanded.  This must return a node whose
975  /// results precisely match the results of the input node.  This typically
976  /// involves a MERGE_VALUES node and/or BUILD_PAIR.
977  ///
978  /// If the target has no operations that require custom lowering, it need not
979  /// implement this.  The default implementation of this aborts.
980  virtual SDNode *ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
981    assert(0 && "ExpandOperationResult not implemented for this target!");
982    return 0;
983  }
984
985  /// IsEligibleForTailCallOptimization - Check whether the call is eligible for
986  /// tail call optimization. Targets which want to do tail call optimization
987  /// should override this function.
988  virtual bool IsEligibleForTailCallOptimization(SDOperand Call,
989                                                 SDOperand Ret,
990                                                 SelectionDAG &DAG) const {
991    return false;
992  }
993
994  /// CustomPromoteOperation - This callback is invoked for operations that are
995  /// unsupported by the target, are registered to use 'custom' lowering, and
996  /// whose type needs to be promoted.
997  virtual SDOperand CustomPromoteOperation(SDOperand Op, SelectionDAG &DAG);
998
999  /// getTargetNodeName() - This method returns the name of a target specific
1000  /// DAG node.
1001  virtual const char *getTargetNodeName(unsigned Opcode) const;
1002
1003  //===--------------------------------------------------------------------===//
1004  // Inline Asm Support hooks
1005  //
1006
1007  enum ConstraintType {
1008    C_Register,            // Constraint represents a single register.
1009    C_RegisterClass,       // Constraint represents one or more registers.
1010    C_Memory,              // Memory constraint.
1011    C_Other,               // Something else.
1012    C_Unknown              // Unsupported constraint.
1013  };
1014
1015  /// AsmOperandInfo - This contains information for each constraint that we are
1016  /// lowering.
1017  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
1018    /// ConstraintCode - This contains the actual string for the code, like "m".
1019    std::string ConstraintCode;
1020
1021    /// ConstraintType - Information about the constraint code, e.g. Register,
1022    /// RegisterClass, Memory, Other, Unknown.
1023    TargetLowering::ConstraintType ConstraintType;
1024
1025    /// CallOperandVal - If this is the result output operand or a
1026    /// clobber, this is null, otherwise it is the incoming operand to the
1027    /// CallInst.  This gets modified as the asm is processed.
1028    Value *CallOperandVal;
1029
1030    /// ConstraintVT - The ValueType for the operand value.
1031    MVT::ValueType ConstraintVT;
1032
1033    AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
1034      : InlineAsm::ConstraintInfo(info),
1035        ConstraintType(TargetLowering::C_Unknown),
1036        CallOperandVal(0), ConstraintVT(MVT::Other) {
1037    }
1038
1039    /// getConstraintGenerality - Return an integer indicating how general CT is.
1040    unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
1041      switch (CT) {
1042      default: assert(0 && "Unknown constraint type!");
1043      case TargetLowering::C_Other:
1044      case TargetLowering::C_Unknown:
1045        return 0;
1046      case TargetLowering::C_Register:
1047        return 1;
1048      case TargetLowering::C_RegisterClass:
1049        return 2;
1050      case TargetLowering::C_Memory:
1051        return 3;
1052      }
1053    }
1054
1055    /// ComputeConstraintToUse - Determines the constraint code and constraint
1056    /// type to use.
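    /// For example, for the multi-letter constraint "imr", the memory
    /// constraint is typically the most general alternative, so "m" would be
    /// chosen.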
1057    void ComputeConstraintToUse(const TargetLowering &TLI) {
1058      assert(!Codes.empty() && "Must have at least one constraint");
1059
1060      std::string *Current = &Codes[0];
1061      TargetLowering::ConstraintType CurType = TLI.getConstraintType(*Current);
1062      if (Codes.size() == 1) {   // Single-letter constraints ('r') are very common.
1063        ConstraintCode = *Current;
1064        ConstraintType = CurType;
1065      } else {
1066        unsigned CurGenerality = getConstraintGenerality(CurType);
1067
1068        // If we have multiple constraints, try to pick the most general one ahead
1069        // of time.  This isn't a wonderful solution, but handles common cases.
1070        for (unsigned j = 1, e = Codes.size(); j != e; ++j) {
1071          TargetLowering::ConstraintType ThisType = TLI.getConstraintType(Codes[j]);
1072          unsigned ThisGenerality = getConstraintGenerality(ThisType);
1073          if (ThisGenerality > CurGenerality) {
1074            // This constraint letter is more general than the previous one,
1075            // use it.
1076            CurType = ThisType;
1077            Current = &Codes[j];
1078            CurGenerality = ThisGenerality;
1079          }
1080        }
1081
1082        ConstraintCode = *Current;
1083        ConstraintType = CurType;
1084      }
1085
1086      if (ConstraintCode == "X" && CallOperandVal) {
1087        if (isa<BasicBlock>(CallOperandVal) || isa<ConstantInt>(CallOperandVal))
1088          return;
1089        // This matches anything.  Labels and constants we handle elsewhere
1090        // ('X' is the only thing that matches labels).  Otherwise, try to
1091        // resolve it to something we know about by looking at the actual
1092        // operand type.
1093        std::string s = "";
1094        TLI.lowerXConstraint(ConstraintVT, s);
1095        if (s!="") {
1096          ConstraintCode = s;
1097          ConstraintType = TLI.getConstraintType(ConstraintCode);
1098        }
1099      }
1100    }
1101  };
1102
1103  /// getConstraintType - Given a constraint, return the type of constraint it
1104  /// is for this target.
1105  virtual ConstraintType getConstraintType(const std::string &Constraint) const;
1106
1107  /// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"),
1108  /// return a list of registers that can be used to satisfy the constraint.
1109  /// This should only be used for C_RegisterClass constraints.
1110  virtual std::vector<unsigned>
1111  getRegClassForInlineAsmConstraint(const std::string &Constraint,
1112                                    MVT::ValueType VT) const;
1113
1114  /// getRegForInlineAsmConstraint - Given a physical register constraint (e.g.
1115  /// {edx}), return the register number and the register class for the
1116  /// register.
1117  ///
1118  /// Given a register class constraint, like 'r', if this corresponds directly
1119  /// to an LLVM register class, return a register of 0 and the register class
1120  /// pointer.
1121  ///
1122  /// This should only be used for C_Register constraints.  On error,
1123  /// this returns a register number of 0 and a null register class pointer.
1124  virtual std::pair<unsigned, const TargetRegisterClass*>
1125    getRegForInlineAsmConstraint(const std::string &Constraint,
1126                                 MVT::ValueType VT) const;
1127
1128  /// lowerXConstraint - Try to replace an X constraint, which matches anything,
1129  /// with another that has more specific requirements based on the type of the
1130  /// corresponding operand.
1131  virtual void lowerXConstraint(MVT::ValueType ConstraintVT,
1132                                std::string&) const;
1133
1134  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
1135  /// vector.  If it is invalid, don't add anything to Ops.
1136  virtual void LowerAsmOperandForConstraint(SDOperand Op, char ConstraintLetter,
1137                                            std::vector<SDOperand> &Ops,
1138                                            SelectionDAG &DAG);
1139
1140  //===--------------------------------------------------------------------===//
1141  // Scheduler hooks
1142  //
1143
1144  // EmitInstrWithCustomInserter - This method should be implemented by targets
1145  // that mark instructions with the 'usesCustomDAGSchedInserter' flag.  These
1146  // instructions are special in various ways, which require special support to
1147  // insert.  The specified MachineInstr is created but not inserted into any
1148  // basic blocks, and the scheduler passes ownership of it to this method.
1149  virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
1150                                                         MachineBasicBlock *MBB);
1151
1152  //===--------------------------------------------------------------------===//
1153  // Addressing mode description hooks (used by LSR etc).
1154  //
1155
1156  /// AddrMode - This represents an addressing mode of:
1157  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
1158  /// If BaseGV is null,  there is no BaseGV.
1159  /// If BaseOffs is zero, there is no base offset.
1160  /// If HasBaseReg is false, there is no base register.
1161  /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
1162  /// no scale.
1163  ///
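  /// For example, an address of the form GV + 4 + Reg1 + 2*Reg2 would be
  /// described by BaseGV = GV, BaseOffs = 4, HasBaseReg = true, Scale = 2.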
1164  struct AddrMode {
1165    GlobalValue *BaseGV;
1166    int64_t      BaseOffs;
1167    bool         HasBaseReg;
1168    int64_t      Scale;
1169    AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
1170  };
1171
1172  /// isLegalAddressingMode - Return true if the addressing mode represented by
1173  /// AM is legal for this target, for a load/store of the specified type.
1174  /// TODO: Handle pre/postinc as well.
1175  virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;
1176
1177  /// isTruncateFree - Return true if it's free to truncate a value of
1178  /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
1179  /// register EAX to i16 by referencing its sub-register AX.
1180  virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const {
1181    return false;
1182  }
1183
1184  virtual bool isTruncateFree(MVT::ValueType VT1, MVT::ValueType VT2) const {
1185    return false;
1186  }
1187
1188  //===--------------------------------------------------------------------===//
1189  // Div utility functions
1190  //
1191  SDOperand BuildSDIV(SDNode *N, SelectionDAG &DAG,
1192                      std::vector<SDNode*>* Created) const;
1193  SDOperand BuildUDIV(SDNode *N, SelectionDAG &DAG,
1194                      std::vector<SDNode*>* Created) const;
1195
1196
1197  //===--------------------------------------------------------------------===//
1198  // Runtime Library hooks
1199  //
1200
1201  /// setLibcallName - Rename the default libcall routine name for the specified
1202  /// libcall.
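  /// For example, a target using EABI division helpers could rename the i32
  /// signed-division routine (the routine name is illustrative):
  ///   setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");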
1203  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
1204    LibcallRoutineNames[Call] = Name;
1205  }
1206
1207  /// getLibcallName - Get the libcall routine name for the specified libcall.
1208  ///
1209  const char *getLibcallName(RTLIB::Libcall Call) const {
1210    return LibcallRoutineNames[Call];
1211  }
1212
1213  /// setCmpLibcallCC - Override the default CondCode to be used to test the
1214  /// result of the comparison libcall against zero.
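  /// For example (illustrative libcall/condition pair):
  ///   setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETEQ);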
1215  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
1216    CmpLibcallCCs[Call] = CC;
1217  }
1218
1219  /// getCmpLibcallCC - Get the CondCode that's to be used to test the result of
1220  /// the comparison libcall against zero.
1221  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
1222    return CmpLibcallCCs[Call];
1223  }
1224
1225private:
1226  TargetMachine &TM;
1227  const TargetData *TD;
1228
1229  /// IsLittleEndian - True if this is a little endian target.
1230  ///
1231  bool IsLittleEndian;
1232
1233  /// PointerTy - The type to use for pointers, usually i32 or i64.
1234  ///
1235  MVT::ValueType PointerTy;
1236
1237  /// UsesGlobalOffsetTable - True if this target uses a GOT for PIC codegen.
1238  ///
1239  bool UsesGlobalOffsetTable;
1240
1241  /// ShiftAmountTy - The type to use for shift amounts, usually i8 or whatever
1242  /// PointerTy is.
1243  MVT::ValueType ShiftAmountTy;
1244
1245  OutOfRangeShiftAmount ShiftAmtHandling;
1246
1247  /// SelectIsExpensive - Tells the code generator not to expand operations
1248  /// into sequences that use the select operation, if possible.
1249  bool SelectIsExpensive;
1250
1251  /// IntDivIsCheap - Tells the code generator not to expand integer divides by
1252  /// constants into a sequence of muls, adds, and shifts.  This is a hack until
1253  /// a real cost model is in place.  If we ever optimize for size, this will be
1254  /// set to true unconditionally.
1255  bool IntDivIsCheap;
1256
1257  /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
1258  /// srl/add/sra for a signed divide by power of two, and let the target handle
1259  /// it.
1260  bool Pow2DivIsCheap;
1261
1262  /// SetCCResultTy - The type that SetCC operations use.  This defaults to the
1263  /// PointerTy.
1264  MVT::ValueType SetCCResultTy;
1265
1266  /// SetCCResultContents - Information about the contents of the high-bits in
1267  /// the result of a setcc comparison operation.
1268  SetCCResultValue SetCCResultContents;
1269
1270  /// SchedPreferenceInfo - The target scheduling preference: shortest possible
1271  /// total cycles or lowest register usage.
1272  SchedPreference SchedPreferenceInfo;
1273
1274  /// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
1275  /// llvm.setjmp.  Defaults to false.
1276  bool UseUnderscoreSetJmp;
1277
1278  /// UseUnderscoreLongJmp - This target prefers to use _longjmp to implement
1279  /// llvm.longjmp.  Defaults to false.
1280  bool UseUnderscoreLongJmp;
1281
1282  /// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers
1283  unsigned JumpBufSize;
1284
1285  /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
1286  /// buffers
1287  unsigned JumpBufAlignment;
1288
1289  /// IfCvtBlockSizeLimit - The maximum allowed size for a block to be
1290  /// if-converted.
1291  unsigned IfCvtBlockSizeLimit;
1292
1293  /// IfCvtDupBlockSizeLimit - The maximum allowed size for a block to be
1294  /// duplicated during if-conversion.
1295  unsigned IfCvtDupBlockSizeLimit;
1296
1297  /// PrefLoopAlignment - The preferred loop alignment.
1298  ///
1299  unsigned PrefLoopAlignment;
1300
1301  /// StackPointerRegisterToSaveRestore - If set to a physical register, this
1302  /// specifies the register that llvm.stacksave/llvm.stackrestore should save
1303  /// and restore.
1304  unsigned StackPointerRegisterToSaveRestore;
1305
1306  /// ExceptionPointerRegister - If set to a physical register, this specifies
1307  /// the register that receives the exception address on entry to a landing
1308  /// pad.
1309  unsigned ExceptionPointerRegister;
1310
1311  /// ExceptionSelectorRegister - If set to a physical register, this specifies
1312  /// the register that receives the exception typeid on entry to a landing
1313  /// pad.
1314  unsigned ExceptionSelectorRegister;
1315
1316  /// RegClassForVT - This indicates the default register class to use for
1317  /// each ValueType the target supports natively.
1318  TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
1319  unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
1320  MVT::ValueType RegisterTypeForVT[MVT::LAST_VALUETYPE];
1321
1322  /// TransformToType - For any value types we are promoting or expanding, this
1323  /// contains the value type that we are changing to.  For Expanded types, this
1324  /// contains one step of the expand (e.g. i64 -> i32), even if there are
1325  /// multiple steps required (e.g. i64 -> i16).  For types natively supported
1326  /// by the system, this holds the same type (e.g. i32 -> i32).
1327  MVT::ValueType TransformToType[MVT::LAST_VALUETYPE];
1328
1329  /// OpActions - For each operation and each value type, keep a LegalizeAction
1330  /// that indicates how instruction selection should deal with the operation.
1331  /// Most operations are Legal (aka, supported natively by the target), but
1332  /// operations that are not should be described.  Note that operations on
1333  /// non-legal value types are not described here.
1334  uint64_t OpActions[156];
1335
1336  /// LoadXActions - For each load extension type and each value type,
1337  /// keep a LegalizeAction that indicates how instruction selection should deal
1338  /// with the load.
1339  uint64_t LoadXActions[ISD::LAST_LOADX_TYPE];
1340
1341  /// TruncStoreActions - For each truncating store, keep a LegalizeAction that
1342  /// indicates how instruction selection should deal with the store.
1343  uint64_t TruncStoreActions[MVT::LAST_VALUETYPE];
1344
1345  /// IndexedModeActions - For each indexed mode and each value type, keep a
1346  /// pair of LegalizeActions that indicate how instruction selection should
1347  /// deal with the load / store.
1348  uint64_t IndexedModeActions[2][ISD::LAST_INDEXED_MODE];
1349
1350  /// ConvertActions - For each conversion from source type to destination type,
1351  /// keep a LegalizeAction that indicates how instruction selection should
1352  /// deal with the conversion.
1353  /// Currently, this is used only for floating->floating conversions
1354  /// (FP_EXTEND and FP_ROUND).
1355  uint64_t ConvertActions[MVT::LAST_VALUETYPE];
1356
1357  ValueTypeActionImpl ValueTypeActions;
1358
1359  std::vector<APFloat> LegalFPImmediates;
1360
1361  std::vector<std::pair<MVT::ValueType,
1362                        TargetRegisterClass*> > AvailableRegClasses;
1363
1364  /// TargetDAGCombineArray - Targets can specify ISD nodes that they would
1365  /// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(),
1366  /// which sets a bit in this array.
1367  unsigned char TargetDAGCombineArray[160/(sizeof(unsigned char)*8)];
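  /// Illustrative sketch only (MyTargetLowering is a hypothetical subclass):
  /// a target requests combine callbacks in its constructor and then handles
  /// the registered node kinds in its PerformDAGCombine implementation:
  ///
  ///   MyTargetLowering::MyTargetLowering(TargetMachine &TM)
  ///     : TargetLowering(TM) {
  ///     setTargetDAGCombine(ISD::ADD);   // request callbacks for ADD nodes
  ///   }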
1368
1369  /// PromoteToType - For operations that must be promoted to a specific type,
1370  /// this holds the destination type.  This map should be sparse, so don't hold
1371  /// it as an array.
1372  ///
1373  /// Targets add entries to this map with AddPromotedToType(..), clients access
1374  /// this with getTypeToPromoteTo(..).
1375  std::map<std::pair<unsigned, MVT::ValueType>, MVT::ValueType> PromoteToType;
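  /// Illustrative sketch only: a target that has no i8 multiply might promote
  /// the operation and record the destination type from its constructor,
  /// assuming the usual protected helpers shown below:
  ///
  ///   setOperationAction(ISD::MUL, MVT::i8, Promote);
  ///   AddPromotedToType(ISD::MUL, MVT::i8, MVT::i32);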
1376
1377  /// LibcallRoutineNames - Stores the name of each libcall.
1378  ///
1379  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];
1380
1381  /// CmpLibcallCCs - The ISD::CondCode that should be used to test the result
1382  /// of each of the comparison libcalls against zero.
1383  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
1384
1385protected:
1386  /// When lowering %llvm.memset this field specifies the maximum number of
1387  /// store operations that may be substituted for the call to memset. Targets
1388  /// must set this value based on the cost threshold for that target. Targets
1389  /// should assume that the memset will be done using as many of the largest
1390  /// store operations first, followed by smaller ones, if necessary, per
1391  /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
1392  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
1393  /// store.  This only applies to setting a constant array of a constant size.
1394  /// @brief Specify maximum number of store instructions per memset call.
1395  unsigned maxStoresPerMemset;
1396
1397  /// When lowering %llvm.memcpy this field specifies the maximum number of
1398  /// store operations that may be substituted for a call to memcpy. Targets
1399  /// must set this value based on the cost threshold for that target. Targets
1400  /// should assume that the memcpy will be done using as many of the largest
1401  /// store operations first, followed by smaller ones, if necessary, per
1402  /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
1403  /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
1404  /// and one 1-byte store. This only applies to copying a constant array of
1405  /// constant size.
1406  /// @brief Specify maximum number of store instructions per memcpy call.
1407  unsigned maxStoresPerMemcpy;
1408
1409  /// When lowering %llvm.memmove this field specifies the maximum number of
1410  /// store instructions that may be substituted for a call to memmove. Targets
1411  /// must set this value based on the cost threshold for that target. Targets
1412  /// should assume that the memmove will be done using as many of the largest
1413  /// store operations first, followed by smaller ones, if necessary, per
1414  /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
1415  /// with 8-bit alignment would result in nine 1-byte stores.  This only
1416  /// applies to copying a constant array of constant size.
1417  /// @brief Specify maximum number of store instructions per memmove call.
1418  unsigned maxStoresPerMemmove;
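  /// Illustrative sketch only: a target constructor simply assigns these
  /// thresholds directly; the values below are made-up examples, not
  /// recommendations:
  ///
  ///   maxStoresPerMemset  = 16;  // allow up to 16 stores for an inline memset
  ///   maxStoresPerMemcpy  = 8;
  ///   maxStoresPerMemmove = 8;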
1419
1420  /// This field specifies whether the target machine permits unaligned memory
1421  /// accesses.  This is used, for example, to determine the size of store
1422  /// operations when copying small arrays and performing other similar tasks.
1423  /// @brief Indicate whether the target permits unaligned memory accesses.
1424  bool allowUnalignedMemoryAccesses;
1425};
1426} // end llvm namespace
1427
1428#endif
1429