TargetLowering.h revision f86f211ec2fef2231f4ac4e8e4f4c0d4cf0f58f5
1//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file describes how to lower LLVM code to machine code.  This has
11// several main components:
12//
13//  1. Which ValueTypes are natively supported by the target.
14//  2. Which operations are supported for supported ValueTypes.
15//  3. Cost thresholds for alternative implementations of certain operations.
16//
17// In addition it has a few other components, like information about FP
18// immediates.
19//
20//===----------------------------------------------------------------------===//
21
22#ifndef LLVM_TARGET_TARGETLOWERING_H
23#define LLVM_TARGET_TARGETLOWERING_H
24
25#include "llvm/Constants.h"
26#include "llvm/InlineAsm.h"
27#include "llvm/CodeGen/SelectionDAGNodes.h"
28#include "llvm/CodeGen/RuntimeLibcalls.h"
29#include "llvm/ADT/APFloat.h"
30#include "llvm/ADT/STLExtras.h"
31#include <map>
32#include <vector>
33
34namespace llvm {
35  class Function;
36  class MachineBasicBlock;
37  class MachineFrameInfo;
38  class MachineInstr;
39  class SDNode;
40  class SDOperand;
41  class SelectionDAG;
42  class TargetData;
43  class TargetMachine;
44  class TargetRegisterClass;
45  class TargetSubtarget;
46  class Value;
47  class VectorType;
48
49//===----------------------------------------------------------------------===//
50/// TargetLowering - This class defines information used to lower LLVM code to
51/// legal SelectionDAG operators that the target instruction selector can accept
52/// natively.
53///
54/// This class also defines callbacks that targets must implement to lower
55/// target-specific constructs to SelectionDAG operators.
56///
57class TargetLowering {
58public:
  /// LegalizeAction - This enum indicates whether operations are valid for a
  /// target, and if not, what action should be used to make them valid.
  enum LegalizeAction {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand,     // Try to expand this to other ops, otherwise use a libcall.
    Custom      // Use the LowerOperation hook to implement custom lowering.
  };

  /// OutOfRangeShiftAmount - Describes the semantics the target gives to
  /// shift amounts that are as large as or larger than the shifted value.
  enum OutOfRangeShiftAmount {
    Undefined,  // Oversized shift amounts are undefined (default).
    Mask,       // Shift amounts are auto masked (anded) to value size.
    Extend      // Oversized shift pulls in zeros or sign bits.
  };

  /// SetCCResultValue - Describes what the bits above the low bit of a
  /// setcc result register contain, for targets without boolean registers.
  enum SetCCResultValue {
    UndefinedSetCCResult,          // SetCC returns a garbage/unknown extend.
    ZeroOrOneSetCCResult,          // SetCC returns a zero extended result.
    ZeroOrNegativeOneSetCCResult   // SetCC returns a sign extended result.
  };

  /// SchedPreference - Heuristic the instruction scheduler should optimize
  /// for on this target.
  enum SchedPreference {
    SchedulingForLatency,          // Scheduling for shortest total latency.
    SchedulingForRegPressure       // Scheduling for lowest register pressure.
  };
84
  explicit TargetLowering(TargetMachine &TM);
  virtual ~TargetLowering();

  /// getTargetMachine - Return the target machine this lowering was
  /// constructed for.
  TargetMachine &getTargetMachine() const { return TM; }
  /// getTargetData - Return the target's data layout description.
  const TargetData *getTargetData() const { return TD; }

  bool isBigEndian() const { return !IsLittleEndian; }
  bool isLittleEndian() const { return IsLittleEndian; }
  /// getPointerTy - Return the value type used for pointers on this target.
  MVT::ValueType getPointerTy() const { return PointerTy; }
  /// getShiftAmountTy - Return the type used for shift-amount operands.
  MVT::ValueType getShiftAmountTy() const { return ShiftAmountTy; }
  /// getShiftAmountFlavor - Return how this target treats out-of-range shift
  /// amounts (see OutOfRangeShiftAmount).
  OutOfRangeShiftAmount getShiftAmountFlavor() const {return ShiftAmtHandling; }

  /// usesGlobalOffsetTable - Return true if this target uses a GOT for PIC
  /// codegen.
  bool usesGlobalOffsetTable() const { return UsesGlobalOffsetTable; }

  /// isSelectExpensive - Return true if the select operation is expensive for
  /// this target.
  bool isSelectExpensive() const { return SelectIsExpensive; }

  /// isIntDivCheap() - Return true if integer divide is usually cheaper than
  /// a sequence of several shifts, adds, and multiplies for this target.
  bool isIntDivCheap() const { return IntDivIsCheap; }

  /// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of
  /// srl/add/sra.
  bool isPow2DivCheap() const { return Pow2DivIsCheap; }

  /// getSetCCResultType - Return the ValueType of the result of setcc
  /// operations.
  virtual MVT::ValueType getSetCCResultType(const SDOperand &) const;

  /// getSetCCResultContents - For targets without boolean registers, this flag
  /// returns information about the contents of the high-bits in the setcc
  /// result register.
  SetCCResultValue getSetCCResultContents() const { return SetCCResultContents;}

  /// getSchedulingPreference - Return target scheduling preference.
  SchedPreference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }
126
  /// getRegClassFor - Return the register class that should be used for the
  /// specified value type.  This may only be called on legal types.
  TargetRegisterClass *getRegClassFor(MVT::ValueType VT) const {
    assert(VT < array_lengthof(RegClassForVT));
    TargetRegisterClass *RC = RegClassForVT[VT];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// isTypeLegal - Return true if the target has native support for the
  /// specified value type.  This means that it has a register that directly
  /// holds it without promotions or expansions.
  bool isTypeLegal(MVT::ValueType VT) const {
    // Extended (non-simple) types have no entry in the table and are never
    // directly legal.
    assert(MVT::isExtendedVT(VT) || VT < array_lengthof(RegClassForVT));
    return !MVT::isExtendedVT(VT) && RegClassForVT[VT] != 0;
  }
143
  /// ValueTypeActionImpl - Compact map from each simple value type to its
  /// LegalizeAction, packed two bits per type into two 32-bit words
  /// (16 types per word).
  class ValueTypeActionImpl {
    /// ValueTypeActions - This is a bitvector that contains two bits for each
    /// value type, where the two bits correspond to the LegalizeAction enum.
    /// This can be queried with "getTypeAction(VT)".
    uint32_t ValueTypeActions[2];
  public:
    ValueTypeActionImpl() {
      // All entries default to 0, i.e. Legal.
      ValueTypeActions[0] = ValueTypeActions[1] = 0;
    }
    ValueTypeActionImpl(const ValueTypeActionImpl &RHS) {
      ValueTypeActions[0] = RHS.ValueTypeActions[0];
      ValueTypeActions[1] = RHS.ValueTypeActions[1];
    }

    /// getTypeAction - Return the action for the given value type.  Extended
    /// types are not stored in the table and are computed here.
    LegalizeAction getTypeAction(MVT::ValueType VT) const {
      if (MVT::isExtendedVT(VT)) {
        if (MVT::isVector(VT)) return Expand;
        if (MVT::isInteger(VT))
          // First promote to a power-of-two size, then expand if necessary.
          return VT == MVT::RoundIntegerType(VT) ? Expand : Promote;
        assert(0 && "Unsupported extended type!");
      }
      // 2 bits per type, 32 bits per word, 2 words => VT must be < 32.
      assert(VT<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
      // Word VT>>4 holds this type; extract its two action bits.
      return (LegalizeAction)((ValueTypeActions[VT>>4] >> ((2*VT) & 31)) & 3);
    }
    /// setTypeAction - Record the action for a simple value type.  Uses |=,
    /// so it assumes the entry has not been set to a conflicting value before.
    void setTypeAction(MVT::ValueType VT, LegalizeAction Action) {
      assert(VT<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
      ValueTypeActions[VT>>4] |= Action << ((VT*2) & 31);
    }
  };
174
  /// getValueTypeActions - Return the table of per-type legalize actions.
  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// getTypeAction - Return how we should legalize values of this type, either
  /// it is already legal (return 'Legal') or we need to promote it to a larger
  /// type (return 'Promote'), or we need to expand it into multiple registers
  /// of smaller integer type (return 'Expand').  'Custom' is not an option.
  LegalizeAction getTypeAction(MVT::ValueType VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }
186
  /// getTypeToTransformTo - For types supported by the target, this is an
  /// identity function.  For types that must be promoted to larger types, this
  /// returns the larger type to promote to.  For integer types that are larger
  /// than the largest integer register, this contains one step in the expansion
  /// to get to the smaller register. For illegal floating point types, this
  /// returns the integer type to transform to.
  MVT::ValueType getTypeToTransformTo(MVT::ValueType VT) const {
    // Simple types are looked up directly in the table.
    if (!MVT::isExtendedVT(VT)) {
      assert(VT < array_lengthof(TransformToType));
      MVT::ValueType NVT = TransformToType[VT];
      assert(getTypeAction(NVT) != Promote &&
             "Promote may not follow Expand or Promote");
      return NVT;
    }

    // Extended vector types are split in half, one step at a time.
    if (MVT::isVector(VT))
      return MVT::getVectorType(MVT::getVectorElementType(VT),
                                MVT::getVectorNumElements(VT) / 2);
    if (MVT::isInteger(VT)) {
      MVT::ValueType NVT = MVT::RoundIntegerType(VT);
      if (NVT == VT)
        // Size is a power of two - expand to half the size.
        return MVT::getIntegerType(MVT::getSizeInBits(VT) / 2);
      else
        // Promote to a power of two size, avoiding multi-step promotion.
        return getTypeAction(NVT) == Promote ? getTypeToTransformTo(NVT) : NVT;
    }
    assert(0 && "Unsupported extended type!");
    return MVT::ValueType(); // Not reached
  }
217
218  /// getTypeToExpandTo - For types supported by the target, this is an
219  /// identity function.  For types that must be expanded (i.e. integer types
220  /// that are larger than the largest integer register or illegal floating
221  /// point types), this returns the largest legal type it will be expanded to.
222  MVT::ValueType getTypeToExpandTo(MVT::ValueType VT) const {
223    assert(!MVT::isVector(VT));
224    while (true) {
225      switch (getTypeAction(VT)) {
226      case Legal:
227        return VT;
228      case Expand:
229        VT = getTypeToTransformTo(VT);
230        break;
231      default:
232        assert(false && "Type is not legal nor is it to be expanded!");
233        return VT;
234      }
235    }
236    return VT;
237  }
238
239  /// getVectorTypeBreakdown - Vector types are broken down into some number of
240  /// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
241  /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
242  /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
243  ///
244  /// This method returns the number of registers needed, and the VT for each
245  /// register.  It also returns the VT and quantity of the intermediate values
246  /// before they are promoted/expanded.
247  ///
248  unsigned getVectorTypeBreakdown(MVT::ValueType VT,
249                                  MVT::ValueType &IntermediateVT,
250                                  unsigned &NumIntermediates,
251                                  MVT::ValueType &RegisterVT) const;
252
  /// legal_fpimm_iterator - Iterator over the FP immediates this target can
  /// materialize cheaply.
  typedef std::vector<APFloat>::const_iterator legal_fpimm_iterator;
  legal_fpimm_iterator legal_fpimm_begin() const {
    return LegalFPImmediates.begin();
  }
  legal_fpimm_iterator legal_fpimm_end() const {
    return LegalFPImmediates.end();
  }

  /// isShuffleMaskLegal - Targets can use this to indicate that they only
  /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
  /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
  /// are assumed to be legal.
  virtual bool isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
    return true;
  }

  /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal.  Targets can use
  /// this to indicate if there is a suitable VECTOR_SHUFFLE that can be used
  /// to replace a VAND with a constant pool entry.
  virtual bool isVectorClearMaskLegal(const std::vector<SDOperand> &BVOps,
                                      MVT::ValueType EVT,
                                      SelectionDAG &DAG) const {
    return false;
  }
278
  /// getOperationAction - Return how this operation should be treated: either
  /// it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction getOperationAction(unsigned Op, MVT::ValueType VT) const {
    // Operations on extended (non-simple) types are always expanded.
    if (MVT::isExtendedVT(VT)) return Expand;
    assert(Op < array_lengthof(OpActions) &&
           VT < sizeof(OpActions[0])*4 && "Table isn't big enough!");
    // Each operation's entry packs two action bits per value type.
    return (LegalizeAction)((OpActions[Op] >> (2*VT)) & 3);
  }
289
290  /// isOperationLegal - Return true if the specified operation is legal on this
291  /// target.
292  bool isOperationLegal(unsigned Op, MVT::ValueType VT) const {
293    return getOperationAction(Op, VT) == Legal ||
294           getOperationAction(Op, VT) == Custom;
295  }
296
  /// getLoadXAction - Return how this load with extension should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction getLoadXAction(unsigned LType, MVT::ValueType VT) const {
    assert(LType < array_lengthof(LoadXActions) &&
           VT < sizeof(LoadXActions[0])*4 && "Table isn't big enough!");
    // Two action bits per value type within each extension kind's entry.
    return (LegalizeAction)((LoadXActions[LType] >> (2*VT)) & 3);
  }

  /// isLoadXLegal - Return true if the specified load with extension is legal
  /// on this target (Legal or Custom); extended types are never legal.
  bool isLoadXLegal(unsigned LType, MVT::ValueType VT) const {
    return !MVT::isExtendedVT(VT) &&
      (getLoadXAction(LType, VT) == Legal ||
       getLoadXAction(LType, VT) == Custom);
  }

  /// getTruncStoreAction - Return how this store with truncation should be
  /// treated: either it is legal, needs to be promoted to a larger size, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction getTruncStoreAction(MVT::ValueType ValVT,
                                     MVT::ValueType MemVT) const {
    assert(ValVT < array_lengthof(TruncStoreActions) &&
           MemVT < sizeof(TruncStoreActions[0])*4 && "Table isn't big enough!");
    return (LegalizeAction)((TruncStoreActions[ValVT] >> (2*MemVT)) & 3);
  }

  /// isTruncStoreLegal - Return true if the specified store with truncation is
  /// legal on this target (Legal or Custom); extended memory types are never
  /// legal.
  bool isTruncStoreLegal(MVT::ValueType ValVT, MVT::ValueType MemVT) const {
    return !MVT::isExtendedVT(MemVT) &&
      (getTruncStoreAction(ValVT, MemVT) == Legal ||
       getTruncStoreAction(ValVT, MemVT) == Custom);
  }
333
  /// getIndexedLoadAction - Return how the indexed load should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction
  getIndexedLoadAction(unsigned IdxMode, MVT::ValueType VT) const {
    assert(IdxMode < array_lengthof(IndexedModeActions[0]) &&
           VT < sizeof(IndexedModeActions[0][0])*4 &&
           "Table isn't big enough!");
    // Row 0 of IndexedModeActions holds the load actions.
    return (LegalizeAction)((IndexedModeActions[0][IdxMode] >> (2*VT)) & 3);
  }

  /// isIndexedLoadLegal - Return true if the specified indexed load is legal
  /// on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, MVT::ValueType VT) const {
    return getIndexedLoadAction(IdxMode, VT) == Legal ||
           getIndexedLoadAction(IdxMode, VT) == Custom;
  }

  /// getIndexedStoreAction - Return how the indexed store should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction
  getIndexedStoreAction(unsigned IdxMode, MVT::ValueType VT) const {
    assert(IdxMode < array_lengthof(IndexedModeActions[1]) &&
           VT < sizeof(IndexedModeActions[1][0])*4 &&
           "Table isn't big enough!");
    // Row 1 of IndexedModeActions holds the store actions.
    return (LegalizeAction)((IndexedModeActions[1][IdxMode] >> (2*VT)) & 3);
  }

  /// isIndexedStoreLegal - Return true if the specified indexed store is legal
  /// on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, MVT::ValueType VT) const {
    return getIndexedStoreAction(IdxMode, VT) == Legal ||
           getIndexedStoreAction(IdxMode, VT) == Custom;
  }
371
  /// getConvertAction - Return how the conversion should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction
  getConvertAction(MVT::ValueType FromVT, MVT::ValueType ToVT) const {
    assert(FromVT < array_lengthof(ConvertActions) &&
           ToVT < sizeof(ConvertActions[0])*4 && "Table isn't big enough!");
    // Two action bits per destination type within each source type's entry.
    return (LegalizeAction)((ConvertActions[FromVT] >> (2*ToVT)) & 3);
  }

  /// isConvertLegal - Return true if the specified conversion is legal
  /// on this target (Legal or Custom).
  bool isConvertLegal(MVT::ValueType FromVT, MVT::ValueType ToVT) const {
    return getConvertAction(FromVT, ToVT) == Legal ||
           getConvertAction(FromVT, ToVT) == Custom;
  }
389
  /// getTypeToPromoteTo - If the action for this operation is to promote, this
  /// method returns the ValueType to promote to.
  MVT::ValueType getTypeToPromoteTo(unsigned Op, MVT::ValueType VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::ValueType>,
             MVT::ValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((MVT::isInteger(VT) || MVT::isFloatingPoint(VT)) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    // Otherwise, autopromote: walk up the consecutively-numbered value types
    // of the same class (integer or FP) until one is found that is legal and
    // whose action for this operation is not itself Promote.
    MVT::ValueType NVT = VT;
    do {
      NVT = (MVT::ValueType)(NVT+1);
      assert(MVT::isInteger(NVT) == MVT::isInteger(VT) && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
              getOperationAction(Op, NVT) == Promote);
    return NVT;
  }
414
  /// getValueType - Return the MVT::ValueType corresponding to this LLVM type.
  /// This is fixed by the LLVM operations except for the pointer size.  If
  /// AllowUnknown is true, this will return MVT::Other for types with no MVT
  /// counterpart (e.g. structs), otherwise it will assert.
  MVT::ValueType getValueType(const Type *Ty, bool AllowUnknown = false) const {
    MVT::ValueType VT = MVT::getValueType(Ty, AllowUnknown);
    // Resolve the target-independent pointer placeholder to the real type.
    return VT == MVT::iPTR ? PointerTy : VT;
  }
423
424  /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
425  /// function arguments in the caller parameter area.  This is the actual
426  /// alignment, not its logarithm.
427  virtual unsigned getByValTypeAlignment(const Type *Ty) const;
428
  /// getRegisterType - Return the type of registers that this ValueType will
  /// eventually require.
  MVT::ValueType getRegisterType(MVT::ValueType VT) const {
    // Simple types are looked up directly in the table.
    if (!MVT::isExtendedVT(VT)) {
      assert(VT < array_lengthof(RegisterTypeForVT));
      return RegisterTypeForVT[VT];
    }
    if (MVT::isVector(VT)) {
      MVT::ValueType VT1, RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(VT, VT1, NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (MVT::isInteger(VT)) {
      // Recurse one expansion/promotion step at a time.
      return getRegisterType(getTypeToTransformTo(VT));
    }
    assert(0 && "Unsupported extended type!");
    return MVT::ValueType(); // Not reached
  }
448
  /// getNumRegisters - Return the number of registers that this ValueType will
  /// eventually require.  This is one for any types promoted to live in larger
  /// registers, but may be more than one for types (like i64) that are split
  /// into pieces.  For types like i140, which are first promoted then expanded,
  /// it is the number of registers needed to hold all the bits of the original
  /// type.  For an i140 on a 32 bit machine this means 5 registers.
  unsigned getNumRegisters(MVT::ValueType VT) const {
    // Simple types are looked up directly in the table.
    if (!MVT::isExtendedVT(VT)) {
      assert(VT < array_lengthof(NumRegistersForVT));
      return NumRegistersForVT[VT];
    }
    if (MVT::isVector(VT)) {
      MVT::ValueType VT1, VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(VT, VT1, NumIntermediates, VT2);
    }
    if (MVT::isInteger(VT)) {
      // Round the bit width up to a whole number of registers.
      unsigned BitWidth = MVT::getSizeInBits(VT);
      unsigned RegWidth = MVT::getSizeInBits(getRegisterType(VT));
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    assert(0 && "Unsupported extended type!");
    return 0; // Not reached
  }
473
  /// ShouldShrinkFPConstant - If true, then instruction selection should
  /// seek to shrink the FP constant of the specified type to a smaller type
  /// in order to save space and / or reduce runtime.
  virtual bool ShouldShrinkFPConstant(MVT::ValueType VT) const { return true; }

  /// hasTargetDAGCombine - If true, the target has custom DAG combine
  /// transformations that it can perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    // One bit per node type, packed eight to a byte.
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }
485
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset. The value is set by the target at the
  /// performance threshold for such a replacement.
  /// @brief Get maximum # of store operations permitted for llvm.memset
  unsigned getMaxStoresPerMemset() const { return maxStoresPerMemset; }

  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy. The value is set by the target at the
  /// performance threshold for such a replacement.
  /// @brief Get maximum # of store operations permitted for llvm.memcpy
  unsigned getMaxStoresPerMemcpy() const { return maxStoresPerMemcpy; }

  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove. The value is set by the target at the
  /// performance threshold for such a replacement.
  /// @brief Get maximum # of store operations permitted for llvm.memmove
  unsigned getMaxStoresPerMemmove() const { return maxStoresPerMemmove; }

  /// This function returns true if the target allows unaligned memory accesses.
  /// This is used, for example, in situations where an array copy/move/set is
  /// converted to a sequence of store operations. Its use helps to ensure that
  /// such replacements don't generate code that causes an alignment error
  /// (trap) on the target machine.
  /// @brief Determine if the target supports unaligned memory accesses.
  bool allowsUnalignedMemoryAccesses() const {
    return allowUnalignedMemoryAccesses;
  }

  /// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
  /// to implement llvm.setjmp.
  bool usesUnderscoreSetJmp() const {
    return UseUnderscoreSetJmp;
  }

  /// usesUnderscoreLongJmp - Determine if we should use _longjmp or longjmp
  /// to implement llvm.longjmp.
  bool usesUnderscoreLongJmp() const {
    return UseUnderscoreLongJmp;
  }
525
  /// getStackPointerRegisterToSaveRestore - If a physical register, this
  /// specifies the register that llvm.savestack/llvm.restorestack should save
  /// and restore.
  unsigned getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// getExceptionAddressRegister - If a physical register, this returns
  /// the register that receives the exception address on entry to a landing
  /// pad.
  unsigned getExceptionAddressRegister() const {
    return ExceptionPointerRegister;
  }

  /// getExceptionSelectorRegister - If a physical register, this returns
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
  unsigned getExceptionSelectorRegister() const {
    return ExceptionSelectorRegister;
  }

  /// getJumpBufSize - returns the target's jmp_buf size in bytes (if never
  /// set, the default is 200)
  unsigned getJumpBufSize() const {
    return JumpBufSize;
  }

  /// getJumpBufAlignment - returns the target's jmp_buf alignment in bytes
  /// (if never set, the default is 0)
  unsigned getJumpBufAlignment() const {
    return JumpBufAlignment;
  }

  /// getIfCvtBlockSizeLimit - returns the target specific if-conversion block
  /// size limit. Any block whose size is greater should not be predicated.
  unsigned getIfCvtBlockSizeLimit() const {
    return IfCvtBlockSizeLimit;
  }

  /// getIfCvtDupBlockSizeLimit - returns the target specific size limit for a
  /// block to be considered for duplication. Any block whose size is greater
  /// should not be duplicated to facilitate its predication.
  unsigned getIfCvtDupBlockSizeLimit() const {
    return IfCvtDupBlockSizeLimit;
  }

  /// getPrefLoopAlignment - return the preferred loop alignment.
  ///
  unsigned getPrefLoopAlignment() const {
    return PrefLoopAlignment;
  }
577
  /// getPreIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if the node's address
  /// can be legally represented as pre-indexed load / store address.
  virtual bool getPreIndexedAddressParts(SDNode *N, SDOperand &Base,
                                         SDOperand &Offset,
                                         ISD::MemIndexedMode &AM,
                                         SelectionDAG &DAG) {
    return false;  // By default, targets do not support pre-indexing.
  }

  /// getPostIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if this node can be
  /// combined with a load / store to form a post-indexed load / store.
  virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                          SDOperand &Base, SDOperand &Offset,
                                          ISD::MemIndexedMode &AM,
                                          SelectionDAG &DAG) {
    return false;  // By default, targets do not support post-indexing.
  }
597
  /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
  /// jumptable.
600  virtual SDOperand getPICJumpTableRelocBase(SDOperand Table,
601                                             SelectionDAG &DAG) const;
602
603  //===--------------------------------------------------------------------===//
604  // TargetLowering Optimization Methods
605  //
606
  /// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two
  /// SDOperands for returning information from TargetLowering to its clients
  /// that want to combine.
  struct TargetLoweringOpt {
    SelectionDAG &DAG;
    bool AfterLegalize;   // True if legalization has already been run.
    SDOperand Old;        // The node to be replaced.
    SDOperand New;        // The replacement node.

    explicit TargetLoweringOpt(SelectionDAG &InDAG, bool afterLegalize)
      : DAG(InDAG), AfterLegalize(afterLegalize) {}

    /// CombineTo - Record a replacement of O by N and report success.
    bool CombineTo(SDOperand O, SDOperand N) {
      Old = O;
      New = N;
      return true;
    }

    /// ShrinkDemandedConstant - Check to see if the specified operand of the
    /// specified instruction is a constant integer.  If so, check to see if
    /// there are any bits set in the constant that are not demanded.  If so,
    /// shrink the constant and return true.
    bool ShrinkDemandedConstant(SDOperand Op, const APInt &Demanded);
  };
631
632  /// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
633  /// DemandedMask bits of the result of Op are ever used downstream.  If we can
634  /// use this information to simplify Op, create a new simplified DAG node and
635  /// return true, returning the original and new nodes in Old and New.
636  /// Otherwise, analyze the expression and return a mask of KnownOne and
637  /// KnownZero bits for the expression (used to simplify the caller).
638  /// The KnownZero/One bits may only be accurate for those bits in the
639  /// DemandedMask.
640  bool SimplifyDemandedBits(SDOperand Op, const APInt &DemandedMask,
641                            APInt &KnownZero, APInt &KnownOne,
642                            TargetLoweringOpt &TLO, unsigned Depth = 0) const;
643
644  /// computeMaskedBitsForTargetNode - Determine which of the bits specified in
645  /// Mask are known to be either zero or one and return them in the
646  /// KnownZero/KnownOne bitsets.
647  virtual void computeMaskedBitsForTargetNode(const SDOperand Op,
648                                              const APInt &Mask,
649                                              APInt &KnownZero,
650                                              APInt &KnownOne,
651                                              const SelectionDAG &DAG,
652                                              unsigned Depth = 0) const;
653
654  /// ComputeNumSignBitsForTargetNode - This method can be implemented by
655  /// targets that want to expose additional information about sign bits to the
656  /// DAG Combiner.
657  virtual unsigned ComputeNumSignBitsForTargetNode(SDOperand Op,
658                                                   unsigned Depth = 0) const;
659
  /// DAGCombinerInfo - State passed to PerformDAGCombine, giving target
  /// combine hooks access to the DAG combiner's worklist and replacement
  /// machinery.
  struct DAGCombinerInfo {
    void *DC;  // The DAG Combiner object.
    bool BeforeLegalize;      // True if running before the legalizer.
    bool CalledByLegalizer;   // True if invoked from within the legalizer.
  public:
    SelectionDAG &DAG;

    DAGCombinerInfo(SelectionDAG &dag, bool bl, bool cl, void *dc)
      : DC(dc), BeforeLegalize(bl), CalledByLegalizer(cl), DAG(dag) {}

    bool isBeforeLegalize() const { return BeforeLegalize; }
    bool isCalledByLegalizer() const { return CalledByLegalizer; }

    void AddToWorklist(SDNode *N);
    SDOperand CombineTo(SDNode *N, const std::vector<SDOperand> &To);
    SDOperand CombineTo(SDNode *N, SDOperand Res);
    SDOperand CombineTo(SDNode *N, SDOperand Res0, SDOperand Res1);
  };
678
679  /// SimplifySetCC - Try to simplify a setcc built with the specified operands
680  /// and cc. If it is unable to simplify it, return a null SDOperand.
681  SDOperand SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1,
682                          ISD::CondCode Cond, bool foldBooleans,
683                          DAGCombinerInfo &DCI) const;
684
685  /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
686  /// node is a GlobalAddress + offset.
687  virtual bool
688  isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) const;
689
690  /// isConsecutiveLoad - Return true if LD (which must be a LoadSDNode) is
691  /// loading 'Bytes' bytes from a location that is 'Dist' units away from the
692  /// location that the 'Base' load is loading from.
693  bool isConsecutiveLoad(SDNode *LD, SDNode *Base, unsigned Bytes, int Dist,
694                         MachineFrameInfo *MFI) const;
695
696  /// PerformDAGCombine - This method will be invoked for all target nodes and
697  /// for any target-independent nodes that the target has registered with
698  /// invoke it for.
699  ///
700  /// The semantics are as follows:
701  /// Return Value:
702  ///   SDOperand.Val == 0   - No change was made
703  ///   SDOperand.Val == N   - N was replaced, is dead, and is already handled.
704  ///   otherwise            - N should be replaced by the returned Operand.
705  ///
706  /// In addition, methods provided by DAGCombinerInfo may be used to perform
707  /// more complex transformations.
708  ///
709  virtual SDOperand PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
710
711  //===--------------------------------------------------------------------===//
712  // TargetLowering Configuration Methods - These methods should be invoked by
713  // the derived class constructor to configure this object for the target.
714  //
715
716protected:
  /// setUsesGlobalOffsetTable - Specify that this target does or doesn't use a
  /// GOT (global offset table) for PC-relative code.
  void setUsesGlobalOffsetTable(bool V) { UsesGlobalOffsetTable = V; }
720
  /// setShiftAmountType - Describe the type that should be used for shift
  /// amounts (typically i8, or whatever PointerTy is).  This type defaults to
  /// the pointer type.
  void setShiftAmountType(MVT::ValueType VT) { ShiftAmountTy = VT; }
724
  /// setSetCCResultContents - Specify how the target extends the result of a
  /// setcc operation into the high bits of a register (see SetCCResultValue).
  void setSetCCResultContents(SetCCResultValue Ty) { SetCCResultContents = Ty; }
728
  /// setSchedulingPreference - Specify the target scheduling preference:
  /// shortest possible total cycles or lowest register usage.
  void setSchedulingPreference(SchedPreference Pref) {
    SchedPreferenceInfo = Pref;
  }
733
  /// setShiftAmountFlavor - Describe how the target handles out-of-range shift
  /// amounts (see OutOfRangeShiftAmount for the possible behaviors).
  void setShiftAmountFlavor(OutOfRangeShiftAmount OORSA) {
    ShiftAmtHandling = OORSA;
  }
739
  /// setUseUnderscoreSetJmp - Indicate whether this target prefers to
  /// use _setjmp to implement llvm.setjmp rather than the non-underscore
  /// version.  Defaults to false.
  void setUseUnderscoreSetJmp(bool Val) {
    UseUnderscoreSetJmp = Val;
  }
746
  /// setUseUnderscoreLongJmp - Indicate whether this target prefers to
  /// use _longjmp to implement llvm.longjmp rather than the non-underscore
  /// version.  Defaults to false.
  void setUseUnderscoreLongJmp(bool Val) {
    UseUnderscoreLongJmp = Val;
  }
753
  /// setStackPointerRegisterToSaveRestore - If set to a physical register,
  /// this specifies the register that the llvm.stacksave/llvm.stackrestore
  /// intrinsics should save and restore.
  void setStackPointerRegisterToSaveRestore(unsigned R) {
    StackPointerRegisterToSaveRestore = R;
  }
760
  /// setExceptionPointerRegister - If set to a physical register, this sets
  /// the register that receives the exception address on entry to a landing
  /// pad.
  void setExceptionPointerRegister(unsigned R) {
    ExceptionPointerRegister = R;  // 0 (the default) means "not set".
  }
767
  /// setExceptionSelectorRegister - If set to a physical register, this sets
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
  void setExceptionSelectorRegister(unsigned R) {
    ExceptionSelectorRegister = R;  // 0 (the default) means "not set".
  }
774
  /// setSelectIsExpensive - Tells the code generator not to expand operations
  /// into sequences that use the select operations if possible.
  void setSelectIsExpensive() { SelectIsExpensive = true; }
778
  /// setIntDivIsCheap - Tells the code generator whether integer divide is
  /// cheap on this target.  When it is *not* cheap, divides are, if possible,
  /// replaced by an alternate sequence of instructions not containing an
  /// integer divide (e.g. muls, adds, and shifts for constant divisors).
  void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
783
  /// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
  /// srl/add/sra for a signed divide by power of two, and let the target
  /// handle it directly.
  void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }
788
  /// addRegisterClass - Add the specified register class as an available
  /// regclass for the specified value type.  This indicates the selector can
  /// handle values of that class natively.  After all classes have been
  /// registered, computeRegisterProperties() must be called to derive the
  /// dependent tables.
  void addRegisterClass(MVT::ValueType VT, TargetRegisterClass *RC) {
    assert(VT < array_lengthof(RegClassForVT));
    AvailableRegClasses.push_back(std::make_pair(VT, RC));
    RegClassForVT[VT] = RC;
  }
797
798  /// computeRegisterProperties - Once all of the register classes are added,
799  /// this allows us to compute derived properties we expose.
800  void computeRegisterProperties();
801
  /// setOperationAction - Indicate that the specified operation does not work
  /// with the specified type and indicate what to do about it.
  void setOperationAction(unsigned Op, MVT::ValueType VT,
                          LegalizeAction Action) {
    // Each value type occupies a 2-bit slot of OpActions[Op], so VT must be
    // less than sizeof(entry) * 8 / 2 == sizeof(entry) * 4.
    assert(VT < sizeof(OpActions[0])*4 && Op < array_lengthof(OpActions) &&
           "Table isn't big enough!");
    OpActions[Op] &= ~(uint64_t(3UL) << VT*2);   // Clear the old 2-bit action.
    OpActions[Op] |= (uint64_t)Action << VT*2;   // Store the new one.
  }
811
  /// setLoadXAction - Indicate that the specified load with extension does not
  /// work with the specified type and indicate what to do about it.
  void setLoadXAction(unsigned ExtType, MVT::ValueType VT,
                      LegalizeAction Action) {
    // Each value type occupies a 2-bit slot of LoadXActions[ExtType].
    assert(VT < sizeof(LoadXActions[0])*4 &&
           ExtType < array_lengthof(LoadXActions) &&
           "Table isn't big enough!");
    LoadXActions[ExtType] &= ~(uint64_t(3UL) << VT*2);
    LoadXActions[ExtType] |= (uint64_t)Action << VT*2;
  }
822
  /// setTruncStoreAction - Indicate that the specified truncating store does
  /// not work with the specified type and indicate what to do about it.
  void setTruncStoreAction(MVT::ValueType ValVT, MVT::ValueType MemVT,
                           LegalizeAction Action) {
    // Each memory type occupies a 2-bit slot of TruncStoreActions[ValVT].
    assert(ValVT < array_lengthof(TruncStoreActions) &&
           MemVT < sizeof(TruncStoreActions[0])*4 && "Table isn't big enough!");
    TruncStoreActions[ValVT] &= ~(uint64_t(3UL) << MemVT*2);
    TruncStoreActions[ValVT] |= (uint64_t)Action << MemVT*2;
  }
832
833  /// setIndexedLoadAction - Indicate that the specified indexed load does or
834  /// does not work with the with specified type and indicate what to do abort
835  /// it. NOTE: All indexed mode loads are initialized to Expand in
836  /// TargetLowering.cpp
837  void setIndexedLoadAction(unsigned IdxMode, MVT::ValueType VT,
838                            LegalizeAction Action) {
839    assert(VT < sizeof(IndexedModeActions[0])*4 && IdxMode <
840           array_lengthof(IndexedModeActions[0]) &&
841           "Table isn't big enough!");
842    IndexedModeActions[0][IdxMode] &= ~(uint64_t(3UL) << VT*2);
843    IndexedModeActions[0][IdxMode] |= (uint64_t)Action << VT*2;
844  }
845
  /// setIndexedStoreAction - Indicate that the specified indexed store does or
  /// does not work with the specified type and indicate what to do about
  /// it. NOTE: All indexed mode stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedStoreAction(unsigned IdxMode, MVT::ValueType VT,
                             LegalizeAction Action) {
    // Each value type occupies a 2-bit slot of the per-mode uint64_t entry.
    assert(VT < sizeof(IndexedModeActions[1][0])*4 &&
           IdxMode < array_lengthof(IndexedModeActions[1]) &&
           "Table isn't big enough!");
    IndexedModeActions[1][IdxMode] &= ~(uint64_t(3UL) << VT*2);
    IndexedModeActions[1][IdxMode] |= (uint64_t)Action << VT*2;
  }
858
  /// setConvertAction - Indicate that the specified conversion does or does
  /// not work with the specified type and indicate what to do about it.
  void setConvertAction(MVT::ValueType FromVT, MVT::ValueType ToVT,
                        LegalizeAction Action) {
    // Each destination type occupies a 2-bit slot of ConvertActions[FromVT].
    assert(FromVT < array_lengthof(ConvertActions) &&
           ToVT < sizeof(ConvertActions[0])*4 && "Table isn't big enough!");
    ConvertActions[FromVT] &= ~(uint64_t(3UL) << ToVT*2);
    ConvertActions[FromVT] |= (uint64_t)Action << ToVT*2;
  }
868
  /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the
  /// promotion code defaults to trying a larger integer/fp until it can find
  /// one that works.  If that default is insufficient, this method can be used
  /// by the target to override the default, forcing Opc on OrigVT to promote
  /// directly to DestVT.
  void AddPromotedToType(unsigned Opc, MVT::ValueType OrigVT,
                         MVT::ValueType DestVT) {
    PromoteToType[std::make_pair(Opc, OrigVT)] = DestVT;
  }
877
  /// addLegalFPImmediate - Indicate that this target can instruction select
  /// the specified FP immediate natively (i.e. materialize it without a
  /// constant-pool load).
  void addLegalFPImmediate(const APFloat& Imm) {
    LegalFPImmediates.push_back(Imm);
  }
883
  /// setTargetDAGCombine - Targets should invoke this method for each target
  /// independent node that they want to provide a custom DAG combiner for by
  /// implementing the PerformDAGCombine virtual method.
  void setTargetDAGCombine(ISD::NodeType NT) {
    // TargetDAGCombineArray is a bit vector: byte NT>>3, bit NT&7.
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
  }
891
  /// setJumpBufSize - Set the target's required jmp_buf buffer size (in
  /// bytes); default is 200.
  void setJumpBufSize(unsigned Size) {
    JumpBufSize = Size;
  }
897
  /// setJumpBufAlignment - Set the target's required jmp_buf buffer
  /// alignment (in bytes); default is 0.
  void setJumpBufAlignment(unsigned Align) {
    JumpBufAlignment = Align;
  }
903
  /// setIfCvtBlockSizeLimit - Set the maximum size of a block that may be
  /// if-converted (in number of instructions); default is 2.
  void setIfCvtBlockSizeLimit(unsigned Limit) {
    IfCvtBlockSizeLimit = Limit;
  }
909
  /// setIfCvtDupBlockSizeLimit - Set the target's block size limit (in number
  /// of instructions) to be considered for code duplication during
  /// if-conversion; default is 2.
  void setIfCvtDupBlockSizeLimit(unsigned Limit) {
    IfCvtDupBlockSizeLimit = Limit;
  }
916
  /// setPrefLoopAlignment - Set the target's preferred loop alignment.  The
  /// default of zero means the target does not care about loop alignment.
  void setPrefLoopAlignment(unsigned Align) {
    PrefLoopAlignment = Align;
  }
922
923public:
924
  /// getSubtarget - Return the subtarget information for this target.  The
  /// default implementation aborts; targets that have subtarget information
  /// must override it.
  virtual const TargetSubtarget *getSubtarget() {
    assert(0 && "Not Implemented");
    return NULL;    // unreachable; silences missing-return diagnostics
  }
929  //===--------------------------------------------------------------------===//
930  // Lowering methods - These methods must be implemented by targets so that
931  // the SelectionDAGLowering code knows how to lower these.
932  //
933
934  /// LowerArguments - This hook must be implemented to indicate how we should
935  /// lower the arguments for the specified function, into the specified DAG.
936  virtual std::vector<SDOperand>
937  LowerArguments(Function &F, SelectionDAG &DAG);
938
939  /// LowerCallTo - This hook lowers an abstract call to a function into an
940  /// actual call.  This returns a pair of operands.  The first element is the
941  /// return value for the function (if RetTy is not VoidTy).  The second
942  /// element is the outgoing token chain.
  /// ArgListEntry - One argument being passed to a LowerCallTo call.
  struct ArgListEntry {
    SDOperand Node;      // The lowered argument value.
    const Type* Ty;      // The IR type of the argument.
    bool isSExt;         // Sign-extend the value when widening?
    bool isZExt;         // Zero-extend the value when widening?
    bool isInReg;        // Pass in a register ('inreg')?
    bool isSRet;         // Struct-return pointer ('sret')?
    bool isNest;         // Nest parameter ('nest')?
    bool isByVal;        // Pass a copy of the pointee ('byval')?
    uint16_t Alignment;  // Alignment, used with byval; 0 if unspecified.

    ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
      isSRet(false), isNest(false), isByVal(false), Alignment(0) { }
  };
957  typedef std::vector<ArgListEntry> ArgListTy;
958  virtual std::pair<SDOperand, SDOperand>
959  LowerCallTo(SDOperand Chain, const Type *RetTy, bool RetSExt, bool RetZExt,
960              bool isVarArg, unsigned CallingConv, bool isTailCall,
961              SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);
962
963
  /// EmitTargetCodeForMemcpy - Emit target-specific code that performs a
  /// memcpy. This can be used by targets to provide code sequences for cases
  /// that don't fit the target's parameters for simple loads/stores and can be
  /// more efficient than using a library call. This function can return a null
  /// SDOperand if the target declines to use custom code and a different
  /// lowering strategy should be used.
  ///
  /// If AlwaysInline is true, the size is constant and the target should not
  /// emit any calls and is strongly encouraged to attempt to emit inline code
  /// even if it is beyond the usual threshold because this intrinsic is being
  /// expanded in a place where calls are not feasible (e.g. within the prologue
  /// for another call). If the target chooses to decline an AlwaysInline
  /// request here, legalize will resort to using simple loads and stores.
  ///
  /// NOTE(review): Op1/Op2/Op3 follow memcpy operand order — presumably dest
  /// pointer, source pointer, and byte count; confirm against the legalizer's
  /// call site.
  virtual SDOperand
  EmitTargetCodeForMemcpy(SelectionDAG &DAG,
                          SDOperand Chain,
                          SDOperand Op1, SDOperand Op2,
                          SDOperand Op3, unsigned Align,
                          bool AlwaysInline,
                          const Value *DstSV, uint64_t DstOff,
                          const Value *SrcSV, uint64_t SrcOff) {
    return SDOperand();  // Null operand: decline custom lowering by default.
  }
987
  /// EmitTargetCodeForMemmove - Emit target-specific code that performs a
  /// memmove. This can be used by targets to provide code sequences for cases
  /// that don't fit the target's parameters for simple loads/stores and can be
  /// more efficient than using a library call. This function can return a null
  /// SDOperand if the target declines to use custom code and a different
  /// lowering strategy should be used.
  ///
  /// NOTE(review): Op1/Op2/Op3 follow memmove operand order — presumably dest
  /// pointer, source pointer, and byte count; confirm against the legalizer's
  /// call site.
  virtual SDOperand
  EmitTargetCodeForMemmove(SelectionDAG &DAG,
                           SDOperand Chain,
                           SDOperand Op1, SDOperand Op2,
                           SDOperand Op3, unsigned Align,
                           const Value *DstSV, uint64_t DstOff,
                           const Value *SrcSV, uint64_t SrcOff) {
    return SDOperand();  // Null operand: decline custom lowering by default.
  }
1003
  /// EmitTargetCodeForMemset - Emit target-specific code that performs a
  /// memset. This can be used by targets to provide code sequences for cases
  /// that don't fit the target's parameters for simple stores and can be more
  /// efficient than using a library call. This function can return a null
  /// SDOperand if the target declines to use custom code and a different
  /// lowering strategy should be used.
  ///
  /// NOTE(review): Op1/Op2/Op3 follow memset operand order — presumably dest
  /// pointer, fill value, and byte count; confirm against the legalizer's
  /// call site.
  virtual SDOperand
  EmitTargetCodeForMemset(SelectionDAG &DAG,
                          SDOperand Chain,
                          SDOperand Op1, SDOperand Op2,
                          SDOperand Op3, unsigned Align,
                          const Value *DstSV, uint64_t DstOff) {
    return SDOperand();  // Null operand: decline custom lowering by default.
  }
1018
1019  /// LowerOperation - This callback is invoked for operations that are
1020  /// unsupported by the target, which are registered to use 'custom' lowering,
1021  /// and whose defined values are all legal.
1022  /// If the target has no operations that require custom lowering, it need not
1023  /// implement this.  The default implementation of this aborts.
1024  virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);
1025
  /// ExpandOperationResult - This callback is invoked for operations that are
  /// unsupported by the target, which are registered to use 'custom' lowering,
  /// and whose result type needs to be expanded.  This must return a node whose
  /// results precisely match the results of the input node.  This typically
  /// involves a MERGE_VALUES node and/or BUILD_PAIR.
  ///
  /// If the target has no operations that require custom lowering, it need not
  /// implement this.  The default implementation of this aborts.
  virtual SDNode *ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
    assert(0 && "ExpandOperationResult not implemented for this target!");
    return 0;  // unreachable; silences missing-return diagnostics
  }
1038
  /// IsEligibleForTailCallOptimization - Check whether the call is eligible for
  /// tail call optimization. Targets which want to do tail call optimization
  /// should override this function.  The default conservatively answers false.
  virtual bool IsEligibleForTailCallOptimization(SDOperand Call,
                                                 SDOperand Ret,
                                                 SelectionDAG &DAG) const {
    return false;
  }
1047
1048  /// CheckTailCallReturnConstraints - Check whether CALL node immediatly
1049  /// preceeds the RET node and whether the return uses the result of the node
1050  /// or is a void return. This function can be used by the target to determine
1051  /// eligiblity of tail call optimization.
1052  static bool CheckTailCallReturnConstraints(SDOperand Call, SDOperand Ret) {
1053    unsigned NumOps = Ret.getNumOperands();
1054    if ((NumOps == 1 &&
1055       (Ret.getOperand(0) == SDOperand(Call.Val,1) ||
1056        Ret.getOperand(0) == SDOperand(Call.Val,0))) ||
1057      (NumOps > 1 &&
1058       Ret.getOperand(0) == SDOperand(Call.Val,Call.Val->getNumValues()-1) &&
1059       Ret.getOperand(1) == SDOperand(Call.Val,0)))
1060      return true;
1061    return false;
1062  }
1063
1064  /// GetPossiblePreceedingTailCall - Get preceeding TailCallNodeOpCode node if
1065  /// it exists skip possible ISD:TokenFactor.
1066  static SDOperand GetPossiblePreceedingTailCall(SDOperand Chain,
1067                                                 unsigned TailCallNodeOpCode) {
1068    if (Chain.getOpcode() == TailCallNodeOpCode) {
1069      return Chain;
1070    } else if (Chain.getOpcode() == ISD::TokenFactor) {
1071      if (Chain.getNumOperands() &&
1072          Chain.getOperand(0).getOpcode() == TailCallNodeOpCode)
1073        return Chain.getOperand(0);
1074    }
1075    return Chain;
1076  }
1077
1078  /// CustomPromoteOperation - This callback is invoked for operations that are
1079  /// unsupported by the target, are registered to use 'custom' lowering, and
1080  /// whose type needs to be promoted.
1081  virtual SDOperand CustomPromoteOperation(SDOperand Op, SelectionDAG &DAG);
1082
1083  /// getTargetNodeName() - This method returns the name of a target specific
1084  /// DAG node.
1085  virtual const char *getTargetNodeName(unsigned Opcode) const;
1086
1087  //===--------------------------------------------------------------------===//
1088  // Inline Asm Support hooks
1089  //
1090
  /// ConstraintType - Classification of an inline asm operand constraint.
  enum ConstraintType {
    C_Register,            // Constraint represents a single register.
    C_RegisterClass,       // Constraint represents one or more registers.
    C_Memory,              // Memory constraint.
    C_Other,               // Something else.
    C_Unknown              // Unsupported constraint.
  };
1098
  /// AsmOperandInfo - This contains information for each constraint that we are
  /// lowering.
  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
    /// ConstraintCode - This contains the actual string for the code, like "m".
    std::string ConstraintCode;

    /// ConstraintType - Information about the constraint code, e.g. Register,
    /// RegisterClass, Memory, Other, Unknown.
    TargetLowering::ConstraintType ConstraintType;

    /// CallOperandVal - If this is the result output operand or a
    /// clobber, this is null, otherwise it is the incoming operand to the
    /// CallInst.  This gets modified as the asm is processed.
    Value *CallOperandVal;

    /// ConstraintVT - The ValueType for the operand value.
    MVT::ValueType ConstraintVT;

    AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
      : InlineAsm::ConstraintInfo(info),
        ConstraintType(TargetLowering::C_Unknown),
        CallOperandVal(0), ConstraintVT(MVT::Other) {
    }
  };
1123
1124  /// ComputeConstraintToUse - Determines the constraint code and constraint
1125  /// type to use for the specific AsmOperandInfo, setting
1126  /// OpInfo.ConstraintCode and OpInfo.ConstraintType.  If the actual operand
1127  /// being passed in is available, it can be passed in as Op, otherwise an
1128  /// empty SDOperand can be passed.
1129  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
1130                                      SDOperand Op,
1131                                      SelectionDAG *DAG = 0) const;
1132
1133  /// getConstraintType - Given a constraint, return the type of constraint it
1134  /// is for this target.
1135  virtual ConstraintType getConstraintType(const std::string &Constraint) const;
1136
1137  /// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"),
1138  /// return a list of registers that can be used to satisfy the constraint.
1139  /// This should only be used for C_RegisterClass constraints.
1140  virtual std::vector<unsigned>
1141  getRegClassForInlineAsmConstraint(const std::string &Constraint,
1142                                    MVT::ValueType VT) const;
1143
1144  /// getRegForInlineAsmConstraint - Given a physical register constraint (e.g.
1145  /// {edx}), return the register number and the register class for the
1146  /// register.
1147  ///
1148  /// Given a register class constraint, like 'r', if this corresponds directly
1149  /// to an LLVM register class, return a register of 0 and the register class
1150  /// pointer.
1151  ///
  /// This should only be used for C_Register constraints.  On error,
  /// this returns a register number of 0 and a null register class pointer.
1154  virtual std::pair<unsigned, const TargetRegisterClass*>
1155    getRegForInlineAsmConstraint(const std::string &Constraint,
1156                                 MVT::ValueType VT) const;
1157
1158  /// LowerXConstraint - try to replace an X constraint, which matches anything,
1159  /// with another that has more specific requirements based on the type of the
1160  /// corresponding operand.  This returns null if there is no replacement to
1161  /// make.
1162  virtual const char *LowerXConstraint(MVT::ValueType ConstraintVT) const;
1163
1164  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
1165  /// vector.  If it is invalid, don't add anything to Ops.
1166  virtual void LowerAsmOperandForConstraint(SDOperand Op, char ConstraintLetter,
1167                                            std::vector<SDOperand> &Ops,
1168                                            SelectionDAG &DAG) const;
1169
1170  //===--------------------------------------------------------------------===//
1171  // Scheduler hooks
1172  //
1173
1174  // EmitInstrWithCustomInserter - This method should be implemented by targets
1175  // that mark instructions with the 'usesCustomDAGSchedInserter' flag.  These
1176  // instructions are special in various ways, which require special support to
1177  // insert.  The specified MachineInstr is created but not inserted into any
1178  // basic blocks, and the scheduler passes ownership of it to this method.
1179  virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
1180                                                        MachineBasicBlock *MBB);
1181
1182  //===--------------------------------------------------------------------===//
1183  // Addressing mode description hooks (used by LSR etc).
1184  //
1185
  /// AddrMode - This represents an addressing mode of:
  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
  /// If BaseGV is null, there is no BaseGV.
  /// If BaseOffs is zero, there is no base offset.
  /// If HasBaseReg is false, there is no base register.
  /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
  /// no scale.
  ///
  struct AddrMode {
    GlobalValue *BaseGV;       // Base global value, or null if absent.
    int64_t      BaseOffs;     // Constant displacement; 0 means none.
    bool         HasBaseReg;   // True if a base register participates.
    int64_t      Scale;        // Multiplier for ScaleReg; 0 means no ScaleReg.
    AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
  };
1201
1202  /// isLegalAddressingMode - Return true if the addressing mode represented by
1203  /// AM is legal for this target, for a load/store of the specified type.
1204  /// TODO: Handle pre/postinc as well.
1205  virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;
1206
  /// isTruncateFree - Return true if it's free to truncate a value of
  /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
  /// register EAX to i16 by referencing its sub-register AX.  The default
  /// answers false (conservatively assume truncation costs an instruction).
  virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const {
    return false;
  }
1213
  /// isTruncateFree - MVT::ValueType overload of the query; return true if a
  /// truncate from VT1 to VT2 is free.  The default answers false.
  virtual bool isTruncateFree(MVT::ValueType VT1, MVT::ValueType VT2) const {
    return false;
  }
1217
1218  //===--------------------------------------------------------------------===//
1219  // Div utility functions
1220  //
1221  SDOperand BuildSDIV(SDNode *N, SelectionDAG &DAG,
1222                      std::vector<SDNode*>* Created) const;
1223  SDOperand BuildUDIV(SDNode *N, SelectionDAG &DAG,
1224                      std::vector<SDNode*>* Created) const;
1225
1226
1227  //===--------------------------------------------------------------------===//
1228  // Runtime Library hooks
1229  //
1230
  /// setLibcallName - Rename the default libcall routine name for the
  /// specified libcall.  The string must outlive this object (it is stored,
  /// not copied).
  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
    LibcallRoutineNames[Call] = Name;
  }
1236
  /// getLibcallName - Get the libcall routine name for the specified libcall.
  ///
  const char *getLibcallName(RTLIB::Libcall Call) const {
    return LibcallRoutineNames[Call];
  }
1242
  /// setCmpLibcallCC - Override the default CondCode to be used to test the
  /// result of the comparison libcall against zero.
  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
    CmpLibcallCCs[Call] = CC;
  }
1248
  /// getCmpLibcallCC - Get the CondCode that's to be used to test the result
  /// of the comparison libcall against zero.
  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
    return CmpLibcallCCs[Call];
  }
1254
1255private:
1256  TargetMachine &TM;
1257  const TargetData *TD;
1258
1259  /// IsLittleEndian - True if this is a little endian target.
1260  ///
1261  bool IsLittleEndian;
1262
1263  /// PointerTy - The type to use for pointers, usually i32 or i64.
1264  ///
1265  MVT::ValueType PointerTy;
1266
1267  /// UsesGlobalOffsetTable - True if this target uses a GOT for PIC codegen.
1268  ///
1269  bool UsesGlobalOffsetTable;
1270
1271  /// ShiftAmountTy - The type to use for shift amounts, usually i8 or whatever
1272  /// PointerTy is.
1273  MVT::ValueType ShiftAmountTy;
1274
1275  OutOfRangeShiftAmount ShiftAmtHandling;
1276
1277  /// SelectIsExpensive - Tells the code generator not to expand operations
1278  /// into sequences that use the select operations if possible.
1279  bool SelectIsExpensive;
1280
1281  /// IntDivIsCheap - Tells the code generator not to expand integer divides by
1282  /// constants into a sequence of muls, adds, and shifts.  This is a hack until
1283  /// a real cost model is in place.  If we ever optimize for size, this will be
1284  /// set to true unconditionally.
1285  bool IntDivIsCheap;
1286
1287  /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
1288  /// srl/add/sra for a signed divide by power of two, and let the target handle
1289  /// it.
1290  bool Pow2DivIsCheap;
1291
1292  /// SetCCResultContents - Information about the contents of the high-bits in
1293  /// the result of a setcc comparison operation.
1294  SetCCResultValue SetCCResultContents;
1295
1296  /// SchedPreferenceInfo - The target scheduling preference: shortest possible
1297  /// total cycles or lowest register usage.
1298  SchedPreference SchedPreferenceInfo;
1299
1300  /// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
1301  /// llvm.setjmp.  Defaults to false.
1302  bool UseUnderscoreSetJmp;
1303
1304  /// UseUnderscoreLongJmp - This target prefers to use _longjmp to implement
1305  /// llvm.longjmp.  Defaults to false.
1306  bool UseUnderscoreLongJmp;
1307
1308  /// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers
1309  unsigned JumpBufSize;
1310
1311  /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
1312  /// buffers
1313  unsigned JumpBufAlignment;
1314
1315  /// IfCvtBlockSizeLimit - The maximum allowed size for a block to be
1316  /// if-converted.
1317  unsigned IfCvtBlockSizeLimit;
1318
1319  /// IfCvtDupBlockSizeLimit - The maximum allowed size for a block to be
1320  /// duplicated during if-conversion.
1321  unsigned IfCvtDupBlockSizeLimit;
1322
  /// PrefLoopAlignment - The preferred loop alignment.
  ///
  unsigned PrefLoopAlignment;
1326
  /// StackPointerRegisterToSaveRestore - If set to a physical register, this
  /// specifies the register that llvm.stacksave/llvm.stackrestore should save
  /// and restore.
1330  unsigned StackPointerRegisterToSaveRestore;
1331
1332  /// ExceptionPointerRegister - If set to a physical register, this specifies
1333  /// the register that receives the exception address on entry to a landing
1334  /// pad.
1335  unsigned ExceptionPointerRegister;
1336
1337  /// ExceptionSelectorRegister - If set to a physical register, this specifies
1338  /// the register that receives the exception typeid on entry to a landing
1339  /// pad.
1340  unsigned ExceptionSelectorRegister;
1341
1342  /// RegClassForVT - This indicates the default register class to use for
1343  /// each ValueType the target supports natively.
1344  TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
1345  unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
1346  MVT::ValueType RegisterTypeForVT[MVT::LAST_VALUETYPE];
1347
1348  /// TransformToType - For any value types we are promoting or expanding, this
1349  /// contains the value type that we are changing to.  For Expanded types, this
1350  /// contains one step of the expand (e.g. i64 -> i32), even if there are
1351  /// multiple steps required (e.g. i64 -> i16).  For types natively supported
1352  /// by the system, this holds the same type (e.g. i32 -> i32).
1353  MVT::ValueType TransformToType[MVT::LAST_VALUETYPE];
1354
  // Defines the capacity of the TargetLowering::OpActions table.  It must be
  // at least as large as the highest ISD opcode an action can be set for.
  static const int OpActionsCapacity = 176;

  /// OpActions - For each operation and each value type, keep a LegalizeAction
  /// that indicates how instruction selection should deal with the operation.
  /// Most operations are Legal (aka, supported natively by the target), but
  /// operations that are not should be described.  Note that operations on
  /// non-legal value types are not described here.
  /// NOTE(review): each uint64_t entry appears to pack the per-value-type
  /// actions as a bitfield — confirm the encoding against the
  /// getOperationAction / setOperationAction accessors.
  uint64_t OpActions[OpActionsCapacity];

  /// LoadXActions - For each load extension type and each value type, keep a
  /// LegalizeAction that indicates how instruction selection should deal
  /// with the load.
  uint64_t LoadXActions[ISD::LAST_LOADX_TYPE];

  /// TruncStoreActions - For each truncating store (indexed by the stored
  /// value type), keep a LegalizeAction that indicates how instruction
  /// selection should deal with the store.
  uint64_t TruncStoreActions[MVT::LAST_VALUETYPE];

  /// IndexedModeActions - For each indexed mode and each value type, keep a
  /// pair of LegalizeAction that indicates how instruction selection should
  /// deal with the load / store.  NOTE(review): the first index presumably
  /// selects load vs. store — confirm against the setIndexedLoadAction /
  /// setIndexedStoreAction setters.
  uint64_t IndexedModeActions[2][ISD::LAST_INDEXED_MODE];

  /// ConvertActions - For each conversion from source type to destination type,
  /// keep a LegalizeAction that indicates how instruction selection should
  /// deal with the conversion.
  /// Currently, this is used only for floating->floating conversions
  /// (FP_EXTEND and FP_ROUND).
  uint64_t ConvertActions[MVT::LAST_VALUETYPE];
1385
  /// ValueTypeActions - For each value type, the action (legal, promote,
  /// expand, ...) to take for values of that type.  NOTE(review): semantics
  /// inferred from the type name ValueTypeActionImpl — confirm against its
  /// declaration.
  ValueTypeActionImpl ValueTypeActions;

  /// LegalFPImmediates - The floating-point constants this target can
  /// materialize natively.  NOTE(review): presumably populated via an
  /// addLegalFPImmediate-style setter — confirm.
  std::vector<APFloat> LegalFPImmediates;

  /// AvailableRegClasses - The (value type, register class) pairs the target
  /// has registered.  NOTE(review): presumably filled in by the same path
  /// that populates RegClassForVT — confirm against the registration setter.
  std::vector<std::pair<MVT::ValueType,
                        TargetRegisterClass*> > AvailableRegClasses;

  /// TargetDAGCombineArray - Targets can specify ISD nodes that they would
  /// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(),
  /// which sets a bit in this array.  One bit per ISD opcode, hence the
  /// division by the number of bits in an unsigned char.
  unsigned char
  TargetDAGCombineArray[OpActionsCapacity/(sizeof(unsigned char)*8)];

  /// PromoteToType - For operations that must be promoted to a specific type,
  /// this holds the destination type.  This map should be sparse, so don't hold
  /// it as an array.
  ///
  /// Targets add entries to this map with AddPromotedToType(..), clients access
  /// this with getTypeToPromoteTo(..).
  std::map<std::pair<unsigned, MVT::ValueType>, MVT::ValueType> PromoteToType;

  /// LibcallRoutineNames - Stores the name of each libcall.
  ///
  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];

  /// CmpLibcallCCs - The ISD::CondCode that should be used to test the result
  /// of each of the comparison libcalls against zero.
  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
1414
protected:
  /// When lowering %llvm.memset this field specifies the maximum number of
  /// store operations that may be substituted for the call to memset. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memset will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
  /// store.  This only applies to setting a constant array of a constant size.
  /// @brief Specify maximum number of store instructions per memset call.
  unsigned maxStoresPerMemset;

  /// When lowering %llvm.memcpy this field specifies the maximum number of
  /// store operations that may be substituted for a call to memcpy. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memcpy will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
  /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
  /// and one 1-byte store. This only applies to copying a constant array of
  /// constant size.
  /// @brief Specify maximum number of store instructions per memcpy call.
  unsigned maxStoresPerMemcpy;

  /// When lowering %llvm.memmove this field specifies the maximum number of
  /// store instructions that may be substituted for a call to memmove. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memmove will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
  /// with 8-bit alignment would result in nine 1-byte stores.  This only
  /// applies to copying a constant array of constant size.
  /// @brief Specify maximum number of store instructions per memmove call.
  unsigned maxStoresPerMemmove;

  /// This field specifies whether the target machine permits unaligned memory
  /// accesses.  This is used, for example, to determine the size of store
  /// operations when copying small arrays and other similar tasks.
  /// @brief Indicate whether the target permits unaligned memory accesses.
  bool allowUnalignedMemoryAccesses;
1455};
1456} // end llvm namespace
1457
1458#endif
1459