ARMTargetTransformInfo.cpp revision 36b56886974eae4f9c5ebc96befd3e7bfe5de338
1//===-- ARMTargetTransformInfo.cpp - ARM specific TTI pass ----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9/// \file
10/// This file implements a TargetTransformInfo analysis pass specific to the
11/// ARM target machine. It uses the target's detailed information to provide
12/// more precise answers to certain TTI queries, while letting the target
13/// independent and default TTI implementations handle the rest.
14///
15//===----------------------------------------------------------------------===//
16
17#define DEBUG_TYPE "armtti"
18#include "ARM.h"
19#include "ARMTargetMachine.h"
20#include "llvm/Analysis/TargetTransformInfo.h"
21#include "llvm/Support/Debug.h"
22#include "llvm/Target/CostTable.h"
23#include "llvm/Target/TargetLowering.h"
24using namespace llvm;
25
// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeARMTTIPass(PassRegistry &);
}
32
namespace {

/// ARM-specific implementation of the TargetTransformInfo analysis group.
///
/// Answers cost-model queries using the subtarget and lowering information
/// owned by the ARM target machine; anything it does not override falls
/// through to the target-independent defaults.
class ARMTTI final : public ImmutablePass, public TargetTransformInfo {
  const ARMBaseTargetMachine *TM;
  const ARMSubtarget *ST;
  const ARMTargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

public:
  // Required by the pass registration machinery; never a valid way to build a
  // usable pass (all members would be null), hence the unreachable.
  ARMTTI() : ImmutablePass(ID), TM(0), ST(0), TLI(0) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  // The real constructor, used by createARMTargetTransformInfoPass below.
  ARMTTI(const ARMBaseTargetMachine *TM)
      : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()),
        TLI(TM->getTargetLowering()) {
    initializeARMTTIPass(*PassRegistry::getPassRegistry());
  }

  // Make this implementation the active TTI when the pass is initialized.
  void initializePass() override {
    pushTTIStack(this);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  /// \name Scalar TTI Implementations
  /// @{
  using TargetTransformInfo::getIntImmCost;
  unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;

  /// @}


  /// \name Vector TTI Implementations
  /// @{

  // 16 Q registers with NEON, none otherwise; 8 GPRs in Thumb1 mode,
  // 13 allocatable GPRs elsewhere.
  unsigned getNumberOfRegisters(bool Vector) const override {
    if (Vector) {
      if (ST->hasNEON())
        return 16;
      return 0;
    }

    if (ST->isThumb1Only())
      return 8;
    return 13;
  }

  // NEON Q registers are 128 bits wide; scalar GPRs are 32 bits.
  unsigned getRegisterBitWidth(bool Vector) const override {
    if (Vector) {
      if (ST->hasNEON())
        return 128;
      return 0;
    }

    return 32;
  }

  unsigned getMaximumUnrollFactor() const override {
    // These are out of order CPUs:
    if (ST->isCortexA15() || ST->isSwift())
      return 2;
    return 1;
  }

  unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                          int Index, Type *SubTp) const override;

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                            Type *Src) const override;

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                              Type *CondTy) const override;

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                              unsigned Index) const override;

  unsigned getAddressComputationCost(Type *Val,
                                     bool IsComplex) const override;

  unsigned
  getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                         OperandValueKind Op1Info = OK_AnyValue,
                         OperandValueKind Op2Info = OK_AnyValue) const override;

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) const override;
  /// @}
};

} // end anonymous namespace
139
// Register ARMTTI as a member of the TargetTransformInfo analysis group.
INITIALIZE_AG_PASS(ARMTTI, TargetTransformInfo, "armtti",
                   "ARM Target Transform Info", true, true, false)
char ARMTTI::ID = 0;

/// Factory used by the ARM backend to add this TTI implementation to the
/// pass pipeline.
ImmutablePass *
llvm::createARMTargetTransformInfoPass(const ARMBaseTargetMachine *TM) {
  return new ARMTTI(TM);
}
148
149
150unsigned ARMTTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
151  assert(Ty->isIntegerTy());
152
153  unsigned Bits = Ty->getPrimitiveSizeInBits();
154  if (Bits == 0 || Bits > 32)
155    return 4;
156
157  int32_t SImmVal = Imm.getSExtValue();
158  uint32_t ZImmVal = Imm.getZExtValue();
159  if (!ST->isThumb()) {
160    if ((SImmVal >= 0 && SImmVal < 65536) ||
161        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
162        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
163      return 1;
164    return ST->hasV6T2Ops() ? 2 : 3;
165  }
166  if (ST->isThumb2()) {
167    if ((SImmVal >= 0 && SImmVal < 65536) ||
168        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
169        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
170      return 1;
171    return ST->hasV6T2Ops() ? 2 : 3;
172  }
173  // Thumb1.
174  if (SImmVal >= 0 && SImmVal < 256)
175    return 1;
176  if ((~ZImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
177    return 2;
178  // Load from constantpool.
179  return 3;
180}
181
/// Cost of a cast instruction with opcode \p Opcode from \p Src to \p Dst.
/// Consults a series of NEON/ARM-specific cost tables (vector conversions,
/// scalar FP<->int conversions, scalar integer extensions/truncations) and
/// falls back to the target-independent implementation when no table entry
/// matches.
unsigned ARMTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
                                  Type *Src) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Single to/from double precision conversions.
  static const CostTblEntry<MVT::SimpleValueType> NEONFltDblTbl[] = {
    // Vector fptrunc/fpext conversions.
    { ISD::FP_ROUND,   MVT::v2f64, 2 },
    { ISD::FP_EXTEND,  MVT::v2f32, 2 },
    { ISD::FP_EXTEND,  MVT::v4f32, 4 }
  };

  // Vector fptrunc/fpext: table cost scaled by the legalization split factor.
  if (Src->isVectorTy() && ST->hasNEON() && (ISD == ISD::FP_ROUND ||
                                          ISD == ISD::FP_EXTEND)) {
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
    int Idx = CostTableLookup(NEONFltDblTbl, ISD, LT.second);
    if (Idx != -1)
      return LT.first * NEONFltDblTbl[Idx].Cost;
  }

  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  // The tables below are keyed on simple value types only.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);

  // Some arithmetic, load and store operations have specific instructions
  // to cast up/down their types automatically at no extra cost.
  // TODO: Get these tables to know at least what the related operations are.
  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  NEONVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },

    // The number of vmovl instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // Operations that we legalize using splitting.
    { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
    { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },

    // Vector float <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },

    { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },

    // Vector double <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
    { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
  };

  if (SrcTy.isVector() && ST->hasNEON()) {
    int Idx = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return NEONVectorConversionTbl[Idx].Cost;
  }

  // Scalar float to integer conversions.
  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  NEONFloatConversionTbl[] = {
    { ISD::FP_TO_SINT,  MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_UINT,  MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_SINT,  MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_UINT,  MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_SINT,  MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_UINT,  MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_SINT,  MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_UINT,  MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_SINT,  MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_UINT,  MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_SINT,  MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_UINT,  MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_SINT,  MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_UINT,  MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_SINT,  MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_UINT,  MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_SINT,  MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_UINT,  MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_SINT,  MVT::i64, MVT::f64, 10 },
    { ISD::FP_TO_UINT,  MVT::i64, MVT::f64, 10 }
  };
  if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
    int Idx = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
        return NEONFloatConversionTbl[Idx].Cost;
  }

  // Scalar integer to float conversions.
  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  NEONIntegerConversionTbl[] = {
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i1, 2 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i1, 2 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i1, 2 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i1, 2 },
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i8, 2 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i8, 2 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i8, 2 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i8, 2 },
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i16, 2 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i16, 2 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i16, 2 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i16, 2 },
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i32, 2 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i32, 2 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i32, 2 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i32, 2 },
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i64, 10 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i64, 10 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i64, 10 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i64, 10 }
  };

  if (SrcTy.isInteger() && ST->hasNEON()) {
    int Idx = ConvertCostTableLookup(NEONIntegerConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return NEONIntegerConversionTbl[Idx].Cost;
  }

  // Scalar integer conversion costs.
  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    int Idx = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return ARMIntegerConversionTbl[Idx].Cost;
  }

  // No table matched; use the target-independent estimate.
  return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
}
380
381unsigned ARMTTI::getVectorInstrCost(unsigned Opcode, Type *ValTy,
382                                    unsigned Index) const {
383  // Penalize inserting into an D-subregister. We end up with a three times
384  // lower estimated throughput on swift.
385  if (ST->isSwift() &&
386      Opcode == Instruction::InsertElement &&
387      ValTy->isVectorTy() &&
388      ValTy->getScalarSizeInBits() <= 32)
389    return 3;
390
391  return TargetTransformInfo::getVectorInstrCost(Opcode, ValTy, Index);
392}
393
/// Cost of a compare or select with opcode \p Opcode on \p ValTy, with the
/// (possibly null) condition type \p CondTy.
unsigned ARMTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                    Type *CondTy) const {

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // On NEON a vector select gets lowered to vbsl.
  if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // Lowering of some vector selects is currently far from perfect.
    static const TypeConversionCostTblEntry<MVT::SimpleValueType>
    NEONVectorSelectTbl[] = {
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 2*16 + 1 + 3*1 + 4*1 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i32, 4*8 + 1*3 + 1*4 + 1*2 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 4*16 + 1*6 + 1*8 + 1*4 },
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
    };

    EVT SelCondTy = TLI->getValueType(CondTy);
    EVT SelValTy = TLI->getValueType(ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      int Idx = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
                                       SelCondTy.getSimpleVT(),
                                       SelValTy.getSimpleVT());
      if (Idx != -1)
        return NEONVectorSelectTbl[Idx].Cost;
    }

    // Not in the table: assume one vbsl per legalized part.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);
    return LT.first;
  }

  return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
427
428unsigned ARMTTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
429  // Address computations in vectorized code with non-consecutive addresses will
430  // likely result in more instructions compared to scalar code where the
431  // computation can more often be merged into the index mode. The resulting
432  // extra micro-ops can significantly decrease throughput.
433  unsigned NumVectorInstToHideOverhead = 10;
434
435  if (Ty->isVectorTy() && IsComplex)
436    return NumVectorInstToHideOverhead;
437
438  // In many cases the address computation is not merged into the instruction
439  // addressing mode.
440  return 1;
441}
442
/// Cost of a shuffle of kind \p Kind on type \p Tp. Only reverse shuffles
/// are modeled here; everything else falls through to the default.
unsigned ARMTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                Type *SubTp) const {
  // We only handle costs of reverse shuffles for now.
  if (Kind != SK_Reverse)
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);

  static const CostTblEntry<MVT::SimpleValueType> NEONShuffleTbl[] = {
    // Reverse shuffle cost one instruction if we are shuffling within a double
    // word (vrev) or two if we shuffle a quad word (vrev, vext).
    { ISD::VECTOR_SHUFFLE, MVT::v2i32, 1 },
    { ISD::VECTOR_SHUFFLE, MVT::v2f32, 1 },
    { ISD::VECTOR_SHUFFLE, MVT::v2i64, 1 },
    { ISD::VECTOR_SHUFFLE, MVT::v2f64, 1 },

    { ISD::VECTOR_SHUFFLE, MVT::v4i32, 2 },
    { ISD::VECTOR_SHUFFLE, MVT::v4f32, 2 },
    { ISD::VECTOR_SHUFFLE, MVT::v8i16, 2 },
    { ISD::VECTOR_SHUFFLE, MVT::v16i8, 2 }
  };

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);

  int Idx = CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
  if (Idx == -1)
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);

  // Scale by the number of legalized parts the type splits into.
  return LT.first * NEONShuffleTbl[Idx].Cost;
}
471
/// Cost of an arithmetic instruction with opcode \p Opcode on type \p Ty.
/// \p Op1Info / \p Op2Info describe what is known about the operands
/// (e.g. uniform constant), which affects lowering quality.
unsigned ARMTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                        OperandValueKind Op1Info,
                                        OperandValueKind Op2Info) const {

  int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  const unsigned FunctionCallDivCost = 20;
  const unsigned ReciprocalDivCost = 10;
  static const CostTblEntry<MVT::SimpleValueType> CostTbl[] = {
    // Division.
    // These costs are somewhat random. Choose a cost of 20 to indicate that
    // vectorizing division (added function call) is going to be very expensive.
    // Double registers types.
    { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
    { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
    { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
    { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
    { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
    // Quad register types.
    { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
    // Multiplication.
  };

  int Idx = -1;

  // The table only models NEON vector operations.
  if (ST->hasNEON())
    Idx = CostTableLookup(CostTbl, ISDOpcode, LT.second);

  if (Idx != -1)
    return LT.first * CostTbl[Idx].Cost;

  unsigned Cost =
      TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);

  // This is somewhat of a hack. The problem that we are facing is that SROA
  // creates a sequence of shift, and, or instructions to construct values.
  // These sequences are recognized by the ISel and have zero-cost. Not so for
  // the vectorized code. Because we have support for v2i64 but not i64 those
  // sequences look particularly beneficial to vectorize.
  // To work around this we increase the cost of v2i64 operations to make them
  // seem less beneficial.
  if (LT.second == MVT::v2i64 &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue)
    Cost += 4;

  return Cost;
}
546
547unsigned ARMTTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
548                                 unsigned AddressSpace) const {
549  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
550
551  if (Src->isVectorTy() && Alignment != 16 &&
552      Src->getVectorElementType()->isDoubleTy()) {
553    // Unaligned loads/stores are extremely inefficient.
554    // We need 4 uops for vst.1/vld.1 vs 1uop for vldr/vstr.
555    return LT.first * 4;
556  }
557  return LT.first;
558}
559