//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "aarch64tti"

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
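  // Each MOVZ/MOVK materializes one 16-bit chunk, so the cost is the number
  // of 16-bit chunks up to and including the most significant set bit. For
  // example, 0x0000123456789ABC has 19 leading zeros, giving
  // (64 - 19 + 15) / 16 = 3 instructions.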
  unsigned LZ = countLeadingZeros((uint64_t)Val);
  return (64 - LZ + 15) / 16;
}

/// \brief Calculate the cost of materializing the given constant.
int AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64 bits.
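  // For example, an i33 immediate is sign-extended to i64 and an i96 to i128
  // before being split into 64-bit chunks below.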
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

int AArch64TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

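  // If the immediate sits in a position the instruction can encode, treat it
  // as free when rematerializing it is no more expensive than hoisting it,
  // i.e. at most one basic-cost instruction per 64-bit chunk.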
  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}

int AArch64TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      int NumConstants = (BitSize + 63) / 64;
      int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
      return (Cost <= NumConstants * TTI::TCC_Basic)
                 ? static_cast<int>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}

TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128-bit popcount.
  return TTI::PSK_Software;
}

int AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry
  ConversionTbl[] = {
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32,  1 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64,  0 },
    { ISD::TRUNCATE, MVT::v8i8,  MVT::v8i32,  3 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },

    // The number of shll instructions for the extension.
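    // For example, a v4i16 -> v4i64 sign extension takes one sshll to v4i32
    // and then an sshll/sshll2 pair to produce the two v2i64 halves, i.e.
    // 3 instructions, matching the table entry below.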
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // LowerVectorINT_TO_FP:
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },

    // Complex: to v2f32
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },

    // Complex: to v4f32
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },

    // Complex: to v8f32
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },

    // Complex: to v16f32
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },

    // Complex: to v2f64
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    // LowerVectorFP_TO_INT
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },

    // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },

    // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },

    // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
  };

  if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
                                                 DstTy.getSimpleVT(),
                                                 SrcTy.getSimpleVT()))
    return Entry->Cost;

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                             VectorType *VecTy,
                                             unsigned Index) {

  // Make sure we were given a valid extend opcode.
  assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
         "Invalid opcode");

  // We are extending an element we extract from a vector, so the source type
  // of the extend is the element type of the vector.
  auto *Src = VecTy->getElementType();

  // Sign- and zero-extends are for integer types only.
  assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");

  // Get the cost for the extract. We compute the cost (if any) for the extend
  // below.
  auto Cost = getVectorInstrCost(Instruction::ExtractElement, VecTy, Index);

  // Legalize the types.
  auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy);
  auto DstVT = TLI->getValueType(DL, Dst);
  auto SrcVT = TLI->getValueType(DL, Src);

  // If the resulting type is still a vector and the destination type is legal,
  // we may get the extension for free. If not, get the default cost for the
  // extend.
  if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
    return Cost + getCastInstrCost(Opcode, Dst, Src);

  // The destination type should be larger than the element type. If not, get
  // the default cost for the extend.
  if (DstVT.getSizeInBits() < SrcVT.getSizeInBits())
    return Cost + getCastInstrCost(Opcode, Dst, Src);

  switch (Opcode) {
  default:
    llvm_unreachable("Opcode should be either SExt or ZExt");

  // For sign-extends, we only need a smov, which performs the extension
  // automatically.
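  // For example, "smov w0, v0.h[1]" extracts element 1 and sign-extends it
  // to 32 bits in a single instruction.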
  case Instruction::SExt:
    return Cost;

  // For zero-extends, the extend is performed automatically by a umov unless
  // the destination type is i64 and the element type is i8 or i16.
  case Instruction::ZExt:
    if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
      return Cost;
  }

  // If we are unable to perform the extend for free, get the default cost.
  return Cost + getCastInstrCost(Opcode, Dst, Src);
}

int AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                       unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
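    // For example, v8i64 is split into four v2i64 parts, so an index of 5
    // becomes index 5 % 2 == 1 within one of the parts.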
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other insert/extracts cost this much.
  return ST->getVectorInsertExtractBaseCost();
}

int AArch64TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  if (ISD == ISD::SDIV &&
      Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On AArch64, scalar signed division by a power-of-two constant is
    // normally expanded to the sequence ADD + CMP + SELECT + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
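    // The CMP has no IR opcode of its own, so its cost is approximated below
    // with the cost of a Sub.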
    int Cost = getArithmeticInstrCost(Instruction::Add, Ty, Opd1Info, Opd2Info,
                                      TargetTransformInfo::OP_None,
                                      TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Select, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::AShr, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    return Cost;
  }

  switch (ISD) {
  default:
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  case ISD::ADD:
  case ISD::MUL:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return 1 * LT.first;
  }
}

int AArch64TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

int AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                       Type *CondTy) {

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // We don't lower vector selects well when they are wider than the register
  // width.
  if (ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization happening.
    const int AmortizationCost = 20;
    static const TypeConversionCostTblEntry
    VectorSelectTbl[] = {
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                    unsigned Alignment, unsigned AddressSpace) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  if (Opcode == Instruction::Store && Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isIntegerTy(64)) {
    // Unaligned stores are extremely inefficient. We don't split unaligned
    // v2i64 stores because of the negative impact that splitting has shown
    // in practice on inlined memcpy code.
    // We make v2i64 stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
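    // For a single (legal) v2i64 store, LT.first is 1 and the returned cost
    // is 1 * 2 * 6 = 12.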
    int AmortizationCost = 6;

    return LT.first * 2 * AmortizationCost;
  }

  if (Src->isVectorTy() && Src->getVectorElementType()->isIntegerTy(8) &&
      Src->getVectorNumElements() < 8) {
    // We scalarize the loads/stores because there is no v.4b register and we
    // have to promote the elements to v.4h.
    unsigned NumVecElts = Src->getVectorNumElements();
    unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
    // We generate 2 instructions per vector element.
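    // For example, a v4i8 access costs (4 * 2) * 4 * 2 = 64, heavily
    // penalizing such narrow i8 vectors.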
    return NumVectorizableInstsToAmortize * NumVecElts * 2;
  }

  return LT.first;
}

int AArch64TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  if (Factor <= TLI->getMaxSupportedInterleaveFactor()) {
    unsigned NumElts = VecTy->getVectorNumElements();
    Type *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);
    unsigned SubVecSize = DL.getTypeSizeInBits(SubVecTy);

    // ldN/stN only support legal vector types of size 64 or 128 in bits.
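    // For example, an ld2 of v8i16 (Factor == 2) has a v4i16 sub-vector of
    // 64 bits and therefore costs 2.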
    if (NumElts % Factor == 0 && (SubVecSize == 64 || SubVecSize == 128))
      return Factor;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}

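// Only the bottom 64 bits of each vector register are callee-saved under the
// AArch64 calling convention, so a 128-bit vector that is live across a call
// must be spilled and reloaded; model that as one store plus one load.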
int AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
  int Cost = 0;
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() * I->getVectorNumElements() == 128)
      Cost += getMemoryOpCost(Instruction::Store, I, 128, 0) +
              getMemoryOpCost(Instruction::Load, I, 128, 0);
  }
  return Cost;
}

unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return ST->getMaxInterleaveFactor();
}

void AArch64TTIImpl::getUnrollingPreferences(Loop *L,
                                             TTI::UnrollingPreferences &UP) {
  // Enable partial unrolling and runtime unrolling.
  BaseT::getUnrollingPreferences(L, UP);

  // An inner loop is more likely to be hot, and LICM can hoist its runtime
  // checks out of the loop, making them cheaper, so use a larger threshold
  // to unroll more inner loops.
  if (L->getLoopDepth() > 1)
    UP.PartialThreshold *= 2;

  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;
}

Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                         Type *ExpectedType) {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // The expected type must be a struct whose elements match the stN value
    // operands; if so, rebuild the stored value as a struct.
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    unsigned NumElts = Inst->getNumArgOperands() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    Value *Res = UndefValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}

bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                        MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
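  // For ldN the pointer is the first argument; for stN it is the last, after
  // the N values being stored.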
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.IsSimple = true;
    Info.NumMemRefs = 1;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.IsSimple = true;
    Info.NumMemRefs = 1;
    Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}

unsigned AArch64TTIImpl::getCacheLineSize() {
  return ST->getCacheLineSize();
}

unsigned AArch64TTIImpl::getPrefetchDistance() {
  return ST->getPrefetchDistance();
}

unsigned AArch64TTIImpl::getMinPrefetchStride() {
  return ST->getMinPrefetchStride();
}

unsigned AArch64TTIImpl::getMaxPrefetchIterationsAhead() {
  return ST->getMaxPrefetchIterationsAhead();
}