AArch64TargetTransformInfo.cpp revision 4c5e43da7792f75567b693105cc53e3f1992ad98
//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

10#include "AArch64TargetTransformInfo.h"
11#include "MCTargetDesc/AArch64AddressingModes.h"
12#include "llvm/Analysis/TargetTransformInfo.h"
13#include "llvm/Analysis/LoopInfo.h"
14#include "llvm/CodeGen/BasicTTIImpl.h"
15#include "llvm/Support/Debug.h"
16#include "llvm/Target/CostTable.h"
17#include "llvm/Target/TargetLowering.h"
18#include <algorithm>
19using namespace llvm;
20
21#define DEBUG_TYPE "aarch64tti"
22
/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
unsigned AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  // Negative values are typically materialized with MOVN, so cost the
  // bitwise NOT of the value instead.
  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
  unsigned LZ = countLeadingZeros((uint64_t)Val);
  return (64 - LZ + 15) / 16;
}
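
// Illustrative cost walkthrough (added commentary, not from the original
// source): for Val = 0x0000123456789ABC, the value is not a logical
// immediate, LZ = 19, and (64 - 19 + 15) / 16 = 3, matching a MOVZ plus two
// MOVKs for the three significant 16-bit chunks. For Val = -0x1235, the
// bitwise NOT is 0x1234, LZ = 51, and the cost is 1, matching a single MOVN.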

/// \brief Calculate the cost of materializing the given constant.
unsigned AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  unsigned Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1U, Cost);
}
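
// Illustrative example (added commentary, not from the original source): an
// i128 constant of 1 splits into two 64-bit chunks, 0 and 1, both of which
// are encodable and individually cost 0; the std::max clamp above still
// reports a cost of 1, since at least one instruction is always needed.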

unsigned AArch64TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                       const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    unsigned NumConstants = (BitSize + 63) / 64;
    unsigned Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<unsigned>(TTI::TCC_Free)
               : Cost;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}
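
// Illustrative example (added commentary, not from the original source): for
// 'add i64 %a, 255', the immediate is a valid logical immediate, so its
// materialization cost (1) fits within the NumConstants * TCC_Basic budget
// and TCC_Free is returned; constant hoisting leaves it in place. A 64-bit
// immediate needing four MOVZ/MOVK instructions costs 4 and thus becomes a
// hoisting candidate.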

unsigned AArch64TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                       const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      unsigned NumConstants = (BitSize + 63) / 64;
      unsigned Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
      return (Cost <= NumConstants * TTI::TCC_Basic)
                 ? static_cast<unsigned>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}
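
// Note (added commentary): the Idx < 2 and Idx < 4 checks skip the leading
// meta-arguments of stackmap (id, shadow bytes) and patchpoint (id, num
// bytes, target, num args). Live-value operands beyond those are recorded in
// stackmap side tables rather than materialized into registers, which is why
// any immediate that fits in a signed 64-bit value is reported as TCC_Free.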

TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128-bit popcount.
  return TTI::PSK_Software;
}

unsigned AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
                                          Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry<MVT> ConversionTbl[] = {
    // LowerVectorINT_TO_FP:
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },

    // Complex: to v2f32
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },

    // Complex: to v4f32
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },

    // Complex: to v2f64
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    // LowerVectorFP_TO_INT
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },

    // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },

    // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },

    // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
  };

  int Idx = ConvertCostTableLookup<MVT>(
      ConversionTbl, array_lengthof(ConversionTbl), ISD, DstTy.getSimpleVT(),
      SrcTy.getSimpleVT());
  if (Idx != -1)
    return ConversionTbl[Idx].Cost;

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
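
// Illustrative lookup (added commentary, not from the original source):
// 'sitofp <4 x i16> to <4 x float>' matches the
// { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 } entry, modeling a widen
// (sshll) followed by a convert (scvtf). Conversions missing from the table
// fall back to the generic BasicTTI estimate.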

unsigned AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                            unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other inserts/extracts cost this much.
  return 2;
}
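
// Illustrative example (added commentary, not from the original source): a
// v8i32 is legalized on AArch64 by splitting into two v4i32 halves, so
// extracting lane 4 normalizes to lane 0 of the second half and is free,
// while extracting lane 5 normalizes to lane 1 and costs 2.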

unsigned AArch64TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  if (ISD == ISD::SDIV &&
      Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On AArch64, scalar signed division by a power-of-two constant is
    // normally expanded to the sequence ADD + CMP + SELECT + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    unsigned Cost =
      getArithmeticInstrCost(Instruction::Add, Ty, Opd1Info, Opd2Info,
                             TargetTransformInfo::OP_None,
                             TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Select, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::AShr, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    return Cost;
  }

  switch (ISD) {
  default:
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  case ISD::ADD:
  case ISD::MUL:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return 1 * LT.first;
  }
}
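
// Illustrative example (added commentary, not from the original source): for
// 'sdiv i32 %x, 8' with a uniform power-of-two divisor, the cost is the sum
// of the four expansion steps above (roughly 4 for a legal scalar type),
// rather than the much higher cost of an actual divide instruction.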

unsigned AArch64TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}
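
// Illustrative note (added commentary): a simple scalar access such as
// 'ldr w0, [x0, x1, lsl #2]' folds the address computation into the
// addressing mode, so the cost of 1 is already conservative; the cost of 10
// for complex vector addresses discourages vectorizing gather-like access
// patterns unless enough other work is vectorized to hide the overhead.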

unsigned AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                            Type *CondTy) {

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // Vector selects wider than the register width are not lowered well.
  if (ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization
    // happening.
    const unsigned AmortizationCost = 20;
    static const TypeConversionCostTblEntry<MVT::SimpleValueType>
    VectorSelectTbl[] = {
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 * AmortizationCost },
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
    };

    EVT SelCondTy = TLI->getValueType(CondTy);
    EVT SelValTy = TLI->getValueType(ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      int Idx =
          ConvertCostTableLookup(VectorSelectTbl, ISD, SelCondTy.getSimpleVT(),
                                 SelValTy.getSimpleVT());
      if (Idx != -1)
        return VectorSelectTbl[Idx].Cost;
    }
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
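
// Illustrative lookup (added commentary, not from the original source): a
// 'select <16 x i1>, <16 x i32>, <16 x i32>' hits the v16i1/v16i32 entry and
// costs 16 * 20 = 320, reflecting per-element scalarization and making such
// selects effectively prohibitive unless heavily amortized.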

unsigned AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                         unsigned Alignment,
                                         unsigned AddressSpace) {
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);

  if (Opcode == Instruction::Store && Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isIntegerTy(64)) {
    // Unaligned stores are extremely inefficient. We don't split unaligned
    // v2i64 stores because of the negative impact that doing so has shown in
    // practice on inlined memcpy code.
    // We make v2i64 stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
    unsigned AmortizationCost = 6;

    return LT.first * 2 * AmortizationCost;
  }

  if (Src->isVectorTy() && Src->getVectorElementType()->isIntegerTy(8) &&
      Src->getVectorNumElements() < 8) {
    // We scalarize the loads/stores because there is no v.4b register and we
    // have to promote the elements to v.4h.
    unsigned NumVecElts = Src->getVectorNumElements();
    unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
    // We generate 2 instructions per vector element.
    return NumVectorizableInstsToAmortize * NumVecElts * 2;
  }

  return LT.first;
}
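
// Illustrative costs (added commentary, not from the original source): a
// 'store <2 x i64>' with alignment 8 returns 1 * 2 * 6 = 12, and a
// 'store <4 x i8>' returns (4 * 2) * 4 * 2 = 64 under the scalarization
// penalty above, whereas an aligned legal store simply returns its
// legalization factor LT.first.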

unsigned AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
  unsigned Cost = 0;
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() * I->getVectorNumElements() == 128)
      Cost += getMemoryOpCost(Instruction::Store, I, 128, 0) +
              getMemoryOpCost(Instruction::Load, I, 128, 0);
  }
  return Cost;
}
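
// Illustrative note (added commentary): a <4 x i32> value live across a call
// is modeled as one store plus one load, since 128-bit vector registers are
// not fully preserved across calls under AAPCS64 (only the low 64 bits of
// v8-v15 are callee-saved).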

unsigned AArch64TTIImpl::getMaxInterleaveFactor() {
  if (ST->isCortexA57())
    return 4;
  return 2;
}

void AArch64TTIImpl::getUnrollingPreferences(Loop *L,
                                             TTI::UnrollingPreferences &UP) {
  // Enable partial unrolling and runtime unrolling.
  BaseT::getUnrollingPreferences(L, UP);

  // An inner loop is more likely to be hot, and its runtime checks can be
  // promoted out of it by the LICM pass, so the overhead is lower; use a
  // larger threshold to unroll more such loops.
  if (L->getLoopDepth() > 1)
    UP.PartialThreshold *= 2;

  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;
}
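
// Clarifying note (added commentary): the threshold is doubled once per loop
// based on that loop's own depth, not compounded per nesting level, so a
// loop at depth 3 gets the same 2x bump as one at depth 2.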

Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                         Type *ExpectedType) {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // Build a struct value of the expected type from the stored operands.
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    unsigned NumElts = Inst->getNumArgOperands() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    Value *Res = UndefValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}
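
// Illustrative use (added commentary): for an @llvm.aarch64.neon.st2 call
// that stores <4 x i32> %a and <4 x i32> %b through a pointer, the st2 case
// above rebuilds the { <4 x i32>, <4 x i32> } aggregate from %a and %b with
// insertvalue instructions, so a pass such as EarlyCSE can forward the
// stored values to a later matching ld2 of the same ExpectedType.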

bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                        MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.Vol = false;
    Info.NumMemRefs = 1;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.Vol = false;
    Info.NumMemRefs = 1;
    Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}

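// Clarifying note (added commentary): MatchingId pairs each ldN intrinsic
// with its stN counterpart of the same interleave width, so a client pass
// can recognize a st2 followed by a ld2 from the same pointer as a matching
// write/read pair and reuse the stored values via
// getOrCreateResultFromMemIntrinsic above.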