//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}

unsigned X86TTIImpl::getMaxInterleaveFactor() {
  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
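  // The interleave factor is the unroll factor the vectorizer uses to keep
  // several independent vector operations in flight; e.g. a factor of 4 lets
  // it interleave four vectorized iterations to hide latency.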
  if (ST->hasAVX())
    return 4;

  return 2;
}

unsigned X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is normally
    // expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
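    // A rough per-lane sketch for a divide by 4 (not exact codegen):
    //   %sign = ashr i32 %x, 31     ; SRA: all-ones if negative
    //   %bias = lshr i32 %sign, 30  ; SRL: adds (divisor - 1) for negatives
    //   %tmp  = add  i32 %x, %bias  ; ADD
    //   %res  = ashr i32 %tmp, 2    ; SRA: the actual divide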
    unsigned Cost =
        2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  AVX2UniformConstCostTable[] = {
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2UniformConstCostTable[Idx].Cost;
  }
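  // For example, an sdiv of <16 x i16> by a uniform constant on AVX2 hits the
  // table above: v16i16 is a single legal part (LT.first == 1), so the cost
  // returned is 1 * 6 for the vpmulhw-based sequence.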

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTable[] = {
    { ISD::SHL,     MVT::v16i32,    1 },
    { ISD::SRL,     MVT::v16i32,    1 },
    { ISD::SRA,     MVT::v16i32,    1 },
    { ISD::SHL,     MVT::v8i64,     1 },
    { ISD::SRL,     MVT::v8i64,     1 },
    { ISD::SRA,     MVT::v8i64,     1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2 even though we mark them as
    // custom in order to detect the cases where the shift amount is a scalar.
    { ISD::SHL,     MVT::v4i32,    1 },
    { ISD::SRL,     MVT::v4i32,    1 },
    { ISD::SRA,     MVT::v4i32,    1 },
    { ISD::SHL,     MVT::v8i32,    1 },
    { ISD::SRL,     MVT::v8i32,    1 },
    { ISD::SRA,     MVT::v8i32,    1 },
    { ISD::SHL,     MVT::v2i64,    1 },
    { ISD::SRL,     MVT::v2i64,    1 },
    { ISD::SHL,     MVT::v4i64,    1 },
    { ISD::SRL,     MVT::v4i64,    1 },

    { ISD::SHL,  MVT::v32i8,  42 }, // cmpeqb sequence.
    { ISD::SHL,  MVT::v16i16,  16*10 }, // Scalarized.

    { ISD::SRL,  MVT::v32i8,  32*10 }, // Scalarized.
    { ISD::SRL,  MVT::v16i16,  8*10 }, // Scalarized.

    { ISD::SRA,  MVT::v32i8,  32*10 }, // Scalarized.
    { ISD::SRA,  MVT::v16i16,  16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v4i64,  4*10 }, // Scalarized.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV,  MVT::v32i8,  32*20 },
    { ISD::SDIV,  MVT::v16i16, 16*20 },
    { ISD::SDIV,  MVT::v8i32,  8*20 },
    { ISD::SDIV,  MVT::v4i64,  4*20 },
    { ISD::UDIV,  MVT::v32i8,  32*20 },
    { ISD::UDIV,  MVT::v16i16, 16*20 },
    { ISD::UDIV,  MVT::v8i32,  8*20 },
    { ISD::UDIV,  MVT::v4i64,  4*20 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX512CostTable[Idx].Cost;
  }
  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.

    { ISD::SDIV, MVT::v8i16,  6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v8i16,  6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    EVT VT = LT.second;
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      // A vector shift left by a non-uniform constant can be lowered
      // into a vector multiply (pmullw/pmulld).
      return LT.first;
    if (VT == MVT::v4i32 && ST->hasSSE2())
      // A vector shift left by a non-uniform constant is converted
      // into a vector multiply; the new multiply is eventually
      // lowered into a sequence of shuffles and 2 x pmuludq.
      ISD = ISD::MUL;
  }
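  // For example, 'shl <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>' is in effect
  // a multiply by <2, 4, 8, 16>; without SSE4.1's pmulld that multiply is
  // itself emulated with shuffles and two pmuludq, which is what the remapped
  // MUL cost below models.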

  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // In some cases, where the shift amount is a scalar, we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst case assumptions because it is
    // used for vectorization and we don't want to make vectorized code worse
    // than scalar code.
    { ISD::SHL,  MVT::v16i8,  30 }, // cmpeqb sequence.
    { ISD::SHL,  MVT::v8i16,  8*10 }, // Scalarized.
    { ISD::SHL,  MVT::v4i32,  2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,  2*10 }, // Scalarized.
    { ISD::SHL,  MVT::v4i64,  4*10 }, // Scalarized.

    { ISD::SRL,  MVT::v16i8,  16*10 }, // Scalarized.
    { ISD::SRL,  MVT::v8i16,  8*10 }, // Scalarized.
    { ISD::SRL,  MVT::v4i32,  4*10 }, // Scalarized.
    { ISD::SRL,  MVT::v2i64,  2*10 }, // Scalarized.

    { ISD::SRA,  MVT::v16i8,  16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v8i16,  8*10 }, // Scalarized.
    { ISD::SRA,  MVT::v4i32,  4*10 }, // Scalarized.
    { ISD::SRA,  MVT::v2i64,  2*10 }, // Scalarized.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular
    // registers. The overhead of division is going to dominate most kernels
    // anyways so try hard to prevent vectorization of division - it is
    // generally a bad idea. Assume somewhat arbitrarily that we have to be able
    // to hide "20 cycles" for each lane.
    { ISD::SDIV,  MVT::v16i8,  16*20 },
    { ISD::SDIV,  MVT::v8i16,  8*20 },
    { ISD::SDIV,  MVT::v4i32,  4*20 },
    { ISD::SDIV,  MVT::v2i64,  2*20 },
    { ISD::UDIV,  MVT::v16i8,  16*20 },
    { ISD::UDIV,  MVT::v8i16,  8*20 },
    { ISD::UDIV,  MVT::v4i32,  4*20 },
    { ISD::UDIV,  MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }
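  // For example, an srl of <8 x i32> by a variable amount on an SSE2-only
  // target is legalized into two v4i32 halves (LT.first == 2), so the cost
  // reported from the table above is 2 * 4*10 = 80.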

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,     MVT::v16i16,   4 },
    { ISD::MUL,     MVT::v8i32,    4 },
    { ISD::SUB,     MVT::v8i32,    4 },
    { ISD::ADD,     MVT::v8i32,    4 },
    { ISD::SUB,     MVT::v4i64,    4 },
    { ISD::ADD,     MVT::v4i64,    4 },
    // A v4i64 multiply is custom lowered as two v2i64 halves that are then
    // lowered as a series of long multiplies(3), shifts(4) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL,     MVT::v4i64,    18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    EVT VT = LT.second;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if (ISD == ISD::SHL && (VT == MVT::v8i32 || VT == MVT::v16i16) &&
        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)
      ISD = ISD::MUL;
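    // For example, 'shl <8 x i32>' by a non-uniform constant on AVX1 is then
    // costed as a v8i32 MUL, i.e. 4 from the table above (two half-width
    // multiplies plus the extract/insert).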

    int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(4) and adds(2).
    { ISD::MUL,     MVT::v2i64,    9 },
    { ISD::MUL,     MVT::v4i64,    9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on SSE2/SSE3: lower a v4i32 mul as
  // 2x shuffle, 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

unsigned X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                    Type *SubTp) {
  // We only estimate the cost of reverse and alternate shuffles.
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
    unsigned Cost = 1;
    if (LT.second.getSizeInBits() > 128)
      Cost = 3; // Extract + insert + copy.
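    // For example, reversing a <8 x float> on an AVX target is a single
    // 256-bit part (LT.first == 1) wider than 128 bits, so it is costed at 3,
    // while reversing a <4 x float> is costed at 1.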

    // Multiply by the number of parts.
    return Cost * LT.first;
  }

  if (Kind == TTI::SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);

    // The backend knows how to generate a single VEX.256 version of the
    // VPBLENDW instruction if the target supports AVX2.
    if (ST->hasAVX2() && LT.second == MVT::v16i16)
      return LT.first;

    static const CostTblEntry<MVT::SimpleValueType> AVXAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1},  // vblendpd
      {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1},  // vblendpd

      {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1},  // vblendps
      {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1},  // vblendps

      // This shuffle is custom lowered into a sequence of:
      //  2x  vextractf128 , 2x vpblendw , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},

      // This shuffle is custom lowered into a long sequence of:
      //  2x vextractf128 , 4x vpshufb , 2x vpor ,  1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
    };

    if (ST->hasAVX()) {
      int Idx =
          CostTableLookup(AVXAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * AVXAltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSE41AltShuffleTbl[] = {
      // These are lowered into movsd.
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

      // packed float vectors with four elements are lowered into BLENDI dag
      // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},

      // This shuffle generates a single pshufw.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},

      // There is no instruction that matches a v16i8 alternate shuffle.
      // The backend will expand it into the sequence 'pshufb + pshufb + or'.
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
    };

    if (ST->hasSSE41()) {
      int Idx =
          CostTableLookup(SSE41AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * SSE41AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSSE3AltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
      // the sequence 'shufps + pshufd'
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},

      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3}, // pshufb + pshufb + or
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}  // pshufb + pshufb + or
    };

    if (ST->hasSSSE3()) {
      int Idx =
          CostTableLookup(SSSE3AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * SSSE3AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSEAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2}, // shufps + pshufd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2}, // shufps + pshufd

      // This is expanded into a long sequence of four extract + four insert.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8}, // 4 x pextrw + 4 pinsrw.

      // 8 x (pinsrw + pextrw + and + movb + movzb + or)
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
    };

    // Fall-back (SSE3 and SSE2).
    int Idx = CostTableLookup(SSEAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
    if (Idx != -1)
      return LT.first * SSEAltShuffleTbl[Idx].Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

unsigned X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(Dst);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
  };

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
        ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }
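  // For example, a uitofp from <8 x i32> to <8 x double> on an SSE2-only
  // target legalizes the source to two v4i32 parts and the destination to
  // v2f64 pieces, so the (v2f64, v4i32) entry above is hit and the returned
  // cost is LTSrc.first * 4*10 = 2 * 40 = 80.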

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX512ConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,   MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,   MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,   MVT::v8f64,  1 },
    { ISD::FP_ROUND,  MVT::v16f32,  MVT::v8f64,  3 },

    { ISD::TRUNCATE,  MVT::v16i8,   MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,   MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,   MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v16i32,  MVT::v8i64,  4 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },

    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
  };

  if (ST->hasAVX512()) {
    int Idx = ConvertCostTableLookup(AVX512ConversionTbl, ISD, LTDest.second,
                                     LTSrc.second);
    if (Idx != -1)
      return AVX512ConversionTbl[Idx].Cost;
  }
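  // Note: this lookup (and the AVX2/AVX ones below) returns the table cost
  // directly, without scaling by the legalization factor; the entries are
  // assumed to already describe legal (or nearly legal) types.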
  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,  MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16, MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32, MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i1,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i8,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i32, 1 },

    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i1,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i8,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 9 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 6 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i32, 6 },
    // The generic code to compute the scalarization overhead is currently
    // broken. Work around this limitation by estimating the scalarization
    // overhead here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove this when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i64, 4*10 },

    { ISD::FP_TO_SINT,  MVT::v8i8,  MVT::v8f32, 7 },
    { ISD::FP_TO_SINT,  MVT::v4i8,  MVT::v4f32, 1 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic when estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflate the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32, MVT::v8f32, 8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f64, 4*4 },
  };

  if (ST->hasAVX2()) {
    int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVX2ConversionTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

unsigned X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                        Type *CondTy) {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC,   MVT::v2f64,   1 },
    { ISD::SETCC,   MVT::v4f32,   1 },
    { ISD::SETCC,   MVT::v2i64,   1 },
    { ISD::SETCC,   MVT::v4i32,   1 },
    { ISD::SETCC,   MVT::v8i16,   1 },
    { ISD::SETCC,   MVT::v16i8,   1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC,   MVT::v4f64,   1 },
    { ISD::SETCC,   MVT::v8f32,   1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC,   MVT::v4i64,   4 },
    { ISD::SETCC,   MVT::v8i32,   4 },
    { ISD::SETCC,   MVT::v16i16,  4 },
    { ISD::SETCC,   MVT::v32i8,   4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC,   MVT::v4i64,   1 },
    { ISD::SETCC,   MVT::v8i32,   1 },
    { ISD::SETCC,   MVT::v16i16,  1 },
    { ISD::SETCC,   MVT::v32i8,   1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTbl[] = {
    { ISD::SETCC,   MVT::v8i64,   1 },
    { ISD::SETCC,   MVT::v16i32,  1 },
    { ISD::SETCC,   MVT::v8f64,   1 },
    { ISD::SETCC,   MVT::v16f32,  1 },
  };
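  // For example, a <16 x i32> compare on AVX-512 is roughly a single vpcmpd
  // producing a mask register, hence the cost of 1 above.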

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX512CostTbl[Idx].Cost;
  }

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

unsigned X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                        unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;
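    // For example, element 6 of a <8 x float> that is split into two
    // <4 x float> halves becomes element 2 of one half; element 4 becomes
    // element 0 and, being a floating-point scalar, is free below.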

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}

unsigned X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert,
                                              bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

unsigned X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                     unsigned Alignment,
                                     unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      unsigned Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(),
                                             Alignment, AddressSpace);
      unsigned SplitCost = getScalarizationOverhead(Src,
                                                    Opcode == Instruction::Load,
                                                    Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  unsigned Cost = LT.first * 1;

  // On Sandybridge 256-bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}

unsigned X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // For scalars just take the regular, unmasked cost.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
    VectorType::get(Type::getInt8Ty(getGlobalContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy, 1)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy, 1)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization.
    unsigned MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    unsigned ScalarCompareCost =
      getCmpSelInstrCost(Instruction::ICmp,
                         Type::getInt8Ty(getGlobalContext()), nullptr);
    unsigned BranchCost = getCFInstrCost(Instruction::Br);
    unsigned MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    unsigned ValueSplitCost =
      getScalarizationOverhead(SrcVTy, Opcode == Instruction::Load,
                               Opcode == Instruction::Store);
    unsigned MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }
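  // In the scalarized case above the total is NumElem scalar loads/stores
  // plus, per element, a compare of the mask byte and a branch, plus the
  // extract/insert overhead for the mask and data vectors.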

  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(SrcVTy);
  unsigned Cost = 0;
  if (LT.second != TLI->getValueType(SrcVTy).getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires expand/truncate for data and a shuffle for the mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, 0) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, 0);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

unsigned X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return BaseT::getAddressComputationCost(Ty, IsComplex);
}

unsigned X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                      bool IsPairwise) {

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 },      // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v8i16,   5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::FADD,  MVT::v4f64,   5 },
    { ISD::FADD,  MVT::v8f32,   7 },
    { ISD::ADD,   MVT::v2i64,   1 },      // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v4i64,   5 },      // The data reported by the IACA tool is "4.8".
    { ISD::ADD,   MVT::v8i16,   5 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 },      // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.3".
    { ISD::ADD,   MVT::v8i16,   4 },      // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   3 },
    { ISD::FADD,  MVT::v4f64,   3 },
    { ISD::FADD,  MVT::v8f32,   4 },
    { ISD::ADD,   MVT::v2i64,   1 },      // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "2.8".
    { ISD::ADD,   MVT::v4i64,   3 },
    { ISD::ADD,   MVT::v8i16,   4 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblPairWise[Idx].Cost;
    }
  } else {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
    }
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
unsigned X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

unsigned X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
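  // For example, a 128-bit immediate whose low 64-bit chunk fits in 32 bits
  // and whose high chunk is just sign bits costs 1 + 0; the std::max below
  // still guarantees at least one instruction is accounted for.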
  unsigned Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1U, Cost);
}

unsigned X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                   const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }
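  // For example, for 'add i64 %x, 42' the immediate is operand 1 (ImmIdx);
  // it needs a single 64-bit chunk (NumConstants == 1) and costs TCC_Basic to
  // materialize, so it is reported as free below and is not hoisted.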

  if (Idx == ImmIdx) {
    unsigned NumConstants = (BitSize + 63) / 64;
    unsigned Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<unsigned>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

unsigned X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                   const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, int Consecutive) {
  int DataWidth = DataTy->getPrimitiveSizeInBits();

  // TODO: AVX-512 also allows gather/scatter, which works with strided and
  // random accesses as well.
  if ((DataWidth < 32) || (Consecutive == 0))
    return false;
  if (ST->hasAVX512() || ST->hasAVX2())
    return true;
  return false;
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType, int Consecutive) {
  return isLegalMaskedLoad(DataType, Consecutive);
}