//===- llvm/Analysis/VectorUtils.h - Vector utilities -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines some vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_VECTORUTILS_H
#define LLVM_ANALYSIS_VECTORUTILS_H

#include "llvm/ADT/MapVector.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/IRBuilder.h"

namespace llvm {

template <typename T> class ArrayRef;
class DemandedBits;
class GetElementPtrInst;
class Loop;
class ScalarEvolution;
class TargetTransformInfo;
class Type;
class Value;

namespace Intrinsic {
enum ID : unsigned;
}

/// \brief Identify if the intrinsic is trivially vectorizable.
/// This method returns true if the intrinsic's argument types are all
/// scalars for the scalar form of the intrinsic and all vectors for
/// the vector form of the intrinsic.
bool isTriviallyVectorizable(Intrinsic::ID ID);

/// \brief Identifies if the intrinsic has a scalar operand. It checks for
/// the ctlz, cttz, and powi special intrinsics, whose operand at index
/// \p ScalarOpdIdx remains scalar even in the vector form.
bool hasVectorInstrinsicScalarOpd(Intrinsic::ID ID, unsigned ScalarOpdIdx);
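
// For example (illustrative), the exponent of powi (operand 1) remains a
// scalar even in the vector form of the intrinsic, so a vectorizer must not
// widen it:
//
//   declare <4 x float> @llvm.powi.v4f32(<4 x float> %x, i32 %n)
//
// Here hasVectorInstrinsicScalarOpd(Intrinsic::powi, 1) returns true.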

/// \brief Returns the intrinsic ID for a call.
/// For the given call instruction, this finds the corresponding intrinsic
/// and returns its ID; if the call does not map to an intrinsic, it
/// returns not_intrinsic.
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI,
                                          const TargetLibraryInfo *TLI);
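
// A minimal sketch of the typical legality check in a vectorizer (the names
// I and TLI are assumed to be in scope):
//
//   if (auto *CI = dyn_cast<CallInst>(&I)) {
//     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
//     if (ID != Intrinsic::not_intrinsic && isTriviallyVectorizable(ID)) {
//       // The call maps to an intrinsic with a usable vector form.
//     }
//   }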

/// \brief Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
unsigned getGEPInductionOperand(const GetElementPtrInst *Gep);

/// \brief If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp);

/// \brief If a value has only one user that is a CastInst, return it.
Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty);

/// \brief Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
Value *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp);
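
// For example (illustrative), given a C loop such as:
//
//   for (int i = 0; i < n; ++i)
//     sum += a[i * stride];
//
// this returns the IR value for `stride`, which lets the vectorizer version
// the loop on the runtime condition `stride == 1`.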

/// \brief Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
Value *findScalarElement(Value *V, unsigned EltNo);
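
// For example (illustrative IR):
//
//   %v1 = insertelement <4 x i32> %v0, i32 %x, i32 1
//
// findScalarElement(%v1, 1) returns %x, so a user needing element 1 can
// reuse %x instead of emitting an extractelement.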

/// \brief Get the splat value if the input is a splat vector, or nullptr
/// otherwise. The value may be extracted from a splat constant vector or
/// from a sequence of instructions that broadcast a single value into a
/// vector.
const Value *getSplatValue(const Value *V);
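
// For example (illustrative IR), the canonical broadcast pattern:
//
//   %ins   = insertelement <4 x i32> undef, i32 %x, i32 0
//   %splat = shufflevector <4 x i32> %ins, <4 x i32> undef,
//                          <4 x i32> zeroinitializer
//
// getSplatValue(%splat) returns %x.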

/// \brief Compute a map of integer instructions to their minimum legal type
/// size.
///
/// C semantics force sub-int-sized values (e.g. i8, i16) to be promoted to int
/// type (e.g. i32) whenever arithmetic is performed on them.
///
/// For targets with native i8 or i16 operations, usually InstCombine can shrink
/// the arithmetic type down again. However InstCombine refuses to create
/// illegal types, so for targets without i8 or i16 registers, the lengthening
/// and shrinking remains.
///
/// Most SIMD ISAs (e.g. NEON) however support vectors of i8 or i16 even when
/// their scalar equivalents do not, so during vectorization it is important to
/// remove these extensions and truncations when deciding the profitability of
/// vectorization.
///
/// This function analyzes the given range of instructions and determines the
/// minimum type size each can be converted to. It attempts to remove or
/// minimize type size changes across each def-use chain, so for example in the
/// following code:
///
///   %1 = load i8, i8*
///   %2 = add i8 %1, 2
///   %3 = load i16, i16*
///   %4 = zext i8 %2 to i32
///   %5 = zext i16 %3 to i32
///   %6 = add i32 %4, %5
///   %7 = trunc i32 %6 to i16
///
/// Instruction %6 must be performed in at least i16, so
/// computeMinimumValueSizes will return:
/// {%1: 16, %2: 16, %3: 16, %4: 16, %5: 16, %6: 16, %7: 16}.
///
/// If the optional TargetTransformInfo is provided, this function tries harder
/// to do less work by only looking at illegal types.
MapVector<Instruction *, uint64_t>
computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                         const TargetTransformInfo *TTI = nullptr);
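
// A sketch of a typical call site in a loop vectorizer (TheLoop, DB, and TTI
// are assumed to be in scope):
//
//   MapVector<Instruction *, uint64_t> MinBWs =
//       computeMinimumValueSizes(TheLoop->getBlocks(), DB, &TTI);
//   // MinBWs now maps each shrinkable instruction to its minimum bit width.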

/// \brief Propagate metadata from a group of instructions \p VL onto the
/// new instruction \p I, keeping only metadata that is correct for all of
/// them.
///
/// Specifically, let Kinds = [MD_tbaa, MD_alias_scope, MD_noalias, MD_fpmath,
/// MD_nontemporal]. For K in Kinds, we get the MDNode for K from each of the
/// elements of VL, compute their "intersection" (i.e., the most generic
/// metadata value that covers all of the individual values), and set I's
/// metadata for K equal to the intersection value.
///
/// This function always sets a (possibly null) value for each K in Kinds.
Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL);
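
// A sketch of the intended use (names are illustrative): after widening a
// group of scalar loads into one vector load WideLoad, carry over whatever
// metadata is still valid for the combined access:
//
//   SmallVector<Value *, 4> Scalars = {Load0, Load1, Load2, Load3};
//   propagateMetadata(WideLoad, Scalars);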

/// \brief Create an interleave shuffle mask.
///
/// This function creates a shuffle mask for interleaving \p NumVecs vectors of
/// vectorization factor \p VF into a single wide vector. The mask is of the
/// form:
///
///   <0, VF, VF * 2, ..., VF * (NumVecs - 1), 1, VF + 1, VF * 2 + 1, ...>
///
/// For example, the mask for VF = 4 and NumVecs = 2 is:
///
///   <0, 4, 1, 5, 2, 6, 3, 7>.
Constant *createInterleaveMask(IRBuilder<> &Builder, unsigned VF,
                               unsigned NumVecs);
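
// A sketch of its use (illustrative; A and B are <4 x i32> values):
//
//   // Produce <A[0], B[0], A[1], B[1], A[2], B[2], A[3], B[3]>.
//   Value *Interleaved = Builder.CreateShuffleVector(
//       A, B, createInterleaveMask(Builder, /*VF=*/4, /*NumVecs=*/2));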

/// \brief Create a stride shuffle mask.
///
/// This function creates a shuffle mask whose elements begin at \p Start and
/// are incremented by \p Stride. The mask can be used to deinterleave an
/// interleaved vector into separate vectors of vectorization factor \p VF. The
/// mask is of the form:
///
///   <Start, Start + Stride, ..., Start + Stride * (VF - 1)>
///
/// For example, the mask for Start = 0, Stride = 2, and VF = 4 is:
///
///   <0, 2, 4, 6>
Constant *createStrideMask(IRBuilder<> &Builder, unsigned Start,
                           unsigned Stride, unsigned VF);
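
// A sketch of its use (illustrative; WideVec is an <8 x i32> value):
//
//   // Extract the even elements <0, 2, 4, 6> of WideVec.
//   Value *Evens = Builder.CreateShuffleVector(
//       WideVec, UndefValue::get(WideVec->getType()),
//       createStrideMask(Builder, /*Start=*/0, /*Stride=*/2, /*VF=*/4));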

/// \brief Create a sequential shuffle mask.
///
/// This function creates a shuffle mask whose elements are sequential and
/// begin at \p Start. The mask contains \p NumInts integers and is padded
/// with \p NumUndefs undef values. The mask is of the form:
///
///   <Start, Start + 1, ... Start + NumInts - 1, undef_1, ... undef_NumUndefs>
///
/// For example, the mask for Start = 0, NumInts = 4, and NumUndefs = 4 is:
///
///   <0, 1, 2, 3, undef, undef, undef, undef>
Constant *createSequentialMask(IRBuilder<> &Builder, unsigned Start,
                               unsigned NumInts, unsigned NumUndefs);
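
// A sketch of its use (illustrative; Narrow is a <4 x i32> value):
//
//   // Widen Narrow to <8 x i32>, padding the upper half with undefs.
//   Value *Widened = Builder.CreateShuffleVector(
//       Narrow, UndefValue::get(Narrow->getType()),
//       createSequentialMask(Builder, /*Start=*/0, /*NumInts=*/4,
//                            /*NumUndefs=*/4));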

/// \brief Concatenate a list of vectors.
///
/// This function generates code that concatenates the vectors in \p Vecs into
/// a single large vector. The number of vectors should be greater than one,
/// and their element types should be the same. The number of elements in the
/// vectors should also be the same; however, if the last vector has fewer
/// elements, it will be padded with undefs.
Value *concatenateVectors(IRBuilder<> &Builder, ArrayRef<Value *> Vecs);
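
// A sketch of its use (illustrative; each Vi is a <4 x float> value):
//
//   SmallVector<Value *, 4> Parts = {V0, V1, V2, V3};
//   Value *Wide = concatenateVectors(Builder, Parts); // Yields <16 x float>.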

} // llvm namespace

#endif