LoopVectorize.cpp revision 5d79bb8770a5a655af0dccc87b952c3ea9bad45e
//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR. Legalization of the IR is done
// in the codegen. However, the vectorizer uses (will use) the codegen
// interfaces to generate IR that is likely to result in an optimal binary.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
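//
// For example, with a vector width of four, the loop
//   for (i = 0; i < n; ++i)
//     A[i] = B[i] + C[i];
// is conceptually rewritten (in pseudo-code) as
//   for (i = 0; i < n; i += 4)
//     A[i:i+3] = B[i:i+3] + C[i:i+3];
// with a scalar epilogue loop that executes any remaining iterations.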
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <map>

using namespace llvm;

static cl::opt<unsigned>
VectorizationFactor("force-vector-width", cl::init(0), cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."));

static cl::opt<unsigned>
VectorizationUnroll("force-vector-unroll", cl::init(0), cl::Hidden,
                    cl::desc("Sets the vectorization unroll count. "
                             "Zero is autoselect."));

static cl::opt<bool>
EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                   cl::desc("Enable if-conversion during vectorization."));

/// We don't vectorize loops with a known constant trip count below this number.
static cl::opt<unsigned>
TinyTripCountVectorThreshold("vectorizer-min-trip-count", cl::init(16),
                             cl::Hidden,
                             cl::desc("Don't vectorize loops with a constant "
                                      "trip count that is smaller than this "
                                      "value."));

/// We don't unroll loops with a known constant trip count below this number.
static const unsigned TinyTripCountUnrollThreshold = 128;

/// When performing a runtime memory check, do not check more than this
/// number of pointers. Notice that the check is quadratic!
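/// Checking N pointers requires N*(N-1)/2 pairwise comparisons.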
static const unsigned RuntimeMemoryCheckThreshold = 4;

namespace {

// Forward declarations.
class LoopVectorizationLegality;
class LoopVectorizationCostModel;

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the
/// induction and reduction variables that were found in the loop.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, ScalarEvolution *SE, LoopInfo *LI,
                      DominatorTree *DT, DataLayout *DL,
                      const TargetLibraryInfo *TLI, unsigned VecWidth,
                      unsigned UnrollFactor)
      : OrigLoop(OrigLoop), SE(SE), LI(LI), DT(DT), DL(DL), TLI(TLI),
        VF(VecWidth), UF(UnrollFactor), Builder(SE->getContext()), Induction(0),
        OldInduction(0), WidenMap(UnrollFactor) {}

  // Perform the actual loop widening (vectorization).
  void vectorize(LoopVectorizationLegality *Legal) {
    // Create a new empty loop. Unlink the old loop and connect the new one.
    createEmptyLoop(Legal);
    // Widen each instruction in the old loop to a new one in the new loop.
    // Use the Legality module to find the induction and reduction variables.
    vectorizeLoop(Legal);
    // Register the new loop and update the analysis passes.
    updateAnalysis();
  }

private:
  /// A small list of PHINodes.
  typedef SmallVector<PHINode*, 4> PhiVector;
  /// When we unroll loops we have multiple vector values for each scalar.
  /// This data structure holds the unrolled and vectorized values that
  /// originated from one scalar instruction.
  typedef SmallVector<Value*, 2> VectorParts;

  /// Add code that checks at runtime if the accessed arrays overlap.
  /// Returns the comparator value or NULL if no check is needed.
  Instruction *addRuntimeCheck(LoopVectorizationLegality *Legal,
                               Instruction *Loc);
  /// Create an empty loop, based on the loop ranges of the old loop.
  void createEmptyLoop(LoopVectorizationLegality *Legal);
  /// Copy and widen the instructions from the old loop.
  void vectorizeLoop(LoopVectorizationLegality *Legal);

  /// A helper function that computes the predicate of the block BB, assuming
  /// that the header block of the loop is set to True. It returns the *entry*
  /// mask for the block BB.
  VectorParts createBlockInMask(BasicBlock *BB);
  /// A helper function that computes the predicate of the edge between SRC
  /// and DST.
  VectorParts createEdgeMask(BasicBlock *Src, BasicBlock *Dst);

  /// A helper function to vectorize a single BB within the innermost loop.
  void vectorizeBlockInLoop(LoopVectorizationLegality *Legal, BasicBlock *BB,
                            PhiVector *PV);

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// This instruction is un-vectorizable. Implement it as a sequence
  /// of scalars.
  void scalarizeInstruction(Instruction *Instr);

  /// Vectorize Load and Store instructions.
  void vectorizeMemoryInstruction(Instruction *Instr,
                                  LoopVectorizationLegality *Legal);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  Value *getBroadcastInstrs(Value *V);

  /// This function adds (StartIdx, StartIdx + 1, StartIdx + 2, ...) to each
  /// element of the vector Val. If Negate is set then the sequence added is
  /// decreasing: (StartIdx, StartIdx - 1, StartIdx - 2, ...).
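  /// For example, with VF = 4, StartIdx = 0 and Val = <i, i, i, i>, the
  /// result is <i, i+1, i+2, i+3>.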
  Value *getConsecutiveVector(Value* Val, unsigned StartIdx, bool Negate);

  /// When we go over instructions in the basic block we rely on previous
  /// values within the current basic block or on loop invariant values.
  /// When we widen (vectorize) values we place them in the map. If the values
  /// are not within the map, they have to be loop invariant, so we simply
  /// broadcast them into a vector.
  VectorParts &getVectorValue(Value *V);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  Value *reverseVector(Value *Vec);

  /// This is a helper class that holds the vectorizer state. It maps scalar
  /// instructions to vector instructions. When the code is 'unrolled' then
  /// a single scalar value is mapped to multiple vector parts. The parts
  /// are stored in the VectorPart type.
  struct ValueMap {
    /// C'tor.  UnrollFactor controls the number of vectors ('parts') that
    /// are mapped.
    ValueMap(unsigned UnrollFactor) : UF(UnrollFactor) {}

    /// \return True if 'Key' is saved in the Value Map.
    bool has(Value *Key) const { return MapStorage.count(Key); }

    /// Initializes a new entry in the map. Sets all of the vector parts to the
    /// same value in 'Val'.
    /// \return A reference to a vector with splat values.
    VectorParts &splat(Value *Key, Value *Val) {
      VectorParts &Entry = MapStorage[Key];
      Entry.assign(UF, Val);
      return Entry;
    }

    /// \return A reference to the value that is stored at 'Key'.
    VectorParts &get(Value *Key) {
      VectorParts &Entry = MapStorage[Key];
      if (Entry.empty())
        Entry.resize(UF);
      assert(Entry.size() == UF);
      return Entry;
    }

  private:
    /// The unroll factor. Each entry in the map stores this number of vector
    /// parts.
    unsigned UF;

    /// Map storage. We use std::map and not DenseMap because insertions to a
    /// dense map invalidate its iterators.
    std::map<Value *, VectorParts> MapStorage;
  };

262
263  /// The original loop.
264  Loop *OrigLoop;
265  /// Scev analysis to use.
266  ScalarEvolution *SE;
267  /// Loop Info.
268  LoopInfo *LI;
269  /// Dominator Tree.
270  DominatorTree *DT;
271  /// Data Layout.
272  DataLayout *DL;
273  /// Target Library Info.
274  const TargetLibraryInfo *TLI;
275
276  /// The vectorization SIMD factor to use. Each vector will have this many
277  /// vector elements.
278  unsigned VF;
279  /// The vectorization unroll factor to use. Each scalar is vectorized to this
280  /// many different vector instructions.
281  unsigned UF;
282
283  /// The builder that we use
284  IRBuilder<> Builder;
285
286  // --- Vectorization state ---
287
288  /// The vector-loop preheader.
289  BasicBlock *LoopVectorPreHeader;
290  /// The scalar-loop preheader.
291  BasicBlock *LoopScalarPreHeader;
292  /// Middle Block between the vector and the scalar.
293  BasicBlock *LoopMiddleBlock;
294  ///The ExitBlock of the scalar loop.
295  BasicBlock *LoopExitBlock;
296  ///The vector loop body.
297  BasicBlock *LoopVectorBody;
298  ///The scalar loop body.
299  BasicBlock *LoopScalarBody;
300  /// A list of all bypass blocks. The first block is the entry of the loop.
301  SmallVector<BasicBlock *, 4> LoopBypassBlocks;
302
303  /// The new Induction variable which was added to the new block.
304  PHINode *Induction;
305  /// The induction variable of the old basic block.
306  PHINode *OldInduction;
307  /// Maps scalars to widened vectors.
308  ValueMap WidenMap;
309};
310
/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
/// to what vectorization factor.
/// This class does not look at the profitability of vectorization, only the
/// legality. This class has two main kinds of checks:
/// * Memory checks - The code in canVectorizeMemory checks if vectorization
///   will change the order of memory accesses in a way that will change the
///   correctness of the program.
/// * Scalar checks - The code in canVectorizeInstrs and canVectorizeMemory
///   checks for a number of different conditions, such as the availability of
///   a single induction variable, that all types are supported and
///   vectorize-able, etc. This code reflects the capabilities of
///   InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying the
/// induction variable and the different reduction variables.
class LoopVectorizationLegality {
public:
  LoopVectorizationLegality(Loop *L, ScalarEvolution *SE, DataLayout *DL,
                            DominatorTree *DT, TargetTransformInfo* TTI,
                            AliasAnalysis *AA, TargetLibraryInfo *TLI)
      : TheLoop(L), SE(SE), DL(DL), DT(DT), TTI(TTI), AA(AA), TLI(TLI),
        Induction(0) {}

  /// This enum represents the kinds of reductions that we support.
  enum ReductionKind {
    RK_NoReduction, ///< Not a reduction.
    RK_IntegerAdd,  ///< Sum of integers.
    RK_IntegerMult, ///< Product of integers.
    RK_IntegerOr,   ///< Bitwise or logical OR of numbers.
    RK_IntegerAnd,  ///< Bitwise or logical AND of numbers.
    RK_IntegerXor,  ///< Bitwise or logical XOR of numbers.
    RK_FloatAdd,    ///< Sum of floats.
    RK_FloatMult    ///< Product of floats.
  };
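  // For example, the variable 'Sum' below is an RK_IntegerAdd reduction:
  //   for (i = 0; i < n; ++i)
  //     Sum += A[i];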

  /// This enum represents the kinds of inductions that we support.
  enum InductionKind {
    IK_NoInduction,         ///< Not an induction variable.
    IK_IntInduction,        ///< Integer induction variable. Step = 1.
    IK_ReverseIntInduction, ///< Reverse int induction variable. Step = -1.
    IK_PtrInduction,        ///< Pointer induction var. Step = sizeof(elem).
    IK_ReversePtrInduction  ///< Reverse ptr indvar. Step = - sizeof(elem).
  };
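  // For example, in 'for (int *P = B; P != E; ++P)' the pointer 'P' is an
  // IK_PtrInduction induction variable.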

  /// This POD struct holds information about reduction variables.
  struct ReductionDescriptor {
    ReductionDescriptor() : StartValue(0), LoopExitInstr(0),
      Kind(RK_NoReduction) {}

    ReductionDescriptor(Value *Start, Instruction *Exit, ReductionKind K)
        : StartValue(Start), LoopExitInstr(Exit), Kind(K) {}

    // The starting value of the reduction.
    // It does not have to be zero!
    Value *StartValue;
    // The instruction whose value is used outside the loop.
    Instruction *LoopExitInstr;
    // The kind of the reduction.
    ReductionKind Kind;
  };

  // This POD struct holds information about the memory runtime legality
  // check that a group of pointers do not overlap.
  struct RuntimePointerCheck {
    RuntimePointerCheck() : Need(false) {}

    /// Reset the state of the pointer runtime information.
    void reset() {
      Need = false;
      Pointers.clear();
      Starts.clear();
      Ends.clear();
    }

    /// Insert a pointer and calculate the start and end SCEVs.
    void insert(ScalarEvolution *SE, Loop *Lp, Value *Ptr);

    /// This flag indicates if we need to add the runtime check.
    bool Need;
    /// Holds the pointers that we need to check.
    SmallVector<Value*, 2> Pointers;
    /// Holds the pointer value at the beginning of the loop.
    SmallVector<const SCEV*, 2> Starts;
    /// Holds the pointer value at the end of the loop.
    SmallVector<const SCEV*, 2> Ends;
  };

  /// A POD for saving information about induction variables.
  struct InductionInfo {
    InductionInfo(Value *Start, InductionKind K) : StartValue(Start), IK(K) {}
    InductionInfo() : StartValue(0), IK(IK_NoInduction) {}
    /// Start value.
    Value *StartValue;
    /// Induction kind.
    InductionKind IK;
  };

  /// ReductionList contains the reduction descriptors for all
  /// of the reductions that were found in the loop.
  typedef DenseMap<PHINode*, ReductionDescriptor> ReductionList;

  /// InductionList saves induction variables and maps them to the
  /// induction descriptor.
  typedef MapVector<PHINode*, InductionInfo> InductionList;

  /// Alias(Multi)Map stores the values (GEPs or underlying objects) and their
  /// respective Store/Load instruction(s) to calculate aliasing.
  typedef DenseMap<Value*, Instruction* > AliasMap;
  typedef DenseMap<Value*, std::vector<Instruction*> > AliasMultiMap;

  /// Returns true if it is legal to vectorize this loop.
  /// This does not mean that it is profitable to vectorize this
  /// loop, only that it is legal to do so.
  bool canVectorize();

  /// Returns the Induction variable.
  PHINode *getInduction() { return Induction; }

  /// Returns the reduction variables found in the loop.
  ReductionList *getReductionVars() { return &Reductions; }

  /// Returns the induction variables found in the loop.
  InductionList *getInductionVars() { return &Inductions; }

  /// Returns true if V is an induction variable in this loop.
  bool isInductionVariable(const Value *V);

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  bool blockNeedsPredication(BasicBlock *BB);

  /// Check if this pointer is consecutive when vectorizing. This happens
  /// when the last index of the GEP is the induction variable, or when the
  /// pointer itself is an induction variable.
  /// This check allows us to vectorize A[idx] into a wide load/store.
  /// Returns:
  /// 0 - Stride is unknown or non-consecutive.
  /// 1 - Address is consecutive.
  /// -1 - Address is consecutive, and decreasing.
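  /// For example, A[i] is consecutive (1) while A[n - i] is consecutive but
  /// decreasing (-1).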
  int isConsecutivePtr(Value *Ptr);

  /// Returns true if the value V is uniform within the loop.
  bool isUniform(Value *V);

  /// Returns true if this instruction will remain scalar after vectorization.
  bool isUniformAfterVectorization(Instruction* I) { return Uniforms.count(I); }

  /// Returns the information that we collected about runtime memory check.
  RuntimePointerCheck *getRuntimePointerCheck() { return &PtrRtCheck; }
private:
  /// Check if a single basic block loop is vectorizable.
  /// At this point we know that this is a loop with a constant trip count
  /// and we only need to check individual instructions.
  bool canVectorizeInstrs();

  /// When we vectorize loops we may change the order in which
  /// we read and write from memory. This method checks if it is
  /// legal to vectorize the code, considering only memory constraints.
  /// Returns true if the loop is vectorizable.
  bool canVectorizeMemory();

  /// Return true if we can vectorize this loop using the IF-conversion
  /// transformation.
  bool canVectorizeWithIfConvert();

  /// Collect the variables that need to stay uniform after vectorization.
  void collectLoopUniforms();

  /// Return true if all of the instructions in the block can be speculatively
  /// executed.
  bool blockCanBePredicated(BasicBlock *BB);

  /// Returns true if 'Phi' is a reduction variable of kind 'Kind'.
  /// If it is, the reduction variable is added to the ReductionList.
  bool AddReductionVar(PHINode *Phi, ReductionKind Kind);
  /// Returns true if the instruction I can be a reduction variable of type
  /// 'Kind'.
  bool isReductionInstr(Instruction *I, ReductionKind Kind);
  /// Returns the induction kind of Phi. This function may return
  /// IK_NoInduction if the PHI is not an induction variable.
  InductionKind isInductionVariable(PHINode *Phi);
  /// Return true if we can compute the address bounds of Ptr within the loop.
  bool hasComputableBounds(Value *Ptr);
  /// Return true if there is a chance of a write reorder.
  bool hasPossibleGlobalWriteReorder(Value *Object,
                                     Instruction *Inst,
                                     AliasMultiMap &WriteObjects,
                                     unsigned MaxByteWidth);
  /// Return the AA location for a load or a store.
  AliasAnalysis::Location getLoadStoreLocation(Instruction *Inst);

  /// The loop that we evaluate.
  Loop *TheLoop;
  /// Scev analysis.
  ScalarEvolution *SE;
  /// DataLayout analysis.
  DataLayout *DL;
  /// Dominators.
  DominatorTree *DT;
  /// Target Info.
  TargetTransformInfo *TTI;
  /// Alias Analysis.
  AliasAnalysis *AA;
  /// Target Library Info.
  TargetLibraryInfo *TLI;

  //  ---  vectorization state --- //

  /// Holds the integer induction variable. This is the counter of the
  /// loop.
  PHINode *Induction;
  /// Holds the reduction variables.
  ReductionList Reductions;
  /// Holds all of the induction variables that we found in the loop.
  /// Notice that inductions don't need to start at zero and that induction
  /// variables can be pointers.
  InductionList Inductions;

  /// Allowed outside users. This holds the reduction
  /// vars which can be accessed from outside the loop.
  SmallPtrSet<Value*, 4> AllowedExit;
  /// This set holds the variables which are known to be uniform after
  /// vectorization.
  SmallPtrSet<Instruction*, 4> Uniforms;
  /// We need to check that all of the pointers in this list are disjoint
  /// at runtime.
  RuntimePointerCheck PtrRtCheck;
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(Loop *L, ScalarEvolution *SE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             DataLayout *DL, const TargetLibraryInfo *TLI)
      : TheLoop(L), SE(SE), LI(LI), Legal(Legal), TTI(TTI), DL(DL), TLI(TLI) {}

  /// Information about vectorization costs.
  struct VectorizationFactor {
    unsigned Width; // Vector width with best cost.
    unsigned Cost;  // Cost of the loop with that width.
  };
  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to the maximum VF. If UserVF is
  /// not zero then UserVF is selected, provided vectorization is possible.
  VectorizationFactor selectVectorizationFactor(bool OptForSize,
                                                unsigned UserVF);

  /// \return The size (in bits) of the widest type in the code that
  /// needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  unsigned getWidestType();

  /// \return The most profitable unroll factor.
  /// If UserUF is zero then this method finds the best unroll factor based on
  /// register pressure and other parameters; otherwise UserUF is used.
  /// VF and LoopCost are the selected vectorization factor and the cost of the
  /// selected VF.
  unsigned selectUnrollFactor(bool OptForSize, unsigned UserUF, unsigned VF,
                              unsigned LoopCost);

  /// \brief A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    unsigned LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    unsigned MaxLocalUsers;
    /// Holds the number of instructions in the loop.
    unsigned NumInstructions;
  };

  /// \return Information about the register usage of the loop.
  RegisterUsage calculateRegisterUsage();

private:
  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the vectorization factor.
  unsigned expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  unsigned getInstructionCost(Instruction *I, unsigned VF);

  /// A helper function for converting Scalar types to vector types.
  /// If the incoming type is void, we return void. If the VF is 1, we return
  /// the scalar type.
  static Type* ToVectorTy(Type *Scalar, unsigned VF);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// The loop that we evaluate.
  Loop *TheLoop;
  /// Scev analysis.
  ScalarEvolution *SE;
  /// Loop Info analysis.
  LoopInfo *LI;
  /// Vectorization legality.
  LoopVectorizationLegality *Legal;
  /// Vector target information.
  const TargetTransformInfo &TTI;
  /// Target data layout information.
  DataLayout *DL;
  /// Target Library Info.
  const TargetLibraryInfo *TLI;
};

/// The LoopVectorize Pass.
struct LoopVectorize : public LoopPass {
  /// Pass identification, replacement for typeid.
  static char ID;

  explicit LoopVectorize() : LoopPass(ID) {
    initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
  }

  ScalarEvolution *SE;
  DataLayout *DL;
  LoopInfo *LI;
  TargetTransformInfo *TTI;
  DominatorTree *DT;
  AliasAnalysis *AA;
  TargetLibraryInfo *TLI;

  virtual bool runOnLoop(Loop *L, LPPassManager &LPM) {
    // We only vectorize innermost loops.
    if (!L->empty())
      return false;

    SE = &getAnalysis<ScalarEvolution>();
    DL = getAnalysisIfAvailable<DataLayout>();
    LI = &getAnalysis<LoopInfo>();
    TTI = &getAnalysis<TargetTransformInfo>();
    DT = &getAnalysis<DominatorTree>();
    AA = getAnalysisIfAvailable<AliasAnalysis>();
    TLI = getAnalysisIfAvailable<TargetLibraryInfo>();

    DEBUG(dbgs() << "LV: Checking a loop in \"" <<
          L->getHeader()->getParent()->getName() << "\"\n");

    // Check if it is legal to vectorize the loop.
    LoopVectorizationLegality LVL(L, SE, DL, DT, TTI, AA, TLI);
    if (!LVL.canVectorize()) {
      DEBUG(dbgs() << "LV: Not vectorizing.\n");
      return false;
    }

    // Use the cost model.
    LoopVectorizationCostModel CM(L, SE, LI, &LVL, *TTI, DL, TLI);

    // Check the function attributes to find out if this function should be
    // optimized for size.
    Function *F = L->getHeader()->getParent();
    Attribute::AttrKind SzAttr = Attribute::OptimizeForSize;
    Attribute::AttrKind FlAttr = Attribute::NoImplicitFloat;
    unsigned FnIndex = AttributeSet::FunctionIndex;
    bool OptForSize = F->getAttributes().hasAttribute(FnIndex, SzAttr);
    bool NoFloat = F->getAttributes().hasAttribute(FnIndex, FlAttr);

    if (NoFloat) {
      DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat "
            "attribute is used.\n");
      return false;
    }

    // Select the optimal vectorization factor.
    LoopVectorizationCostModel::VectorizationFactor VF;
    VF = CM.selectVectorizationFactor(OptForSize, VectorizationFactor);
    // Select the unroll factor.
    unsigned UF = CM.selectUnrollFactor(OptForSize, VectorizationUnroll,
                                        VF.Width, VF.Cost);

    if (VF.Width == 1) {
      DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
      return false;
    }

    DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
          << F->getParent()->getModuleIdentifier() << "\n");
    DEBUG(dbgs() << "LV: Unroll Factor is " << UF << "\n");

    // If we decided that it is *legal* to vectorize the loop then do it.
    InnerLoopVectorizer LB(L, SE, LI, DT, DL, TLI, VF.Width, UF);
    LB.vectorize(&LVL);

    DEBUG(verifyFunction(*L->getHeader()->getParent()));
    return true;
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    LoopPass::getAnalysisUsage(AU);
    AU.addRequiredID(LoopSimplifyID);
    AU.addRequiredID(LCSSAID);
    AU.addRequired<DominatorTree>();
    AU.addRequired<LoopInfo>();
    AU.addRequired<ScalarEvolution>();
    AU.addRequired<TargetTransformInfo>();
    AU.addPreserved<LoopInfo>();
    AU.addPreserved<DominatorTree>();
  }

};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
// LoopVectorizationCostModel.
//===----------------------------------------------------------------------===//

void
LoopVectorizationLegality::RuntimePointerCheck::insert(ScalarEvolution *SE,
                                                       Loop *Lp, Value *Ptr) {
  const SCEV *Sc = SE->getSCEV(Ptr);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
  assert(AR && "Invalid addrec expression");
  const SCEV *Ex = SE->getExitCount(Lp, Lp->getLoopLatch());
  const SCEV *ScEnd = AR->evaluateAtIteration(Ex, *SE);
  Pointers.push_back(Ptr);
  Starts.push_back(AR->getStart());
  Ends.push_back(ScEnd);
}

Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
  // Save the current insertion location.
  Instruction *Loc = Builder.GetInsertPoint();

  // We need to place the broadcast of invariant variables outside the loop.
  Instruction *Instr = dyn_cast<Instruction>(V);
  bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody);
  bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;

  // Place the code for broadcasting invariant variables in the new preheader.
  if (Invariant)
    Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());

  // Broadcast the scalar into all locations in the vector.
  Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");

  // Restore the builder insertion point.
  if (Invariant)
    Builder.SetInsertPoint(Loc);

  return Shuf;
}

Value *InnerLoopVectorizer::getConsecutiveVector(Value* Val, unsigned StartIdx,
                                                 bool Negate) {
  assert(Val->getType()->isVectorTy() && "Must be a vector");
  assert(Val->getType()->getScalarType()->isIntegerTy() &&
         "Elem must be an integer");
  // Create the types.
  Type *ITy = Val->getType()->getScalarType();
  VectorType *Ty = cast<VectorType>(Val->getType());
  int VLen = Ty->getNumElements();
  SmallVector<Constant*, 8> Indices;

  // Create a vector of consecutive numbers starting at StartIdx.
  for (int i = 0; i < VLen; ++i) {
    int Idx = Negate ? (-i) : i;
    Indices.push_back(ConstantInt::get(ITy, StartIdx + Idx));
  }

  // Add the consecutive indices to the vector value.
  Constant *Cv = ConstantVector::get(Indices);
  assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
  return Builder.CreateAdd(Val, Cv, "induction");
}

int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Unexpected non ptr");
  // Make sure that the pointer does not point to structs.
  if (cast<PointerType>(Ptr->getType())->getElementType()->isAggregateType())
    return 0;

  // If this value is a pointer induction variable we know it is consecutive.
  PHINode *Phi = dyn_cast_or_null<PHINode>(Ptr);
  if (Phi && Inductions.count(Phi)) {
    InductionInfo II = Inductions[Phi];
    if (IK_PtrInduction == II.IK)
      return 1;
    else if (IK_ReversePtrInduction == II.IK)
      return -1;
  }

  GetElementPtrInst *Gep = dyn_cast_or_null<GetElementPtrInst>(Ptr);
  if (!Gep)
    return 0;

  unsigned NumOperands = Gep->getNumOperands();
  Value *LastIndex = Gep->getOperand(NumOperands - 1);

  Value *GpPtr = Gep->getPointerOperand();
  // If this GEP value is a consecutive pointer induction variable and all of
  // the indices are loop invariant then we know it is consecutive.
  Phi = dyn_cast<PHINode>(GpPtr);
  if (Phi && Inductions.count(Phi)) {

    // Make sure that the pointer does not point to structs.
    PointerType *GepPtrType = cast<PointerType>(GpPtr->getType());
    if (GepPtrType->getElementType()->isAggregateType())
      return 0;

    // Make sure that all of the index operands are loop invariant.
    for (unsigned i = 1; i < NumOperands; ++i)
      if (!SE->isLoopInvariant(SE->getSCEV(Gep->getOperand(i)), TheLoop))
        return 0;

    InductionInfo II = Inductions[Phi];
    if (IK_PtrInduction == II.IK)
      return 1;
    else if (IK_ReversePtrInduction == II.IK)
      return -1;
  }

  // Check that all of the gep indices are uniform except for the last.
  for (unsigned i = 0; i < NumOperands - 1; ++i)
    if (!SE->isLoopInvariant(SE->getSCEV(Gep->getOperand(i)), TheLoop))
      return 0;

  // We can emit wide load/stores only if the last index is the induction
  // variable.
  const SCEV *Last = SE->getSCEV(LastIndex);
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Last)) {
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // The memory is consecutive because the last index is consecutive
    // and all other indices are loop invariant.
    if (Step->isOne())
      return 1;
    if (Step->isAllOnesValue())
      return -1;
  }

  return 0;
}

bool LoopVectorizationLegality::isUniform(Value *V) {
  return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
}

InnerLoopVectorizer::VectorParts&
InnerLoopVectorizer::getVectorValue(Value *V) {
  assert(V != Induction && "The new induction variable should not be used.");
  assert(!V->getType()->isVectorTy() && "Can't widen a vector");

  // If we have this scalar in the map, return it.
  if (WidenMap.has(V))
    return WidenMap.get(V);

  // If this scalar is unknown, assume that it is a constant or that it is
  // loop invariant. Broadcast V and save the value for future uses.
  Value *B = getBroadcastInstrs(V);
  return WidenMap.splat(V, B);
}

Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
  assert(Vec->getType()->isVectorTy() && "Invalid type");
  SmallVector<Constant*, 8> ShuffleMask;
  for (unsigned i = 0; i < VF; ++i)
    ShuffleMask.push_back(Builder.getInt32(VF - i - 1));

  return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
                                     ConstantVector::get(ShuffleMask),
                                     "reverse");
}

void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
                                             LoopVectorizationLegality *Legal) {
  // Attempt to issue a wide load.
  LoadInst *LI = dyn_cast<LoadInst>(Instr);
  StoreInst *SI = dyn_cast<StoreInst>(Instr);

  assert((LI || SI) && "Invalid Load/Store instruction");

  Type *ScalarDataTy = LI ? LI->getType() : SI->getValueOperand()->getType();
  Type *DataTy = VectorType::get(ScalarDataTy, VF);
  Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();
  unsigned Alignment = LI ? LI->getAlignment() : SI->getAlignment();

  // If the pointer is loop invariant or if it is non-consecutive,
  // scalarize the instruction.
  int Stride = Legal->isConsecutivePtr(Ptr);
  bool Reverse = Stride < 0;
  bool UniformLoad = LI && Legal->isUniform(Ptr);
  if (Stride == 0 || UniformLoad)
    return scalarizeInstruction(Instr);

  Constant *Zero = Builder.getInt32(0);
  VectorParts &Entry = WidenMap.get(Instr);

  // Handle consecutive loads/stores.
  GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  if (Gep && Legal->isInductionVariable(Gep->getPointerOperand())) {
    Value *PtrOperand = Gep->getPointerOperand();
    Value *FirstBasePtr = getVectorValue(PtrOperand)[0];
    FirstBasePtr = Builder.CreateExtractElement(FirstBasePtr, Zero);

    // Create the new GEP with the new induction variable.
    GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
    Gep2->setOperand(0, FirstBasePtr);
    Gep2->setName("gep.indvar.base");
    Ptr = Builder.Insert(Gep2);
  } else if (Gep) {
    assert(SE->isLoopInvariant(SE->getSCEV(Gep->getPointerOperand()),
                               OrigLoop) && "Base ptr must be invariant");

    // The last index does not have to be the induction. It can be
    // consecutive and be a function of the index. For example A[i+1].
    unsigned NumOperands = Gep->getNumOperands();

    Value *LastGepOperand = Gep->getOperand(NumOperands - 1);
    VectorParts &GEPParts = getVectorValue(LastGepOperand);
    Value *LastIndex = GEPParts[0];
    LastIndex = Builder.CreateExtractElement(LastIndex, Zero);

    // Create the new GEP with the new induction variable.
    GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
    Gep2->setOperand(NumOperands - 1, LastIndex);
    Gep2->setName("gep.indvar.idx");
    Ptr = Builder.Insert(Gep2);
  } else {
    // Use the induction element ptr.
    assert(isa<PHINode>(Ptr) && "Invalid induction ptr");
    VectorParts &PtrVal = getVectorValue(Ptr);
    Ptr = Builder.CreateExtractElement(PtrVal[0], Zero);
  }

  // Handle Stores:
  if (SI) {
    assert(!Legal->isUniform(SI->getPointerOperand()) &&
           "We do not allow storing to uniform addresses");

    VectorParts &StoredVal = getVectorValue(SI->getValueOperand());
    for (unsigned Part = 0; Part < UF; ++Part) {
      // Calculate the pointer for the specific unroll-part.
      Value *PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(Part * VF));

      if (Reverse) {
        // If we store to reverse consecutive memory locations then we need
        // to reverse the order of elements in the stored value.
        StoredVal[Part] = reverseVector(StoredVal[Part]);
        // If the address is consecutive but reversed, then the
        // wide store needs to start at the last vector element.
        PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(-Part * VF));
        PartPtr = Builder.CreateGEP(PartPtr, Builder.getInt32(1 - VF));
      }

      Value *VecPtr = Builder.CreateBitCast(PartPtr, DataTy->getPointerTo());
      Builder.CreateStore(StoredVal[Part], VecPtr)->setAlignment(Alignment);
    }
    return;
  }

  // Handle loads.
  for (unsigned Part = 0; Part < UF; ++Part) {
    // Calculate the pointer for the specific unroll-part.
    Value *PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(Part * VF));

    if (Reverse) {
      // If the address is consecutive but reversed, then the
      // wide load needs to start at the last vector element.
      PartPtr = Builder.CreateGEP(Ptr, Builder.getInt32(-Part * VF));
      PartPtr = Builder.CreateGEP(PartPtr, Builder.getInt32(1 - VF));
    }

    Value *VecPtr = Builder.CreateBitCast(PartPtr, DataTy->getPointerTo());
    Value *LI = Builder.CreateLoad(VecPtr, "wide.load");
    cast<LoadInst>(LI)->setAlignment(Alignment);
    Entry[Part] = Reverse ? reverseVector(LI) : LI;
  }
}

void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
  // Holds vector parameters or scalars, in case of uniform vals.
  SmallVector<VectorParts, 4> Params;

  // Find all of the vectorized parameters.
  for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
    Value *SrcOp = Instr->getOperand(op);

    // If we are accessing the old induction variable, use the new one.
    if (SrcOp == OldInduction) {
      Params.push_back(getVectorValue(SrcOp));
      continue;
    }

    // Try using previously calculated values.
    Instruction *SrcInst = dyn_cast<Instruction>(SrcOp);

    // If the src is an instruction that appeared earlier in the basic block
    // then it should already be vectorized.
    if (SrcInst && OrigLoop->contains(SrcInst)) {
      assert(WidenMap.has(SrcInst) && "Source operand is unavailable");
      // The parameter is a vector value from earlier.
      Params.push_back(WidenMap.get(SrcInst));
    } else {
      // The parameter is a scalar from outside the loop. Maybe even a constant.
      VectorParts Scalars;
      Scalars.append(UF, SrcOp);
      Params.push_back(Scalars);
    }
  }

  assert(Params.size() == Instr->getNumOperands() &&
         "Invalid number of operands");

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Value *UndefVec = IsVoidRetTy ? 0 :
    UndefValue::get(VectorType::get(Instr->getType(), VF));
  // Create a new entry in the WidenMap and initialize it to Undef or Null.
  VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);

  // For each scalar that we create:
  for (unsigned Width = 0; Width < VF; ++Width) {
    // For each vector unroll 'part':
    for (unsigned Part = 0; Part < UF; ++Part) {
      Instruction *Cloned = Instr->clone();
      if (!IsVoidRetTy)
        Cloned->setName(Instr->getName() + ".cloned");
      // Replace the operands of the cloned instructions with extracted scalars.
      for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
        Value *Op = Params[op][Part];
        // Param is a vector. Need to extract the right lane.
        if (Op->getType()->isVectorTy())
          Op = Builder.CreateExtractElement(Op, Builder.getInt32(Width));
        Cloned->setOperand(op, Op);
      }

      // Place the cloned scalar in the new loop.
      Builder.Insert(Cloned);

      // If the original scalar returns a value we need to place it in a vector
      // so that future users will be able to use it.
      if (!IsVoidRetTy)
        VecResults[Part] = Builder.CreateInsertElement(VecResults[Part], Cloned,
                                                       Builder.getInt32(Width));
    }
  }
}

Instruction *
InnerLoopVectorizer::addRuntimeCheck(LoopVectorizationLegality *Legal,
                                     Instruction *Loc) {
  LoopVectorizationLegality::RuntimePointerCheck *PtrRtCheck =
  Legal->getRuntimePointerCheck();

  if (!PtrRtCheck->Need)
    return NULL;

  Instruction *MemoryRuntimeCheck = 0;
  unsigned NumPointers = PtrRtCheck->Pointers.size();
  SmallVector<Value* , 2> Starts;
  SmallVector<Value* , 2> Ends;

  SCEVExpander Exp(*SE, "induction");

  // Use this type for pointer arithmetic.
  Type* PtrArithTy = Type::getInt8PtrTy(Loc->getContext(), 0);

  for (unsigned i = 0; i < NumPointers; ++i) {
    Value *Ptr = PtrRtCheck->Pointers[i];
    const SCEV *Sc = SE->getSCEV(Ptr);

    if (SE->isLoopInvariant(Sc, OrigLoop)) {
      DEBUG(dbgs() << "LV: Adding RT check for a loop invariant ptr:" <<
            *Ptr <<"\n");
      Starts.push_back(Ptr);
      Ends.push_back(Ptr);
    } else {
      DEBUG(dbgs() << "LV: Adding RT check for range:" << *Ptr <<"\n");

      Value *Start = Exp.expandCodeFor(PtrRtCheck->Starts[i], PtrArithTy, Loc);
      Value *End = Exp.expandCodeFor(PtrRtCheck->Ends[i], PtrArithTy, Loc);
      Starts.push_back(Start);
      Ends.push_back(End);
    }
  }

  IRBuilder<> ChkBuilder(Loc);

  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i+1; j < NumPointers; ++j) {
      Value *Start0 = ChkBuilder.CreateBitCast(Starts[i], PtrArithTy, "bc");
      Value *Start1 = ChkBuilder.CreateBitCast(Starts[j], PtrArithTy, "bc");
      Value *End0 =   ChkBuilder.CreateBitCast(Ends[i],   PtrArithTy, "bc");
      Value *End1 =   ChkBuilder.CreateBitCast(Ends[j],   PtrArithTy, "bc");

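      // The ranges [Start0, End0] and [Start1, End1] conflict (overlap) if
      // Start0 <= End1 and Start1 <= End0.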
      Value *Cmp0 = ChkBuilder.CreateICmpULE(Start0, End1, "bound0");
      Value *Cmp1 = ChkBuilder.CreateICmpULE(Start1, End0, "bound1");
      Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
      if (MemoryRuntimeCheck)
        IsConflict = ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict,
                                         "conflict.rdx");

      MemoryRuntimeCheck = cast<Instruction>(IsConflict);
    }
  }

  return MemoryRuntimeCheck;
}

void
InnerLoopVectorizer::createEmptyLoop(LoopVectorizationLegality *Legal) {
  /*
   In this function we generate a new loop. The new loop will contain
   the vectorized instructions while the old loop will continue to run the
   scalar remainder.

       [ ] <-- vector loop bypass (may consist of multiple blocks).
     /  |
    /   v
   |   [ ]     <-- vector pre header.
   |    |
   |    v
   |   [  ] \
   |   [  ]_|   <-- vector loop.
   |    |
    \   v
      >[ ]   <--- middle-block.
     /  |
    /   v
   |   [ ]     <--- new preheader.
   |    |
   |    v
   |   [ ] \
   |   [ ]_|   <-- old scalar loop to handle remainder.
    \   |
     \  v
      >[ ]     <-- exit block.
   ...
   */
1156
1157  BasicBlock *OldBasicBlock = OrigLoop->getHeader();
1158  BasicBlock *BypassBlock = OrigLoop->getLoopPreheader();
1159  BasicBlock *ExitBlock = OrigLoop->getExitBlock();
1160  assert(ExitBlock && "Must have an exit block");
1161
1162  // Some loops have a single integer induction variable, while other loops
1163  // don't. One example is c++ iterators that often have multiple pointer
1164  // induction variables. In the code below we also support a case where we
1165  // don't have a single induction variable.
1166  OldInduction = Legal->getInduction();
1167  Type *IdxTy = OldInduction ? OldInduction->getType() :
1168  DL->getIntPtrType(SE->getContext());
1169
1170  // Find the loop boundaries.
1171  const SCEV *ExitCount = SE->getExitCount(OrigLoop, OrigLoop->getLoopLatch());
1172  assert(ExitCount != SE->getCouldNotCompute() && "Invalid loop count");
1173
1174  // Get the total trip count from the count by adding 1.
1175  ExitCount = SE->getAddExpr(ExitCount,
1176                             SE->getConstant(ExitCount->getType(), 1));
1177
1178  // Expand the trip count and place the new instructions in the preheader.
1179  // Notice that the pre-header does not change, only the loop body.
1180  SCEVExpander Exp(*SE, "induction");
1181
1182  // Count holds the overall loop count (N).
1183  Value *Count = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
1184                                   BypassBlock->getTerminator());
1185
1186  // The loop index does not have to start at Zero. Find the original start
1187  // value from the induction PHI node. If we don't have an induction variable
1188  // then we know that it starts at zero.
1189  Value *StartIdx = OldInduction ?
1190  OldInduction->getIncomingValueForBlock(BypassBlock):
1191  ConstantInt::get(IdxTy, 0);
1192
1193  assert(BypassBlock && "Invalid loop structure");
1194  LoopBypassBlocks.push_back(BypassBlock);
1195
1196  // Split the single block loop into the two loop structure described above.
1197  BasicBlock *VectorPH =
1198  BypassBlock->splitBasicBlock(BypassBlock->getTerminator(), "vector.ph");
1199  BasicBlock *VecBody =
1200  VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
1201  BasicBlock *MiddleBlock =
1202  VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
1203  BasicBlock *ScalarPH =
1204  MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");
1205
1206  // Use this IR builder to create the loop instructions (Phi, Br, Cmp)
1207  // inside the loop.
1208  Builder.SetInsertPoint(VecBody->getFirstInsertionPt());
1209
1210  // Generate the induction variable.
1211  Induction = Builder.CreatePHI(IdxTy, 2, "index");
1212  // The loop step is equal to the vectorization factor (num of SIMD elements)
1213  // times the unroll factor (num of SIMD instructions).
1214  Constant *Step = ConstantInt::get(IdxTy, VF * UF);
1215
1216  // This is the IR builder that we use to add all of the logic for bypassing
1217  // the new vector loop.
1218  IRBuilder<> BypassBuilder(BypassBlock->getTerminator());
1219
1220  // We may need to extend the index in case there is a type mismatch.
1221  // We know that the count starts at zero and does not overflow.
1222  if (Count->getType() != IdxTy) {
1223    // The exit count can be of pointer type. Convert it to the correct
1224    // integer type.
1225    if (ExitCount->getType()->isPointerTy())
1226      Count = BypassBuilder.CreatePointerCast(Count, IdxTy, "ptrcnt.to.int");
1227    else
1228      Count = BypassBuilder.CreateZExtOrTrunc(Count, IdxTy, "cnt.cast");
1229  }
1230
1231  // Add the start index to the loop count to get the new end index.
1232  Value *IdxEnd = BypassBuilder.CreateAdd(Count, StartIdx, "end.idx");
1233
1234  // Now we need to generate the expression for N - (N % VF), which is
1235  // the part that the vectorized body will execute.
1236  Value *R = BypassBuilder.CreateURem(Count, Step, "n.mod.vf");
1237  Value *CountRoundDown = BypassBuilder.CreateSub(Count, R, "n.vec");
1238  Value *IdxEndRoundDown = BypassBuilder.CreateAdd(CountRoundDown, StartIdx,
1239                                                     "end.idx.rnd.down");
1240
1241  // Now, compare the new count to zero. If it is zero skip the vector loop and
1242  // jump to the scalar loop.
1243  Value *Cmp = BypassBuilder.CreateICmpEQ(IdxEndRoundDown, StartIdx,
1244                                          "cmp.zero");
1245
1246  BasicBlock *LastBypassBlock = BypassBlock;
1247
1248  // Generate the code that checks in runtime if arrays overlap. We put the
1249  // checks into a separate block to make the more common case of few elements
1250  // faster.
1251  Instruction *MemRuntimeCheck = addRuntimeCheck(Legal,
1252                                                 BypassBlock->getTerminator());
1253  if (MemRuntimeCheck) {
1254    // Create a new block containing the memory check.
1255    BasicBlock *CheckBlock = BypassBlock->splitBasicBlock(MemRuntimeCheck,
1256                                                          "vector.memcheck");
1257    LoopBypassBlocks.push_back(CheckBlock);
1258
1259    // Replace the branch into the memory check block with a conditional branch
1260    // for the "few elements case".
1261    Instruction *OldTerm = BypassBlock->getTerminator();
1262    BranchInst::Create(MiddleBlock, CheckBlock, Cmp, OldTerm);
1263    OldTerm->eraseFromParent();
1264
1265    Cmp = MemRuntimeCheck;
1266    LastBypassBlock = CheckBlock;
1267  }
1268
1269  LastBypassBlock->getTerminator()->eraseFromParent();
1270  BranchInst::Create(MiddleBlock, VectorPH, Cmp,
1271                     LastBypassBlock);
1272
1273  // We are going to resume the execution of the scalar loop.
1274  // Go over all of the induction variables that we found and fix the
1275  // PHIs that are left in the scalar version of the loop.
1276  // The starting values of PHI nodes depend on the counter of the last
1277  // iteration in the vectorized loop.
1278  // If we come from a bypass edge then we need to start from the original
1279  // start value.
1280
1281  // This variable saves the new starting index for the scalar loop.
1282  PHINode *ResumeIndex = 0;
1283  LoopVectorizationLegality::InductionList::iterator I, E;
1284  LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
1285  for (I = List->begin(), E = List->end(); I != E; ++I) {
1286    PHINode *OrigPhi = I->first;
1287    LoopVectorizationLegality::InductionInfo II = I->second;
1288    PHINode *ResumeVal = PHINode::Create(OrigPhi->getType(), 2, "resume.val",
1289                                         MiddleBlock->getTerminator());
1290    Value *EndValue = 0;
1291    switch (II.IK) {
1292    case LoopVectorizationLegality::IK_NoInduction:
1293      llvm_unreachable("Unknown induction");
1294    case LoopVectorizationLegality::IK_IntInduction: {
1295      // Handle the integer induction counter:
1296      assert(OrigPhi->getType()->isIntegerTy() && "Invalid type");
1297      assert(OrigPhi == OldInduction && "Unknown integer PHI");
1298      // We know what the end value is.
1299      EndValue = IdxEndRoundDown;
1300      // We also know which PHI node holds it.
1301      ResumeIndex = ResumeVal;
1302      break;
1303    }
1304    case LoopVectorizationLegality::IK_ReverseIntInduction: {
1305      // Convert the CountRoundDown variable to the PHI size.
1306      unsigned CRDSize = CountRoundDown->getType()->getScalarSizeInBits();
1307      unsigned IISize = II.StartValue->getType()->getScalarSizeInBits();
1308      Value *CRD = CountRoundDown;
1309      if (CRDSize > IISize)
1310        CRD = CastInst::Create(Instruction::Trunc, CountRoundDown,
1311                               II.StartValue->getType(), "tr.crd",
1312                               LoopBypassBlocks.back()->getTerminator());
1313      else if (CRDSize < IISize)
1314        CRD = CastInst::Create(Instruction::SExt, CountRoundDown,
1315                               II.StartValue->getType(),
1316                               "sext.crd",
1317                               LoopBypassBlocks.back()->getTerminator());
1318      // Handle reverse integer induction counter:
1319      EndValue =
1320        BinaryOperator::CreateSub(II.StartValue, CRD, "rev.ind.end",
1321                                  LoopBypassBlocks.back()->getTerminator());
1322      break;
1323    }
1324    case LoopVectorizationLegality::IK_PtrInduction: {
1325      // For pointer induction variables, calculate the offset using
1326      // the end index.
1327      EndValue =
1328        GetElementPtrInst::Create(II.StartValue, CountRoundDown, "ptr.ind.end",
1329                                  LoopBypassBlocks.back()->getTerminator());
1330      break;
1331    }
1332    case LoopVectorizationLegality::IK_ReversePtrInduction: {
1333      // The value at the end of the loop for the reverse pointer is calculated
1334      // by creating a GEP with a negative index starting from the start value.
1335      Value *Zero = ConstantInt::get(CountRoundDown->getType(), 0);
1336      Value *NegIdx = BinaryOperator::CreateSub(Zero, CountRoundDown,
1337                                  "rev.ind.end",
1338                                  LoopBypassBlocks.back()->getTerminator());
1339      EndValue = GetElementPtrInst::Create(II.StartValue, NegIdx,
1340                                  "rev.ptr.ind.end",
1341                                  LoopBypassBlocks.back()->getTerminator());
1342      break;
1343    }
1344    }// end of case
1345
1346    // The new PHI merges the original incoming value, in case of a bypass,
1347    // or the value at the end of the vectorized loop.
1348    for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
1349      ResumeVal->addIncoming(II.StartValue, LoopBypassBlocks[I]);
1350    ResumeVal->addIncoming(EndValue, VecBody);
1351
1352    // Fix the scalar body counter (PHI node).
1353    unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);
1354    OrigPhi->setIncomingValue(BlockIdx, ResumeVal);
1355  }
1356
1357  // If we are generating a new induction variable then we also need to
1358  // generate the code that calculates the exit value. This value is not
1359  // simply the end of the counter because we may skip the vectorized body
1360  // in case of a runtime check.
1361  if (!OldInduction) {
1362    assert(!ResumeIndex && "Unexpected resume value found");
1363    ResumeIndex = PHINode::Create(IdxTy, 2, "new.indc.resume.val",
1364                                  MiddleBlock->getTerminator());
1365    for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
1366      ResumeIndex->addIncoming(StartIdx, LoopBypassBlocks[I]);
1367    ResumeIndex->addIncoming(IdxEndRoundDown, VecBody);
1368  }
1369
1370  // Make sure that we found the index where the scalar loop needs to continue.
1371  assert(ResumeIndex && ResumeIndex->getType()->isIntegerTy() &&
1372         "Invalid resume Index");
1373
1374  // Add a check in the middle block to see if we have completed
1375  // all of the iterations in the first vector loop.
1376  // If (N - N%VF) == N, then we *don't* need to run the remainder.
1377  Value *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, IdxEnd,
1378                                ResumeIndex, "cmp.n",
1379                                MiddleBlock->getTerminator());
1380
1381  BranchInst::Create(ExitBlock, ScalarPH, CmpN, MiddleBlock->getTerminator());
1382  // Remove the old terminator.
1383  MiddleBlock->getTerminator()->eraseFromParent();
1384
1385  // Create the 'index.next' value and fill in the induction PHI.
1386  Value *NextIdx = Builder.CreateAdd(Induction, Step, "index.next");
1387  Induction->addIncoming(StartIdx, VectorPH);
1388  Induction->addIncoming(NextIdx, VecBody);
1389  // Create the compare.
1390  Value *ICmp = Builder.CreateICmpEQ(NextIdx, IdxEndRoundDown);
1391  Builder.CreateCondBr(ICmp, MiddleBlock, VecBody);
1392
1393  // Now we have two terminators. Remove the old one from the block.
1394  VecBody->getTerminator()->eraseFromParent();
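  // After this rewrite the vector-body latch looks roughly like the sketch
  // below; the actual step is VF * UF and the names are illustrative:
  //   %index.next = add i64 %index, <step>
  //   %cmp = icmp eq i64 %index.next, %end.idx.rnd.down
  //   br i1 %cmp, label %middle.block, label %vector.body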
1395
1396  // Get ready to start creating new instructions into the vectorized body.
1397  Builder.SetInsertPoint(VecBody->getFirstInsertionPt());
1398
1399  // Create and register the new vector loop.
1400  Loop* Lp = new Loop();
1401  Loop *ParentLoop = OrigLoop->getParentLoop();
1402
1403  // Insert the new loop into the loop nest and register the new basic blocks.
1404  if (ParentLoop) {
1405    ParentLoop->addChildLoop(Lp);
1406    for (unsigned I = 1, E = LoopBypassBlocks.size(); I != E; ++I)
1407      ParentLoop->addBasicBlockToLoop(LoopBypassBlocks[I], LI->getBase());
1408    ParentLoop->addBasicBlockToLoop(ScalarPH, LI->getBase());
1409    ParentLoop->addBasicBlockToLoop(VectorPH, LI->getBase());
1410    ParentLoop->addBasicBlockToLoop(MiddleBlock, LI->getBase());
1411  } else {
1412    LI->addTopLevelLoop(Lp);
1413  }
1414
1415  Lp->addBasicBlockToLoop(VecBody, LI->getBase());
1416
1417  // Save the state.
1418  LoopVectorPreHeader = VectorPH;
1419  LoopScalarPreHeader = ScalarPH;
1420  LoopMiddleBlock = MiddleBlock;
1421  LoopExitBlock = ExitBlock;
1422  LoopVectorBody = VecBody;
1423  LoopScalarBody = OldBasicBlock;
1424}
1425
1426/// This function returns the identity element (or neutral element) for
1427/// the operation K.
1428static Constant*
1429getReductionIdentity(LoopVectorizationLegality::ReductionKind K, Type *Tp) {
1430  switch (K) {
1431  case LoopVectorizationLegality::RK_IntegerXor:
1432  case LoopVectorizationLegality::RK_IntegerAdd:
1433  case LoopVectorizationLegality::RK_IntegerOr:
1434    // Adding, XOR-ing, or OR-ing zero to a number does not change it.
1435    return ConstantInt::get(Tp, 0);
1436  case LoopVectorizationLegality::RK_IntegerMult:
1437    // Multiplying a number by 1 does not change it.
1438    return ConstantInt::get(Tp, 1);
1439  case LoopVectorizationLegality::RK_IntegerAnd:
1440    // AND-ing a number with an all-1 value does not change it.
1441    return ConstantInt::get(Tp, -1, true);
1442  case LoopVectorizationLegality::RK_FloatMult:
1443    // Multiplying a number by 1 does not change it.
1444    return ConstantFP::get(Tp, 1.0L);
1445  case LoopVectorizationLegality::RK_FloatAdd:
1446    // Adding zero to a number does not change it.
1447    return ConstantFP::get(Tp, 0.0L);
1448  default:
1449    llvm_unreachable("Unknown reduction kind");
1450  }
1451}
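// For example, with VF = 4 an integer-add reduction later splats this
// identity into <0, 0, 0, 0> and an integer-and reduction into
// <-1, -1, -1, -1>; the scalar start value is then inserted into lane 0 of
// that splat (see ConstantVector::getSplat and CreateInsertElement in
// vectorizeLoop below).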
1452
1453static Intrinsic::ID
1454getIntrinsicIDForCall(CallInst *CI, const TargetLibraryInfo *TLI) {
1455  // If we have an intrinsic call, check if it is trivially vectorizable.
1456  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) {
1457    switch (II->getIntrinsicID()) {
1458    case Intrinsic::sqrt:
1459    case Intrinsic::sin:
1460    case Intrinsic::cos:
1461    case Intrinsic::exp:
1462    case Intrinsic::exp2:
1463    case Intrinsic::log:
1464    case Intrinsic::log10:
1465    case Intrinsic::log2:
1466    case Intrinsic::fabs:
1467    case Intrinsic::floor:
1468    case Intrinsic::ceil:
1469    case Intrinsic::trunc:
1470    case Intrinsic::rint:
1471    case Intrinsic::nearbyint:
1472    case Intrinsic::pow:
1473    case Intrinsic::fma:
1474    case Intrinsic::fmuladd:
1475      return II->getIntrinsicID();
1476    default:
1477      return Intrinsic::not_intrinsic;
1478    }
1479  }
1480
1481  if (!TLI)
1482    return Intrinsic::not_intrinsic;
1483
1484  LibFunc::Func Func;
1485  Function *F = CI->getCalledFunction();
1486  // We're going to make assumptions about the semantics of the functions, so
1487  // check that the target knows the function is available in this environment.
1488  if (!F || !TLI->getLibFunc(F->getName(), Func))
1489    return Intrinsic::not_intrinsic;
1490
1491  // Otherwise check if we have a call to a function that can be turned into a
1492  // vector intrinsic.
1493  switch (Func) {
1494  default:
1495    break;
1496  case LibFunc::sin:
1497  case LibFunc::sinf:
1498  case LibFunc::sinl:
1499    return Intrinsic::sin;
1500  case LibFunc::cos:
1501  case LibFunc::cosf:
1502  case LibFunc::cosl:
1503    return Intrinsic::cos;
1504  case LibFunc::exp:
1505  case LibFunc::expf:
1506  case LibFunc::expl:
1507    return Intrinsic::exp;
1508  case LibFunc::exp2:
1509  case LibFunc::exp2f:
1510  case LibFunc::exp2l:
1511    return Intrinsic::exp2;
1512  case LibFunc::log:
1513  case LibFunc::logf:
1514  case LibFunc::logl:
1515    return Intrinsic::log;
1516  case LibFunc::log10:
1517  case LibFunc::log10f:
1518  case LibFunc::log10l:
1519    return Intrinsic::log10;
1520  case LibFunc::log2:
1521  case LibFunc::log2f:
1522  case LibFunc::log2l:
1523    return Intrinsic::log2;
1524  case LibFunc::fabs:
1525  case LibFunc::fabsf:
1526  case LibFunc::fabsl:
1527    return Intrinsic::fabs;
1528  case LibFunc::floor:
1529  case LibFunc::floorf:
1530  case LibFunc::floorl:
1531    return Intrinsic::floor;
1532  case LibFunc::ceil:
1533  case LibFunc::ceilf:
1534  case LibFunc::ceill:
1535    return Intrinsic::ceil;
1536  case LibFunc::trunc:
1537  case LibFunc::truncf:
1538  case LibFunc::truncl:
1539    return Intrinsic::trunc;
1540  case LibFunc::rint:
1541  case LibFunc::rintf:
1542  case LibFunc::rintl:
1543    return Intrinsic::rint;
1544  case LibFunc::nearbyint:
1545  case LibFunc::nearbyintf:
1546  case LibFunc::nearbyintl:
1547    return Intrinsic::nearbyint;
1548  case LibFunc::pow:
1549  case LibFunc::powf:
1550  case LibFunc::powl:
1551    return Intrinsic::pow;
1552  }
1553
1554  return Intrinsic::not_intrinsic;
1555}
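// As an illustrative example, a call to the C library function "sinf" whose
// availability the target confirms maps to Intrinsic::sin, which the call
// widening below can then declare on a vector type such as <4 x float>.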
1556
1557/// This function translates the reduction kind to an LLVM binary operator.
1558static Instruction::BinaryOps
1559getReductionBinOp(LoopVectorizationLegality::ReductionKind Kind) {
1560  switch (Kind) {
1561    case LoopVectorizationLegality::RK_IntegerAdd:
1562      return Instruction::Add;
1563    case LoopVectorizationLegality::RK_IntegerMult:
1564      return Instruction::Mul;
1565    case LoopVectorizationLegality::RK_IntegerOr:
1566      return Instruction::Or;
1567    case LoopVectorizationLegality::RK_IntegerAnd:
1568      return Instruction::And;
1569    case LoopVectorizationLegality::RK_IntegerXor:
1570      return Instruction::Xor;
1571    case LoopVectorizationLegality::RK_FloatMult:
1572      return Instruction::FMul;
1573    case LoopVectorizationLegality::RK_FloatAdd:
1574      return Instruction::FAdd;
1575    default:
1576      llvm_unreachable("Unknown reduction operation");
1577  }
1578}
1579
1580void
1581InnerLoopVectorizer::vectorizeLoop(LoopVectorizationLegality *Legal) {
1582  //===------------------------------------------------===//
1583  //
1584  // Notice: any optimization or new instruction that goes
1585  // into the code below should also be implemented in
1586  // the cost-model.
1587  //
1588  //===------------------------------------------------===//
1589  Constant *Zero = Builder.getInt32(0);
1590
1591  // In order to support reduction variables we need to be able to vectorize
1592  // Phi nodes. Phi nodes have cycles, so we need to vectorize them in two
1593  // stages. First, we create a new vector PHI node with no incoming edges.
1594  // We use this value when we vectorize all of the instructions that use the
1595  // PHI. Next, after all of the instructions in the block are complete we
1596  // add the new incoming edges to the PHI. At this point all of the
1597  // instructions in the basic block are vectorized, so we can use them to
1598  // construct the PHI.
1599  PhiVector RdxPHIsToFix;
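  // As a sketch, for the reduction "s += a[i]" phase one creates an empty
  // "%vec.phi = phi <4 x i32>" with no incoming edges; only after the whole
  // loop body has been widened are the preheader and latch edges added.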
1600
1601  // Scan the loop in a topological order to ensure that defs are vectorized
1602  // before users.
1603  LoopBlocksDFS DFS(OrigLoop);
1604  DFS.perform(LI);
1605
1606  // Vectorize all of the blocks in the original loop.
1607  for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(),
1608       be = DFS.endRPO(); bb != be; ++bb)
1609    vectorizeBlockInLoop(Legal, *bb, &RdxPHIsToFix);
1610
1611  // At this point every instruction in the original loop is widened to
1612  // a vector form. We are almost done. Now, we need to fix the PHI nodes
1613  // that we vectorized. The PHI nodes are currently empty because we did
1614  // not want to introduce cycles. Notice that the remaining PHI nodes
1615  // that we need to fix are reduction variables.
1616
1617  // Create the 'reduced' values for each of the induction vars.
1618  // The reduced values are the vector values that we scalarize and combine
1619  // after the loop is finished.
1620  for (PhiVector::iterator it = RdxPHIsToFix.begin(), e = RdxPHIsToFix.end();
1621       it != e; ++it) {
1622    PHINode *RdxPhi = *it;
1623    assert(RdxPhi && "Unable to recover vectorized PHI");
1624
1625    // Find the reduction variable descriptor.
1626    assert(Legal->getReductionVars()->count(RdxPhi) &&
1627           "Unable to find the reduction variable");
1628    LoopVectorizationLegality::ReductionDescriptor RdxDesc =
1629        (*Legal->getReductionVars())[RdxPhi];
1630
1631    // We need to generate a reduction vector from the incoming scalar.
1632    // To do so, we need to generate the 'identity' vector and override
1633    // one of the elements with the incoming scalar reduction. We need
1634    // to do it in the vector-loop preheader.
1635    Builder.SetInsertPoint(LoopBypassBlocks.back()->getTerminator());
1636
1637    // This is the vector-clone of the value that leaves the loop.
1638    VectorParts &VectorExit = getVectorValue(RdxDesc.LoopExitInstr);
1639    Type *VecTy = VectorExit[0]->getType();
1640
1641    // Find the reduction identity variable. Zero for addition, or, xor,
1642    // one for multiplication, -1 for And.
1643    Constant *Iden = getReductionIdentity(RdxDesc.Kind, VecTy->getScalarType());
1644    Constant *Identity = ConstantVector::getSplat(VF, Iden);
1645
1646    // This vector is the Identity vector where the first element is the
1647    // incoming scalar reduction.
1648    Value *VectorStart = Builder.CreateInsertElement(Identity,
1649                                                     RdxDesc.StartValue, Zero);
1650
1651    // Fix the vector-loop phi.
1652    // We created the induction variable so we know that the
1653    // preheader is the first entry.
1654    BasicBlock *VecPreheader = Induction->getIncomingBlock(0);
1655
1656    // Reductions do not have to start at zero. They can start with
1657    // any loop invariant values.
1658    VectorParts &VecRdxPhi = WidenMap.get(RdxPhi);
1659    BasicBlock *Latch = OrigLoop->getLoopLatch();
1660    Value *LoopVal = RdxPhi->getIncomingValueForBlock(Latch);
1661    VectorParts &Val = getVectorValue(LoopVal);
1662    for (unsigned part = 0; part < UF; ++part) {
1663      // Make sure to add the reduction start value only to the
1664      // first unroll part.
1665      Value *StartVal = (part == 0) ? VectorStart : Identity;
1666      cast<PHINode>(VecRdxPhi[part])->addIncoming(StartVal, VecPreheader);
1667      cast<PHINode>(VecRdxPhi[part])->addIncoming(Val[part], LoopVectorBody);
1668    }
1669
1670    // Before each round, move the insertion point right between
1671    // the PHIs and the values we are going to write.
1672    // This allows us to write both PHINodes and the extractelement
1673    // instructions.
1674    Builder.SetInsertPoint(LoopMiddleBlock->getFirstInsertionPt());
1675
1676    VectorParts RdxParts;
1677    for (unsigned part = 0; part < UF; ++part) {
1678      // This PHINode contains the vectorized reduction variable, or
1679      // the initial value vector, if we bypass the vector loop.
1680      VectorParts &RdxExitVal = getVectorValue(RdxDesc.LoopExitInstr);
1681      PHINode *NewPhi = Builder.CreatePHI(VecTy, 2, "rdx.vec.exit.phi");
1682      Value *StartVal = (part == 0) ? VectorStart : Identity;
1683      for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
1684        NewPhi->addIncoming(StartVal, LoopBypassBlocks[I]);
1685      NewPhi->addIncoming(RdxExitVal[part], LoopVectorBody);
1686      RdxParts.push_back(NewPhi);
1687    }
1688
1689    // Reduce all of the unrolled parts into a single vector.
1690    Value *ReducedPartRdx = RdxParts[0];
1691    for (unsigned part = 1; part < UF; ++part) {
1692      Instruction::BinaryOps Op = getReductionBinOp(RdxDesc.Kind);
1693      ReducedPartRdx = Builder.CreateBinOp(Op, RdxParts[part], ReducedPartRdx,
1694                                           "bin.rdx");
1695    }
1696
1697    // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles
1698    // and vector ops, reducing the set of values being computed by half each
1699    // round.
1700    assert(isPowerOf2_32(VF) &&
1701           "Reduction emission only supported for pow2 vectors!");
1702    Value *TmpVec = ReducedPartRdx;
1703    SmallVector<Constant*, 32> ShuffleMask(VF, 0);
1704    for (unsigned i = VF; i != 1; i >>= 1) {
1705      // Move the upper half of the vector to the lower half.
1706      for (unsigned j = 0; j != i/2; ++j)
1707        ShuffleMask[j] = Builder.getInt32(i/2 + j);
1708
1709      // Fill the rest of the mask with undef.
1710      std::fill(&ShuffleMask[i/2], ShuffleMask.end(),
1711                UndefValue::get(Builder.getInt32Ty()));
1712
1713      Value *Shuf =
1714        Builder.CreateShuffleVector(TmpVec,
1715                                    UndefValue::get(TmpVec->getType()),
1716                                    ConstantVector::get(ShuffleMask),
1717                                    "rdx.shuf");
1718
1719      Instruction::BinaryOps Op = getReductionBinOp(RdxDesc.Kind);
1720      TmpVec = Builder.CreateBinOp(Op, TmpVec, Shuf, "bin.rdx");
1721    }
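    // For example, with VF = 4 this emits two rounds (values illustrative):
    //   round 1: <a0, a1, a2, a3> op <a2, a3, undef, undef>
    //            -> <a0 op a2, a1 op a3, undef, undef>
    //   round 2: op with lane 1 shuffled down to lane 0
    //            -> <(a0 op a2) op (a1 op a3), undef, undef, undef>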
1722
1723    // The result is in the first element of the vector.
1724    Value *Scalar0 = Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
1725
1726    // Now, we need to fix the users of the reduction variable
1727    // inside and outside of the scalar remainder loop.
1728    // We know that the loop is in LCSSA form. We need to update the
1729    // PHI nodes in the exit blocks.
1730    for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
1731         LEE = LoopExitBlock->end(); LEI != LEE; ++LEI) {
1732      PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
1733      if (!LCSSAPhi) continue;
1734
1735      // All PHINodes need to have a single entry edge, or two if
1736      // we already fixed them.
1737      assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
1738
1739      // We found our reduction value exit-PHI. Update it with the
1740      // incoming bypass edge.
1741      if (LCSSAPhi->getIncomingValue(0) == RdxDesc.LoopExitInstr) {
1742        // Add an edge coming from the bypass.
1743        LCSSAPhi->addIncoming(Scalar0, LoopMiddleBlock);
1744        break;
1745      }
1746    }// end of the LCSSA phi scan.
1747
1748    // Fix the scalar loop reduction variable with the incoming reduction sum
1749    // from the vector body and from the backedge value.
1750    int IncomingEdgeBlockIdx =
1751        RdxPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
1752    assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
1753    // Pick the other block.
1754    int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
1755    RdxPhi->setIncomingValue(SelfEdgeBlockIdx, Scalar0);
1756    RdxPhi->setIncomingValue(IncomingEdgeBlockIdx, RdxDesc.LoopExitInstr);
1757  }// end of for each redux variable.
1758
1759  // The Loop exit block may have single value PHI nodes where the incoming
1760  // value is 'undef'. While vectorizing we only handled real values that
1761  // were defined inside the loop. Here we handle the 'undef case'.
1762  // See PR14725.
1763  for (BasicBlock::iterator LEI = LoopExitBlock->begin(),
1764       LEE = LoopExitBlock->end(); LEI != LEE; ++LEI) {
1765    PHINode *LCSSAPhi = dyn_cast<PHINode>(LEI);
1766    if (!LCSSAPhi) continue;
1767    if (LCSSAPhi->getNumIncomingValues() == 1)
1768      LCSSAPhi->addIncoming(UndefValue::get(LCSSAPhi->getType()),
1769                            LoopMiddleBlock);
1770  }
1771}
1772
1773InnerLoopVectorizer::VectorParts
1774InnerLoopVectorizer::createEdgeMask(BasicBlock *Src, BasicBlock *Dst) {
1775  assert(std::find(pred_begin(Dst), pred_end(Dst), Src) != pred_end(Dst) &&
1776         "Invalid edge");
1777
1778  VectorParts SrcMask = createBlockInMask(Src);
1779
1780  // The terminator has to be a branch inst!
1781  BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
1782  assert(BI && "Unexpected terminator found");
1783
1784  if (BI->isConditional()) {
1785    VectorParts EdgeMask = getVectorValue(BI->getCondition());
1786
1787    if (BI->getSuccessor(0) != Dst)
1788      for (unsigned part = 0; part < UF; ++part)
1789        EdgeMask[part] = Builder.CreateNot(EdgeMask[part]);
1790
1791    for (unsigned part = 0; part < UF; ++part)
1792      EdgeMask[part] = Builder.CreateAnd(EdgeMask[part], SrcMask[part]);
1793    return EdgeMask;
1794  }
1795
1796  return SrcMask;
1797}
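// As a sketch, for a source block ending in "br i1 %c, label %T, label %F":
//   mask(Src -> T) = BlockMask(Src) & %c
//   mask(Src -> F) = BlockMask(Src) & ~%c
// and createBlockInMask below ORs these edge masks over all predecessors of
// the destination block.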
1798
1799InnerLoopVectorizer::VectorParts
1800InnerLoopVectorizer::createBlockInMask(BasicBlock *BB) {
1801  assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
1802
1803  // Loop incoming mask is all-one.
1804  if (OrigLoop->getHeader() == BB) {
1805    Value *C = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 1);
1806    return getVectorValue(C);
1807  }
1808
1809  // This is the block mask. We OR all incoming edge masks, starting with zero.
1810  Value *Zero = ConstantInt::get(IntegerType::getInt1Ty(BB->getContext()), 0);
1811  VectorParts BlockMask = getVectorValue(Zero);
1812
1813  // For each pred:
1814  for (pred_iterator it = pred_begin(BB), e = pred_end(BB); it != e; ++it) {
1815    VectorParts EM = createEdgeMask(*it, BB);
1816    for (unsigned part = 0; part < UF; ++part)
1817      BlockMask[part] = Builder.CreateOr(BlockMask[part], EM[part]);
1818  }
1819
1820  return BlockMask;
1821}
1822
1823void
1824InnerLoopVectorizer::vectorizeBlockInLoop(LoopVectorizationLegality *Legal,
1825                                          BasicBlock *BB, PhiVector *PV) {
1826  // For each instruction in the old loop.
1827  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
1828    VectorParts &Entry = WidenMap.get(it);
1829    switch (it->getOpcode()) {
1830    case Instruction::Br:
1831      // Nothing to do for PHIs and BR, since we already took care of the
1832      // loop control flow instructions.
1833      continue;
1834    case Instruction::PHI: {
1835      PHINode* P = cast<PHINode>(it);
1836      // Handle reduction variables:
1837      if (Legal->getReductionVars()->count(P)) {
1838        for (unsigned part = 0; part < UF; ++part) {
1839          // This is phase one of vectorizing PHIs.
1840          Type *VecTy = VectorType::get(it->getType(), VF);
1841          Entry[part] = PHINode::Create(VecTy, 2, "vec.phi",
1842                                        LoopVectorBody->getFirstInsertionPt());
1843        }
1844        PV->push_back(P);
1845        continue;
1846      }
1847
1848      // Check for PHI nodes that are lowered to vector selects.
1849      if (P->getParent() != OrigLoop->getHeader()) {
1850        // We know that all PHIs in non header blocks are converted into
1851        // selects, so we don't have to worry about the insertion order and we
1852        // can just use the builder.
1853
1854        // At this point we generate the predication tree. There may be
1855        // duplications since this is a simple recursive scan, but future
1856        // optimizations will clean it up.
1857        VectorParts Cond = createEdgeMask(P->getIncomingBlock(0),
1858                                               P->getParent());
1859
1860        for (unsigned part = 0; part < UF; ++part) {
1861          VectorParts &In0 = getVectorValue(P->getIncomingValue(0));
1862          VectorParts &In1 = getVectorValue(P->getIncomingValue(1));
1863          Entry[part] = Builder.CreateSelect(Cond[part], In0[part], In1[part],
1864                                             "predphi");
1865        }
1866        continue;
1867      }
1868
1869      // This PHINode must be an induction variable.
1870      // Make sure that we know about it.
1871      assert(Legal->getInductionVars()->count(P) &&
1872             "Not an induction variable");
1873
1874      LoopVectorizationLegality::InductionInfo II =
1875        Legal->getInductionVars()->lookup(P);
1876
1877      switch (II.IK) {
1878      case LoopVectorizationLegality::IK_NoInduction:
1879        llvm_unreachable("Unknown induction");
1880      case LoopVectorizationLegality::IK_IntInduction: {
1881        assert(P == OldInduction && "Unexpected PHI");
1882        Value *Broadcasted = getBroadcastInstrs(Induction);
1883        // After broadcasting the induction variable we need to make the
1884        // vector consecutive by adding 0, 1, 2 ...
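        // E.g. (sketch) with VF = 4 and UF = 2, an induction i widens to
        // part 0 = <i, i+1, i+2, i+3> and part 1 = <i+4, i+5, i+6, i+7>.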
1885        for (unsigned part = 0; part < UF; ++part)
1886          Entry[part] = getConsecutiveVector(Broadcasted, VF * part, false);
1887        continue;
1888      }
1889      case LoopVectorizationLegality::IK_ReverseIntInduction:
1890      case LoopVectorizationLegality::IK_PtrInduction:
1891      case LoopVectorizationLegality::IK_ReversePtrInduction:
1892        // Handle reverse integer and pointer inductions.
1893        Value *StartIdx = 0;
1894        // If we have a single integer induction variable then use it.
1895        // Otherwise, start counting at zero.
1896        if (OldInduction) {
1897          LoopVectorizationLegality::InductionInfo OldII =
1898            Legal->getInductionVars()->lookup(OldInduction);
1899          StartIdx = OldII.StartValue;
1900        } else {
1901          StartIdx = ConstantInt::get(Induction->getType(), 0);
1902        }
1903        // This is the normalized GEP that starts counting at zero.
1904        Value *NormalizedIdx = Builder.CreateSub(Induction, StartIdx,
1905                                                 "normalized.idx");
1906
1907        // Handle the reverse integer induction variable case.
1908        if (LoopVectorizationLegality::IK_ReverseIntInduction == II.IK) {
1909          IntegerType *DstTy = cast<IntegerType>(II.StartValue->getType());
1910          Value *CNI = Builder.CreateSExtOrTrunc(NormalizedIdx, DstTy,
1911                                                 "resize.norm.idx");
1912          Value *ReverseInd  = Builder.CreateSub(II.StartValue, CNI,
1913                                                 "reverse.idx");
1914
1915          // This is a new value so do not hoist it out.
1916          Value *Broadcasted = getBroadcastInstrs(ReverseInd);
1917          // After broadcasting the induction variable we need to make the
1918          // vector consecutive by adding  ... -3, -2, -1, 0.
1919          for (unsigned part = 0; part < UF; ++part)
1920            Entry[part] = getConsecutiveVector(Broadcasted, -VF * part, true);
1921          continue;
1922        }
1923
1924        // Handle the pointer induction variable case.
1925        assert(P->getType()->isPointerTy() && "Unexpected type.");
1926
1927        // Is this a reverse induction ptr or a consecutive induction ptr.
1928        bool Reverse = (LoopVectorizationLegality::IK_ReversePtrInduction ==
1929                        II.IK);
1930
1931        // This is the vector of results. Notice that we don't generate
1932        // vector geps because scalar geps result in better code.
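        // Sketch for part 0 with VF = 4: lane j of a consecutive pointer is
        // "gep StartValue, (normalized.idx + j)", while a reverse pointer
        // uses "gep StartValue, -(normalized.idx + j)".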
1933        for (unsigned part = 0; part < UF; ++part) {
1934          Value *VecVal = UndefValue::get(VectorType::get(P->getType(), VF));
1935          for (unsigned i = 0; i < VF; ++i) {
1936            int EltIndex = (i + part * VF) * (Reverse ? -1 : 1);
1937            Constant *Idx = ConstantInt::get(Induction->getType(), EltIndex);
1938            Value *GlobalIdx;
1939            if (!Reverse)
1940              GlobalIdx = Builder.CreateAdd(NormalizedIdx, Idx, "gep.idx");
1941            else
1942              GlobalIdx = Builder.CreateSub(Idx, NormalizedIdx, "gep.ridx");
1943
1944            Value *SclrGep = Builder.CreateGEP(II.StartValue, GlobalIdx,
1945                                               "next.gep");
1946            VecVal = Builder.CreateInsertElement(VecVal, SclrGep,
1947                                                 Builder.getInt32(i),
1948                                                 "insert.gep");
1949          }
1950          Entry[part] = VecVal;
1951        }
1952        continue;
1953      }
1954
1955    }// End of PHI.
1956
1957    case Instruction::Add:
1958    case Instruction::FAdd:
1959    case Instruction::Sub:
1960    case Instruction::FSub:
1961    case Instruction::Mul:
1962    case Instruction::FMul:
1963    case Instruction::UDiv:
1964    case Instruction::SDiv:
1965    case Instruction::FDiv:
1966    case Instruction::URem:
1967    case Instruction::SRem:
1968    case Instruction::FRem:
1969    case Instruction::Shl:
1970    case Instruction::LShr:
1971    case Instruction::AShr:
1972    case Instruction::And:
1973    case Instruction::Or:
1974    case Instruction::Xor: {
1975      // Just widen binops.
1976      BinaryOperator *BinOp = dyn_cast<BinaryOperator>(it);
1977      VectorParts &A = getVectorValue(it->getOperand(0));
1978      VectorParts &B = getVectorValue(it->getOperand(1));
1979
1980      // Use this vector value for all users of the original instruction.
1981      for (unsigned Part = 0; Part < UF; ++Part) {
1982        Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A[Part], B[Part]);
1983
1984        // Update the NSW, NUW and Exact flags. Notice: V can be an Undef.
1985        BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V);
1986        if (VecOp && isa<OverflowingBinaryOperator>(BinOp)) {
1987          VecOp->setHasNoSignedWrap(BinOp->hasNoSignedWrap());
1988          VecOp->setHasNoUnsignedWrap(BinOp->hasNoUnsignedWrap());
1989        }
1990        if (VecOp && isa<PossiblyExactOperator>(VecOp))
1991          VecOp->setIsExact(BinOp->isExact());
1992
1993        Entry[Part] = V;
1994      }
1995      break;
1996    }
1997    case Instruction::Select: {
1998      // Widen selects.
1999      // If the selector is loop invariant we can create a select
2000      // instruction with a scalar condition. Otherwise, use vector-select.
2001      bool InvariantCond = SE->isLoopInvariant(SE->getSCEV(it->getOperand(0)),
2002                                               OrigLoop);
2003
2004      // The condition can be loop invariant but still defined inside the
2005      // loop. This means that we can't just use the original 'cond' value.
2006      // We have to take the 'vectorized' value and pick the first lane.
2007      // Instcombine will make this a no-op.
2008      VectorParts &Cond = getVectorValue(it->getOperand(0));
2009      VectorParts &Op0  = getVectorValue(it->getOperand(1));
2010      VectorParts &Op1  = getVectorValue(it->getOperand(2));
2011      Value *ScalarCond = Builder.CreateExtractElement(Cond[0],
2012                                                       Builder.getInt32(0));
2013      for (unsigned Part = 0; Part < UF; ++Part) {
2014        Entry[Part] = Builder.CreateSelect(
2015          InvariantCond ? ScalarCond : Cond[Part],
2016          Op0[Part],
2017          Op1[Part]);
2018      }
2019      break;
2020    }
2021
2022    case Instruction::ICmp:
2023    case Instruction::FCmp: {
2024      // Widen compares. Generate vector compares.
2025      bool FCmp = (it->getOpcode() == Instruction::FCmp);
2026      CmpInst *Cmp = dyn_cast<CmpInst>(it);
2027      VectorParts &A = getVectorValue(it->getOperand(0));
2028      VectorParts &B = getVectorValue(it->getOperand(1));
2029      for (unsigned Part = 0; Part < UF; ++Part) {
2030        Value *C = 0;
2031        if (FCmp)
2032          C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]);
2033        else
2034          C = Builder.CreateICmp(Cmp->getPredicate(), A[Part], B[Part]);
2035        Entry[Part] = C;
2036      }
2037      break;
2038    }
2039
2040    case Instruction::Store:
2041    case Instruction::Load:
2042      vectorizeMemoryInstruction(it, Legal);
2043      break;
2044    case Instruction::ZExt:
2045    case Instruction::SExt:
2046    case Instruction::FPToUI:
2047    case Instruction::FPToSI:
2048    case Instruction::FPExt:
2049    case Instruction::PtrToInt:
2050    case Instruction::IntToPtr:
2051    case Instruction::SIToFP:
2052    case Instruction::UIToFP:
2053    case Instruction::Trunc:
2054    case Instruction::FPTrunc:
2055    case Instruction::BitCast: {
2056      CastInst *CI = dyn_cast<CastInst>(it);
2057      /// Optimize the special case where the source is the induction
2058      /// variable. Notice that we can only optimize the 'trunc' case
2059      /// because: a. FP conversions lose precision, b. sext/zext may wrap,
2060      /// c. other casts depend on pointer size.
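      /// For example (sketch), "trunc i64 %iv to i32" becomes a trunc of the
      /// scalar induction, broadcast to a vector and made consecutive by
      /// adding the step vector <0, 1, ..., VF-1>.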
2061      if (CI->getOperand(0) == OldInduction &&
2062          it->getOpcode() == Instruction::Trunc) {
2063        Value *ScalarCast = Builder.CreateCast(CI->getOpcode(), Induction,
2064                                               CI->getType());
2065        Value *Broadcasted = getBroadcastInstrs(ScalarCast);
2066        for (unsigned Part = 0; Part < UF; ++Part)
2067          Entry[Part] = getConsecutiveVector(Broadcasted, VF * Part, false);
2068        break;
2069      }
2070      /// Vectorize casts.
2071      Type *DestTy = VectorType::get(CI->getType()->getScalarType(), VF);
2072
2073      VectorParts &A = getVectorValue(it->getOperand(0));
2074      for (unsigned Part = 0; Part < UF; ++Part)
2075        Entry[Part] = Builder.CreateCast(CI->getOpcode(), A[Part], DestTy);
2076      break;
2077    }
2078
2079    case Instruction::Call: {
2080      Module *M = BB->getParent()->getParent();
2081      CallInst *CI = cast<CallInst>(it);
2082      Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
2083      assert(ID && "Not an intrinsic call!");
2084      for (unsigned Part = 0; Part < UF; ++Part) {
2085        SmallVector<Value*, 4> Args;
2086        for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
2087          VectorParts &Arg = getVectorValue(CI->getArgOperand(i));
2088          Args.push_back(Arg[Part]);
2089        }
2090        Type *Tys[] = { VectorType::get(CI->getType()->getScalarType(), VF) };
2091        Function *F = Intrinsic::getDeclaration(M, ID, Tys);
2092        Entry[Part] = Builder.CreateCall(F, Args);
2093      }
2094      break;
2095    }
2096
2097    default:
2098      // All other instructions are unsupported. Scalarize them.
2099      scalarizeInstruction(it);
2100      break;
2101    }// end of switch.
2102  }// end of for_each instr.
2103}
2104
2105void InnerLoopVectorizer::updateAnalysis() {
2106  // Forget the original basic block.
2107  SE->forgetLoop(OrigLoop);
2108
2109  // Update the dominator tree information.
2110  assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
2111         "Entry does not dominate exit.");
2112
2113  for (unsigned I = 1, E = LoopBypassBlocks.size(); I != E; ++I)
2114    DT->addNewBlock(LoopBypassBlocks[I], LoopBypassBlocks[I-1]);
2115  DT->addNewBlock(LoopVectorPreHeader, LoopBypassBlocks.back());
2116  DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader);
2117  DT->addNewBlock(LoopMiddleBlock, LoopBypassBlocks.front());
2118  DT->addNewBlock(LoopScalarPreHeader, LoopMiddleBlock);
2119  DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
2120  DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
2121
2122  DEBUG(DT->verifyAnalysis());
2123}
2124
2125bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
2126  if (!EnableIfConversion)
2127    return false;
2128
2129  assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");
2130  std::vector<BasicBlock*> &LoopBlocks = TheLoop->getBlocksVector();
2131
2132  // Collect the blocks that need predication.
2133  for (unsigned i = 0, e = LoopBlocks.size(); i < e; ++i) {
2134    BasicBlock *BB = LoopBlocks[i];
2135
2136    // We don't support switch statements inside loops.
2137    if (!isa<BranchInst>(BB->getTerminator()))
2138      return false;
2139
2140    // We must have at most two predecessors because we need to convert
2141    // all PHIs to selects.
2142    unsigned Preds = std::distance(pred_begin(BB), pred_end(BB));
2143    if (Preds > 2)
2144      return false;
2145
2146    // We must be able to predicate all blocks that need to be predicated.
2147    if (blockNeedsPredication(BB) && !blockCanBePredicated(BB))
2148      return false;
2149  }
2150
2151  // We can if-convert this loop.
2152  return true;
2153}
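// As an illustrative example, a loop body such as
//   if (a[i] > 0)
//     s += a[i];
// joins in a block with two predecessors whose PHI can become a select, so
// it passes the checks above provided the blocks can be predicated.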
2154
2155bool LoopVectorizationLegality::canVectorize() {
2156  assert(TheLoop->getLoopPreheader() && "No preheader!!");
2157
2158  // We can only vectorize innermost loops.
2159  if (TheLoop->getSubLoopsVector().size())
2160    return false;
2161
2162  // We must have a single backedge.
2163  if (TheLoop->getNumBackEdges() != 1)
2164    return false;
2165
2166  // We must have a single exiting block.
2167  if (!TheLoop->getExitingBlock())
2168    return false;
2169
2170  unsigned NumBlocks = TheLoop->getNumBlocks();
2171
2172  // Check if we can if-convert non single-bb loops.
2173  if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
2174    DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
2175    return false;
2176  }
2177
2178  // Get the loop latch; ScalarEvolution computes the exit count on it below.
2179  BasicBlock *Latch = TheLoop->getLoopLatch();
2180  DEBUG(dbgs() << "LV: Found a loop: " <<
2181        TheLoop->getHeader()->getName() << "\n");
2182
2183  // ScalarEvolution needs to be able to find the exit count.
2184  const SCEV *ExitCount = SE->getExitCount(TheLoop, Latch);
2185  if (ExitCount == SE->getCouldNotCompute()) {
2186    DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
2187    return false;
2188  }
2189
2190  // Do not loop-vectorize loops with a tiny trip count.
2191  unsigned TC = SE->getSmallConstantTripCount(TheLoop, Latch);
2192  if (TC > 0u && TC < TinyTripCountVectorThreshold) {
2193    DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " <<
2194          "This loop is not worth vectorizing.\n");
2195    return false;
2196  }
2197
2198  // Check if we can vectorize the instructions and CFG in this loop.
2199  if (!canVectorizeInstrs()) {
2200    DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
2201    return false;
2202  }
2203
2204  // Go over each instruction and look at memory deps.
2205  if (!canVectorizeMemory()) {
2206    DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
2207    return false;
2208  }
2209
2210  // Collect all of the variables that remain uniform after vectorization.
2211  collectLoopUniforms();
2212
2213  DEBUG(dbgs() << "LV: We can vectorize this loop" <<
2214        (PtrRtCheck.Need ? " (with a runtime bound check)" : "")
2215        << "!\n");
2216
2217  // Okay! We can vectorize. At this point we don't have any other mem analysis
2218  // which may limit our maximum vectorization factor, so just return true with
2219  // no restrictions.
2220  return true;
2221}
2222
2223bool LoopVectorizationLegality::canVectorizeInstrs() {
2224  BasicBlock *PreHeader = TheLoop->getLoopPreheader();
2225  BasicBlock *Header = TheLoop->getHeader();
2226
2227  // For each block in the loop.
2228  for (Loop::block_iterator bb = TheLoop->block_begin(),
2229       be = TheLoop->block_end(); bb != be; ++bb) {
2230
2231    // Scan the instructions in the block and look for hazards.
2232    for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
2233         ++it) {
2234
2235      if (PHINode *Phi = dyn_cast<PHINode>(it)) {
2236        // This should not happen because the loop should be normalized.
2237        if (Phi->getNumIncomingValues() != 2) {
2238          DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
2239          return false;
2240        }
2241
2242        // Check that this PHI type is allowed.
2243        if (!Phi->getType()->isIntegerTy() &&
2244            !Phi->getType()->isFloatingPointTy() &&
2245            !Phi->getType()->isPointerTy()) {
2246          DEBUG(dbgs() << "LV: Found a non-int, non-FP, non-pointer PHI.\n");
2247          return false;
2248        }
2249
2250        // If this PHINode is not in the header block, then we know that we
2251        // can convert it to select during if-conversion. No need to check if
2252        // the PHIs in this block are induction or reduction variables.
2253        if (*bb != Header)
2254          continue;
2255
2256        // This is the value coming from the preheader.
2257        Value *StartValue = Phi->getIncomingValueForBlock(PreHeader);
2258        // Check if this is an induction variable.
2259        InductionKind IK = isInductionVariable(Phi);
2260
2261        if (IK_NoInduction != IK) {
2262          // Int inductions are special because we only allow one IV.
2263          if (IK == IK_IntInduction) {
2264            if (Induction) {
2265              DEBUG(dbgs() << "LV: Found too many inductions."<< *Phi <<"\n");
2266              return false;
2267            }
2268            Induction = Phi;
2269          }
2270
2271          DEBUG(dbgs() << "LV: Found an induction variable.\n");
2272          Inductions[Phi] = InductionInfo(StartValue, IK);
2273          continue;
2274        }
2275
2276        if (AddReductionVar(Phi, RK_IntegerAdd)) {
2277          DEBUG(dbgs() << "LV: Found an ADD reduction PHI."<< *Phi <<"\n");
2278          continue;
2279        }
2280        if (AddReductionVar(Phi, RK_IntegerMult)) {
2281          DEBUG(dbgs() << "LV: Found a MUL reduction PHI."<< *Phi <<"\n");
2282          continue;
2283        }
2284        if (AddReductionVar(Phi, RK_IntegerOr)) {
2285          DEBUG(dbgs() << "LV: Found an OR reduction PHI."<< *Phi <<"\n");
2286          continue;
2287        }
2288        if (AddReductionVar(Phi, RK_IntegerAnd)) {
2289          DEBUG(dbgs() << "LV: Found an AND reduction PHI."<< *Phi <<"\n");
2290          continue;
2291        }
2292        if (AddReductionVar(Phi, RK_IntegerXor)) {
2293          DEBUG(dbgs() << "LV: Found a XOR reduction PHI."<< *Phi <<"\n");
2294          continue;
2295        }
2296        if (AddReductionVar(Phi, RK_FloatMult)) {
2297          DEBUG(dbgs() << "LV: Found an FMult reduction PHI."<< *Phi <<"\n");
2298          continue;
2299        }
2300        if (AddReductionVar(Phi, RK_FloatAdd)) {
2301          DEBUG(dbgs() << "LV: Found an FAdd reduction PHI."<< *Phi <<"\n");
2302          continue;
2303        }
2304
2305        DEBUG(dbgs() << "LV: Found an unidentified PHI."<< *Phi <<"\n");
2306        return false;
2307      }// end of PHI handling
2308
2309      // We still don't handle functions.
2310      CallInst *CI = dyn_cast<CallInst>(it);
2311      if (CI && !getIntrinsicIDForCall(CI, TLI)) {
2312        DEBUG(dbgs() << "LV: Found a call site.\n");
2313        return false;
2314      }
2315
2316      // Check that the instruction return type is vectorizable.
2317      if (!VectorType::isValidElementType(it->getType()) &&
2318          !it->getType()->isVoidTy()) {
2319        DEBUG(dbgs() << "LV: Found an unvectorizable type.\n");
2320        return false;
2321      }
2322
2323      // Check that the stored type is vectorizable.
2324      if (StoreInst *ST = dyn_cast<StoreInst>(it)) {
2325        Type *T = ST->getValueOperand()->getType();
2326        if (!VectorType::isValidElementType(T))
2327          return false;
2328      }
2329
2330      // Reduction instructions are allowed to have exit users.
2331      // All other instructions must not have external users.
2332      if (!AllowedExit.count(it))
2333        // Check that all of the users of the instruction are inside the loop.
2334        for (Value::use_iterator I = it->use_begin(), E = it->use_end();
2335             I != E; ++I) {
2336          Instruction *U = cast<Instruction>(*I);
2337          // This user may be a reduction exit value.
2338          if (!TheLoop->contains(U)) {
2339            DEBUG(dbgs() << "LV: Found an outside user for : "<< *U << "\n");
2340            return false;
2341          }
2342        }
2343    } // next instr.
2344
2345  }
2346
2347  if (!Induction) {
2348    DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
2349    assert(getInductionVars()->size() && "No induction variables");
2350  }
2351
2352  return true;
2353}
2354
2355void LoopVectorizationLegality::collectLoopUniforms() {
2356  // We now know that the loop is vectorizable!
2357  // Collect variables that will remain uniform after vectorization.
2358  std::vector<Value*> Worklist;
2359  BasicBlock *Latch = TheLoop->getLoopLatch();
2360
2361  // Start with the conditional branch and walk up the block.
2362  Worklist.push_back(Latch->getTerminator()->getOperand(0));
2363
2364  while (Worklist.size()) {
2365    Instruction *I = dyn_cast<Instruction>(Worklist.back());
2366    Worklist.pop_back();
2367
2368    // Look at instructions inside this loop.
2369    // Stop when reaching PHI nodes.
2370    // TODO: we need to follow values all over the loop, not only in this block.
2371    if (!I || !TheLoop->contains(I) || isa<PHINode>(I))
2372      continue;
2373
2374    // This is a known uniform.
2375    Uniforms.insert(I);
2376
2377    // Insert all operands.
2378    for (unsigned i = 0, Op = I->getNumOperands(); i < Op; ++i) {
2379      Worklist.push_back(I->getOperand(i));
2380    }
2381  }
2382}
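// For example (sketch), in a loop whose latch branches on
// "icmp eq i64 %iv.next, %n", the walk above collects the compare and
// %iv.next; such values only ever need one scalar copy per unrolled
// iteration instead of a full vector.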
2383
2384AliasAnalysis::Location
2385LoopVectorizationLegality::getLoadStoreLocation(Instruction *Inst) {
2386  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
2387    return AA->getLocation(Store);
2388  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
2389    return AA->getLocation(Load);
2390
2391  llvm_unreachable("Should be either load or store instruction");
2392}
2393
2394bool
2395LoopVectorizationLegality::hasPossibleGlobalWriteReorder(
2396                                                Value *Object,
2397                                                Instruction *Inst,
2398                                                AliasMultiMap& WriteObjects,
2399                                                unsigned MaxByteWidth) {
2400
2401  AliasAnalysis::Location ThisLoc = getLoadStoreLocation(Inst);
2402
2403  std::vector<Instruction*>::iterator
2404              it = WriteObjects[Object].begin(),
2405              end = WriteObjects[Object].end();
2406
2407  for (; it != end; ++it) {
2408    Instruction* I = *it;
2409    if (I == Inst)
2410      continue;
2411
2412    AliasAnalysis::Location ThatLoc = getLoadStoreLocation(I);
2413    if (AA->alias(ThisLoc.getWithNewSize(MaxByteWidth),
2414                  ThatLoc.getWithNewSize(MaxByteWidth)))
2415      return true;
2416  }
2417  return false;
2418}
2419
2420bool LoopVectorizationLegality::canVectorizeMemory() {
2421
2422  if (TheLoop->isAnnotatedParallel()) {
2423    DEBUG(dbgs()
2424          << "LV: The loop is annotated parallel; ignoring memory dependency "
2425          << "checks.\n");
2426    return true;
2427  }
2428
2429  typedef SmallVector<Value*, 16> ValueVector;
2430  typedef SmallPtrSet<Value*, 16> ValueSet;
2431  // Holds the Load and Store *instructions*.
2432  ValueVector Loads;
2433  ValueVector Stores;
2434  PtrRtCheck.Pointers.clear();
2435  PtrRtCheck.Need = false;
2436
2437  // For each block.
2438  for (Loop::block_iterator bb = TheLoop->block_begin(),
2439       be = TheLoop->block_end(); bb != be; ++bb) {
2440
2441    // Scan the BB and collect legal loads and stores.
2442    for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
2443         ++it) {
2444
2445      // If this is a load, save it. If this instruction can read from memory
2446      // but is not a load, then we quit. Notice that we don't handle function
2447      // calls that read or write.
2448      if (it->mayReadFromMemory()) {
2449        LoadInst *Ld = dyn_cast<LoadInst>(it);
2450        if (!Ld) return false;
2451        if (!Ld->isSimple()) {
2452          DEBUG(dbgs() << "LV: Found a non-simple load.\n");
2453          return false;
2454        }
2455        Loads.push_back(Ld);
2456        continue;
2457      }
2458
2459      // Save 'store' instructions. Abort if other instructions write to memory.
2460      if (it->mayWriteToMemory()) {
2461        StoreInst *St = dyn_cast<StoreInst>(it);
2462        if (!St) return false;
2463        if (!St->isSimple()) {
2464          DEBUG(dbgs() << "LV: Found a non-simple store.\n");
2465          return false;
2466        }
2467        Stores.push_back(St);
2468      }
2469    } // next instr.
2470  } // next block.
2471
2472  // Now we have two lists that hold the loads and the stores.
2473  // Next, we find the pointers that they use.
2474
2475  // Check if we see any stores. If there are no stores, then we don't
2476  // care if the pointers are *restrict*.
2477  if (!Stores.size()) {
2478    DEBUG(dbgs() << "LV: Found a read-only loop!\n");
2479    return true;
2480  }
2481
2482  // Holds the read and read-write *pointers* that we find. These maps hold
2483  // unique values for pointers (so no need for multi-map).
2484  AliasMap Reads;
2485  AliasMap ReadWrites;
2486
2487  // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
2488  // multiple times on the same object. If the ptr is accessed twice, once
2489  // for read and once for write, it will only appear once (on the write
2490  // list). This is okay, since we are going to check for conflicts between
2491  // writes and between reads and writes, but not between reads and reads.
2492  ValueSet Seen;
2493
2494  ValueVector::iterator I, IE;
2495  for (I = Stores.begin(), IE = Stores.end(); I != IE; ++I) {
2496    StoreInst *ST = cast<StoreInst>(*I);
2497    Value* Ptr = ST->getPointerOperand();
2498
2499    if (isUniform(Ptr)) {
2500      DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
2501      return false;
2502    }
2503
2504    // If we did *not* see this pointer before, insert it to
2505    // the read-write list. At this phase it is only a 'write' list.
2506    if (Seen.insert(Ptr))
2507      ReadWrites.insert(std::make_pair(Ptr, ST));
2508  }
2509
2510  for (I = Loads.begin(), IE = Loads.end(); I != IE; ++I) {
2511    LoadInst *LD = cast<LoadInst>(*I);
2512    Value* Ptr = LD->getPointerOperand();
2513    // If we did *not* see this pointer before, insert it to the
2514    // read list. If we *did* see it before, then it is already in
2515    // the read-write list. This allows us to vectorize expressions
2516    // such as A[i] += x;  Because the address of A[i] is a read-write
2517    // pointer. This only works if the index of A[i] is consecutive.
2518    // If the address of i is unknown (for example A[B[i]]) then we may
2519    // read a few words, modify, and write a few words, and some of the
2520    // words may be written to the same address.
2521    if (Seen.insert(Ptr) || 0 == isConsecutivePtr(Ptr))
2522      Reads.insert(std::make_pair(Ptr, LD));
2523  }
2524
2525  // If we write (or read-write) to a single destination and there are no
2526  // other reads in this loop then it is safe to vectorize.
2527  if (ReadWrites.size() == 1 && Reads.size() == 0) {
2528    DEBUG(dbgs() << "LV: Found a write-only loop!\n");
2529    return true;
2530  }
2531
2532  // Find pointers with computable bounds. We are going to use this information
2533  // to place a runtime bound check.
2534  bool CanDoRT = true;
2535  AliasMap::iterator MI, ME;
2536  for (MI = ReadWrites.begin(), ME = ReadWrites.end(); MI != ME; ++MI) {
2537    Value *V = (*MI).first;
2538    if (hasComputableBounds(V)) {
2539      PtrRtCheck.insert(SE, TheLoop, V);
2540      DEBUG(dbgs() << "LV: Found a runtime check ptr:" << *V <<"\n");
2541    } else {
2542      CanDoRT = false;
2543      break;
2544    }
2545  }
2546  for (MI = Reads.begin(), ME = Reads.end(); MI != ME; ++MI) {
2547    Value *V = (*MI).first;
2548    if (hasComputableBounds(V)) {
2549      PtrRtCheck.insert(SE, TheLoop, V);
2550      DEBUG(dbgs() << "LV: Found a runtime check ptr:" << *V <<"\n");
2551    } else {
2552      CanDoRT = false;
2553      break;
2554    }
2555  }
2556
2557  // Check that we did not collect too many pointers or find an
2558  // unsizeable pointer.
2559  if (!CanDoRT || PtrRtCheck.Pointers.size() > RuntimeMemoryCheckThreshold) {
2560    PtrRtCheck.reset();
2561    CanDoRT = false;
2562  }
2563
2564  if (CanDoRT) {
2565    DEBUG(dbgs() << "LV: We can perform a memory runtime check if needed.\n");
2566  }
2567
2568  bool NeedRTCheck = false;
2569
2570  // Biggest vectorized access possible, vector width * unroll factor.
2571  // TODO: We're being very pessimistic here, find a way to know the
2572  // real access width before getting here.
2573  unsigned MaxByteWidth = (TTI->getRegisterBitWidth(true) / 8) *
2574                           TTI->getMaximumUnrollFactor();
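  // E.g. (sketch) with 256-bit vector registers and a maximum unroll factor
  // of 2 this assumes accesses of up to (256 / 8) * 2 = 64 bytes.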
2575  // Now that the pointers are in two lists (Reads and ReadWrites), we
2576  // can check that there are no conflicts between each of the writes and
2577  // between the writes to the reads.
2578  // Note that WriteObjects duplicates the stores (indexed now by underlying
2579  // objects) to avoid pointing to elements inside ReadWrites.
2580  // TODO: Maybe create a new type where they can interact without duplication.
2581  AliasMultiMap WriteObjects;
2582  ValueVector TempObjects;
2583
2584  // Check that the read-writes do not conflict with other read-write
2585  // pointers.
2586  bool AllWritesIdentified = true;
2587  for (MI = ReadWrites.begin(), ME = ReadWrites.end(); MI != ME; ++MI) {
2588    Value *Val = (*MI).first;
2589    Instruction *Inst = (*MI).second;
2590
2591    GetUnderlyingObjects(Val, TempObjects, DL);
2592    for (ValueVector::iterator UI=TempObjects.begin(), UE=TempObjects.end();
2593         UI != UE; ++UI) {
2594      if (!isIdentifiedObject(*UI)) {
2595        DEBUG(dbgs() << "LV: Found an unidentified write ptr:"<< **UI <<"\n");
2596        NeedRTCheck = true;
2597        AllWritesIdentified = false;
2598      }
2599
2600      // Never seen it before, can't alias.
2601      if (WriteObjects[*UI].empty()) {
2602        DEBUG(dbgs() << "LV: Adding Underlying value:" << **UI <<"\n");
2603        WriteObjects[*UI].push_back(Inst);
2604        continue;
2605      }
2606      // Direct alias found.
2607      if (!AA || !isa<GlobalValue>(*UI)) {
2608        DEBUG(dbgs() << "LV: Found a possible write-write reorder:"
2609              << **UI <<"\n");
2610        return false;
2611      }
2612      DEBUG(dbgs() << "LV: Found a conflicting global value:"
2613            << **UI <<"\n");
2614      DEBUG(dbgs() << "LV: While examining store:" << *Inst <<"\n");
2615      DEBUG(dbgs() << "LV: On value:" << *Val <<"\n");
2616
2617      // Both are global values; check whether the accesses actually alias.
2618      if (hasPossibleGlobalWriteReorder(*UI,
2619                                        Inst,
2620                                        WriteObjects,
2621                                        MaxByteWidth)) {
2622        DEBUG(dbgs() << "LV: Found a possible write-write reorder:"
2623              << **UI << "\n");
2624        return false;
2625      }
2626
2627      // Didn't alias, insert into map for further reference.
2628      WriteObjects[*UI].push_back(Inst);
2629    }
2630    TempObjects.clear();
2631  }
2632
2633  // Check that the reads don't conflict with the read-writes.
2634  for (MI = Reads.begin(), ME = Reads.end(); MI != ME; ++MI) {
2635    Value *Val = (*MI).first;
2636    GetUnderlyingObjects(Val, TempObjects, DL);
2637    for (ValueVector::iterator UI=TempObjects.begin(), UE=TempObjects.end();
2638         UI != UE; ++UI) {
2639      // If all of the writes are identified then we don't care if the read
2640      // pointer is identified or not.
2641      if (!AllWritesIdentified && !isIdentifiedObject(*UI)) {
2642        DEBUG(dbgs() << "LV: Found an unidentified read ptr:"<< **UI <<"\n");
2643        NeedRTCheck = true;
2644      }
2645
2646      // Never seen it before, can't alias.
2647      if (WriteObjects[*UI].empty())
2648        continue;
2649      // Direct alias found.
2650      if (!AA || !isa<GlobalValue>(*UI)) {
2651        DEBUG(dbgs() << "LV: Found a possible read-write reorder:"
2652              << **UI <<"\n");
2653        return false;
2654      }
2655      DEBUG(dbgs() << "LV: Found a global value:  "
2656            << **UI <<"\n");
2657      Instruction *Inst = (*MI).second;
2658      DEBUG(dbgs() << "LV: While examining load:" << *Inst <<"\n");
2659      DEBUG(dbgs() << "LV: On value:" << *Val <<"\n");
2660
2661      // Both are global values; check whether the accesses actually alias.
2662      if (hasPossibleGlobalWriteReorder(*UI,
2663                                        Inst,
2664                                        WriteObjects,
2665                                        MaxByteWidth)) {
2666        DEBUG(dbgs() << "LV: Found a possible read-write reorder:"
2667              << **UI << "\n");
2668        return false;
2669      }
2670    }
2671    TempObjects.clear();
2672  }
2673
2674  PtrRtCheck.Need = NeedRTCheck;
2675  if (NeedRTCheck && !CanDoRT) {
2676    DEBUG(dbgs() << "LV: We can't vectorize because we can't find " <<
2677          "the array bounds.\n");
2678    PtrRtCheck.reset();
2679    return false;
2680  }
2681
2682  DEBUG(dbgs() << "LV: We " << (NeedRTCheck ? "" : "don't ") <<
2683        "need a runtime memory check.\n");
2684  return true;
2685}
2686
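// Detect reduction variables rooted at the given PHI node. As an illustration
// (a sketch, not code from this file), 'sum' in the following C loop is an
// RK_IntegerAdd reduction: the PHI for 'sum' and the add form a cycle that
// ends back at the PHI, and the add is the single value used after the loop:
//
//   int sum = 0;                // RdxStart flows in from the preheader.
//   for (int i = 0; i < n; ++i)
//     sum += A[i];              // The add is the exit instruction.
//   use(sum);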
2687bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi,
2688                                                ReductionKind Kind) {
2689  if (Phi->getNumIncomingValues() != 2)
2690    return false;
2691
2692  // Reduction variables are only found in the loop header block.
2693  if (Phi->getParent() != TheLoop->getHeader())
2694    return false;
2695
2696  // Obtain the reduction start value from the value that comes from the loop
2697  // preheader.
2698  Value *RdxStart = Phi->getIncomingValueForBlock(TheLoop->getLoopPreheader());
2699
2700  // ExitInstruction is the single value which is used outside the loop.
2701  // We only allow for a single reduction value to be used outside the loop.
2702  // This includes users of the reduction and of the variables that form the
2703  // cycle which ends in the phi node.
2704  Instruction *ExitInstruction = 0;
2705  // Indicates that we found a binary operation in our scan.
2706  bool FoundBinOp = false;
2707
2708  // Iter is our iterator. We start with the PHI node and scan for all of the
2709  // users of this instruction. All users must be instructions that can be
2710  // used as reduction variables (such as ADD). We may have a single
2711  // out-of-loop user. The cycle must end with the original PHI.
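  // For example (illustrative, matching the 'sum += A[i]' sketch above): the
  // scan visits phi -> add -> phi; the add is the single in-loop user and,
  // when 'sum' is read after the loop, also the exit instruction.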
2712  Instruction *Iter = Phi;
2713  while (true) {
2714    // If the instruction has no users then this is a broken
2715    // chain and can't be a reduction variable.
2716    if (Iter->use_empty())
2717      return false;
2718
2719    // Did we find a user inside this loop already?
2720    bool FoundInBlockUser = false;
2721    // Did we reach the initial PHI node already?
2722    bool FoundStartPHI = false;
2723
2724    // Is this a binary operation?
2725    FoundBinOp |= !isa<PHINode>(Iter);
2726
2727    // Remember the current instruction.
2728    Instruction *OldIter = Iter;
2729
2730    // For each of the *users* of iter.
2731    for (Value::use_iterator it = Iter->use_begin(), e = Iter->use_end();
2732         it != e; ++it) {
2733      Instruction *U = cast<Instruction>(*it);
2734      // We already know that the PHI is a user.
2735      if (U == Phi) {
2736        FoundStartPHI = true;
2737        continue;
2738      }
2739
2740      // Check if we found the exit user.
2741      BasicBlock *Parent = U->getParent();
2742      if (!TheLoop->contains(Parent)) {
2743        // Bail out if we find multiple outside users.
2744        if (ExitInstruction != 0)
2745          return false;
2746        ExitInstruction = Iter;
2747      }
2748
2749      // We allow in-loop PHINodes which are not the original reduction PHI
2750      // node. If this PHI is the only user of Iter (happens in IF w/ no ELSE
2751      // structure) then don't skip this PHI.
2752      if (isa<PHINode>(Iter) && isa<PHINode>(U) &&
2753          U->getParent() != TheLoop->getHeader() &&
2754          TheLoop->contains(U) &&
2755          Iter->hasNUsesOrMore(2))
2756        continue;
2757
2758      // We can't have multiple inside users.
2759      if (FoundInBlockUser)
2760        return false;
2761      FoundInBlockUser = true;
2762
2763      // Any reduction instr must be of one of the allowed kinds.
2764      if (!isReductionInstr(U, Kind))
2765        return false;
2766
2767      // Reductions over non-commutative instructions such as Div and Sub are
2768      // only possible if the LHS is the reduction variable.
2769      if (!U->isCommutative() && !isa<PHINode>(U) && U->getOperand(0) != Iter)
2770        return false;
2771
2772      Iter = U;
2773    }
2774
2775    // If all uses were skipped this can't be a reduction variable.
2776    if (Iter == OldIter)
2777      return false;
2778
2779    // We found a reduction var if we have reached the original
2780    // phi node and we only have a single instruction with out-of-loop
2781    // users.
2782    if (FoundStartPHI) {
2783      // This instruction is allowed to have out-of-loop users.
2784      AllowedExit.insert(ExitInstruction);
2785
2786      // Save the description of this reduction variable.
2787      ReductionDescriptor RD(RdxStart, ExitInstruction, Kind);
2788      Reductions[Phi] = RD;
2789      // We've ended the cycle. This is a reduction variable if we have an
2790      // outside user and it has a binary op.
2791      return FoundBinOp && ExitInstruction;
2792    }
2793  }
2794}
2795
2796bool
2797LoopVectorizationLegality::isReductionInstr(Instruction *I,
2798                                            ReductionKind Kind) {
2799  bool FP = I->getType()->isFloatingPointTy();
2800  bool FastMath = (FP && I->isCommutative() && I->isAssociative());
2801
2802  switch (I->getOpcode()) {
2803  default:
2804    return false;
2805  case Instruction::PHI:
2806    if (FP && (Kind != RK_FloatMult && Kind != RK_FloatAdd))
2807      return false;
2808    // A PHI node may be part of the reduction cycle.
2809    return true;
2810  case Instruction::Sub:
2811  case Instruction::Add:
2812    return Kind == RK_IntegerAdd;
2813  case Instruction::SDiv:
2814  case Instruction::UDiv:
2815  case Instruction::Mul:
2816    return Kind == RK_IntegerMult;
2817  case Instruction::And:
2818    return Kind == RK_IntegerAnd;
2819  case Instruction::Or:
2820    return Kind == RK_IntegerOr;
2821  case Instruction::Xor:
2822    return Kind == RK_IntegerXor;
2823  case Instruction::FMul:
2824    return Kind == RK_FloatMult && FastMath;
2825  case Instruction::FAdd:
2826    return Kind == RK_FloatAdd && FastMath;
2827  }
2828}
2829
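// Classify the induction described by the given PHI. For example
// (illustrative): an integer IV with step +1 is IK_IntInduction and step -1
// is IK_ReverseIntInduction; an i64* IV that steps by one element (+8 bytes)
// is IK_PtrInduction, and one that steps by -8 bytes is
// IK_ReversePtrInduction.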
2830LoopVectorizationLegality::InductionKind
2831LoopVectorizationLegality::isInductionVariable(PHINode *Phi) {
2832  Type *PhiTy = Phi->getType();
2833  // We only handle integer and pointer induction variables.
2834  if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy())
2835    return IK_NoInduction;
2836
2837  // Check that the PHI is consecutive.
2838  const SCEV *PhiScev = SE->getSCEV(Phi);
2839  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
2840  if (!AR) {
2841    DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
2842    return IK_NoInduction;
2843  }
2844  const SCEV *Step = AR->getStepRecurrence(*SE);
2845
2846  // Integer inductions need to have a stride of plus or minus one.
2847  if (PhiTy->isIntegerTy()) {
2848    if (Step->isOne())
2849      return IK_IntInduction;
2850    if (Step->isAllOnesValue())
2851      return IK_ReverseIntInduction;
2852    return IK_NoInduction;
2853  }
2854
2855  // Calculate the pointer stride and check if it is consecutive.
2856  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
2857  if (!C)
2858    return IK_NoInduction;
2859
2860  assert(PhiTy->isPointerTy() && "The PHI must be a pointer");
2861  uint64_t Size = DL->getTypeAllocSize(PhiTy->getPointerElementType());
2862  if (C->getValue()->equalsInt(Size))
2863    return IK_PtrInduction;
2864  else if (C->getValue()->equalsInt(0 - Size))
2865    return IK_ReversePtrInduction;
2866
2867  return IK_NoInduction;
2868}
2869
2870bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
2871  Value *In0 = const_cast<Value*>(V);
2872  PHINode *PN = dyn_cast_or_null<PHINode>(In0);
2873  if (!PN)
2874    return false;
2875
2876  return Inductions.count(PN);
2877}
2878
2879bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB)  {
2880  assert(TheLoop->contains(BB) && "Unknown block used");
2881
2882  // Blocks that do not dominate the latch need predication.
2883  BasicBlock *Latch = TheLoop->getLoopLatch();
2884  return !DT->dominates(BB, Latch);
2885}
2886
2887bool LoopVectorizationLegality::blockCanBePredicated(BasicBlock *BB) {
2888  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
2889    // We don't predicate loads/stores at the moment.
2890    if (it->mayReadFromMemory() || it->mayWriteToMemory() || it->mayThrow())
2891      return false;
2892
2893    // The instructions below can trap.
2894    switch (it->getOpcode()) {
2895    default: continue;
2896    case Instruction::UDiv:
2897    case Instruction::SDiv:
2898    case Instruction::URem:
2899    case Instruction::SRem:
2900      return false;
2901    }
2902  }
2903
2904  return true;
2905}
2906
2907bool LoopVectorizationLegality::hasComputableBounds(Value *Ptr) {
2908  const SCEV *PhiScev = SE->getSCEV(Ptr);
2909  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
2910  if (!AR)
2911    return false;
2912
2913  return AR->isAffine();
2914}
2915
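// For example (illustrative numbers): with 128-bit vector registers and a
// widest loop type of 32 bits, MaxVectorSize below is 128 / 32 = 4, so the
// candidate widths compared by the cost model are 1, 2 and 4.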
2916LoopVectorizationCostModel::VectorizationFactor
2917LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize,
2918                                                      unsigned UserVF) {
2919  // Width 1 means no vectorization.
2920  VectorizationFactor Factor = { 1U, 0U };
2921  if (OptForSize && Legal->getRuntimePointerCheck()->Need) {
2922    DEBUG(dbgs() << "LV: Aborting. Runtime ptr check is required in Os.\n");
2923    return Factor;
2924  }
2925
2926  // Find the trip count.
2927  unsigned TC = SE->getSmallConstantTripCount(TheLoop, TheLoop->getLoopLatch());
2928  DEBUG(dbgs() << "LV: Found trip count:"<<TC<<"\n");
2929
2930  unsigned WidestType = getWidestType();
2931  unsigned WidestRegister = TTI.getRegisterBitWidth(true);
2932  unsigned MaxVectorSize = WidestRegister / WidestType;
2933  DEBUG(dbgs() << "LV: The widest type: " << WidestType << " bits.\n");
2934  DEBUG(dbgs() << "LV: The widest register is: " << WidestRegister << " bits.\n");
2935
2936  if (MaxVectorSize == 0) {
2937    DEBUG(dbgs() << "LV: The target has no vector registers.\n");
2938    MaxVectorSize = 1;
2939  }
2940
2941  assert(MaxVectorSize <= 32 && "Did not expect to pack so many elements"
2942         " into one vector!");
2943
2944  unsigned VF = MaxVectorSize;
2945
2946  // If we optimize the program for size, avoid creating the tail loop.
2947  if (OptForSize) {
2948    // If the trip count is unknown (zero) or tiny, don't try to vectorize.
2949    if (TC < 2) {
2950      DEBUG(dbgs() << "LV: Aborting. A tail loop is required in Os.\n");
2951      return Factor;
2952    }
2953
2954    // Find the maximum SIMD width that can fit within the trip count.
2955    VF = TC % MaxVectorSize;
2956
2957    if (VF == 0)
2958      VF = MaxVectorSize;
2959
2960    // If the trip count that we found modulo the vectorization factor is not
2961    // zero then we require a tail.
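    // For example (illustrative): TC == 10 with MaxVectorSize == 4 gives
    // VF == 10 % 4 == 2, which needs no tail; TC == 9 gives VF == 1 and we
    // abort below; TC == 8 gives VF == 0, so we keep MaxVectorSize == 4.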
2962    if (VF < 2) {
2963      DEBUG(dbgs() << "LV: Aborting. A tail loop is required in Os.\n");
2964      return Factor;
2965    }
2966  }
2967
2968  if (UserVF != 0) {
2969    assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
2970    DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
2971
2972    Factor.Width = UserVF;
2973    return Factor;
2974  }
2975
2976  float Cost = expectedCost(1);
2977  unsigned Width = 1;
2978  DEBUG(dbgs() << "LV: Scalar loop costs: "<< (int)Cost << ".\n");
2979  for (unsigned i = 2; i <= VF; i *= 2) {
2980    // Notice that the vector loop needs to be executed fewer times, so
2981    // we need to divide the cost of the vector loop by the width of
2982    // the vector elements.
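    // For example (illustrative costs): if the scalar loop costs 8 and the
    // VF == 4 loop body costs 20, the per-lane cost is 20 / 4 == 5, so the
    // VF == 4 plan is preferred over the scalar one.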
2983    float VectorCost = expectedCost(i) / (float)i;
2984    DEBUG(dbgs() << "LV: Vector loop of width "<< i << " costs: " <<
2985          (int)VectorCost << ".\n");
2986    if (VectorCost < Cost) {
2987      Cost = VectorCost;
2988      Width = i;
2989    }
2990  }
2991
2992  DEBUG(dbgs() << "LV: Selecting VF = : "<< Width << ".\n");
2993  Factor.Width = Width;
2994  Factor.Cost = Width * Cost;
2995  return Factor;
2996}
2997
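// For example (illustrative): a loop that loads i8 values but accumulates
// them into an i32 reduction PHI has a widest type of 32 bits, so with
// 128-bit registers selectVectorizationFactor gets MaxVectorSize == 4
// rather than 16.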
2998unsigned LoopVectorizationCostModel::getWidestType() {
2999  unsigned MaxWidth = 8;
3000
3001  // For each block.
3002  for (Loop::block_iterator bb = TheLoop->block_begin(),
3003       be = TheLoop->block_end(); bb != be; ++bb) {
3004    BasicBlock *BB = *bb;
3005
3006    // For each instruction in the loop.
3007    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
3008      Type *T = it->getType();
3009
3010      // Only examine Loads, Stores and PHINodes.
3011      if (!isa<LoadInst>(it) && !isa<StoreInst>(it) && !isa<PHINode>(it))
3012        continue;
3013
3014      // Examine PHI nodes that are reduction variables.
3015      if (PHINode *PN = dyn_cast<PHINode>(it))
3016        if (!Legal->getReductionVars()->count(PN))
3017          continue;
3018
3019      // Examine the stored values.
3020      if (StoreInst *ST = dyn_cast<StoreInst>(it))
3021        T = ST->getValueOperand()->getType();
3022
3023      // Ignore loaded pointer types and stored pointer types that are not
3024      // consecutive. However, we do want to take consecutive stores/loads of
3025      // pointer vectors into account.
3026      if (T->isPointerTy() && !isConsecutiveLoadOrStore(it))
3027        continue;
3028
3029      MaxWidth = std::max(MaxWidth,
3030                          (unsigned)DL->getTypeSizeInBits(T->getScalarType()));
3031    }
3032  }
3033
3034  return MaxWidth;
3035}
3036
3037unsigned
3038LoopVectorizationCostModel::selectUnrollFactor(bool OptForSize,
3039                                               unsigned UserUF,
3040                                               unsigned VF,
3041                                               unsigned LoopCost) {
3042
3043  // -- The unroll heuristics --
3044  // We unroll the loop in order to expose ILP and reduce the loop overhead.
3045  // There are many micro-architectural considerations that we can't predict
3046  // at this level. For example, frontend pressure (on decode or fetch) due to
3047  // code size, or the number and capabilities of the execution ports.
3048  //
3049  // We use the following heuristics to select the unroll factor:
3050  // 1. If the code has reductions then we unroll in order to break the
3051  // cross-iteration dependency.
3052  // 2. If the loop is really small then we unroll in order to reduce the loop
3053  // overhead.
3054  // 3. We don't unroll if we think that we will spill registers to memory due
3055  // to the increased register pressure.
3056
3057  // Use the user preference, unless 'auto' is selected.
3058  if (UserUF != 0)
3059    return UserUF;
3060
3061  // When we optimize for size we don't unroll.
3062  if (OptForSize)
3063    return 1;
3064
3065  // Do not unroll loops with a relatively small trip count.
3066  unsigned TC = SE->getSmallConstantTripCount(TheLoop,
3067                                              TheLoop->getLoopLatch());
3068  if (TC > 1 && TC < TinyTripCountUnrollThreshold)
3069    return 1;
3070
3071  unsigned TargetVectorRegisters = TTI.getNumberOfRegisters(true);
3072  DEBUG(dbgs() << "LV: The target has " << TargetVectorRegisters <<
3073        " vector registers\n");
3074
3075  LoopVectorizationCostModel::RegisterUsage R = calculateRegisterUsage();
3076  // We divide by these constants below, so assume that we have at least one
3077  // instruction that uses at least one register.
3078  R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
3079  R.NumInstructions = std::max(R.NumInstructions, 1U);
3080
3081  // We calculate the unroll factor using the following formula.
3082  // Subtract the number of loop invariants from the number of available
3083  // registers. These registers are used by all of the unrolled instances.
3084  // Next, divide the remaining registers by the number of registers that the
3085  // loop requires, in order to estimate how many parallel instances
3086  // fit without causing spills.
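  // For example (illustrative): with 16 vector registers, 2 loop-invariant
  // values and at most 7 simultaneously live in-loop values, the estimate is
  // UF = (16 - 2) / 7 = 2.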
3087  unsigned UF = (TargetVectorRegisters - R.LoopInvariantRegs) / R.MaxLocalUsers;
3088
3089  // The maximum unroll factor that the target allows.
3090  unsigned MaxUnrollSize = TTI.getMaximumUnrollFactor();
3091
3092  // If we did not calculate the cost for VF (because the user selected the VF)
3093  // then we calculate the cost of VF here.
3094  if (LoopCost == 0)
3095    LoopCost = expectedCost(VF);
3096
3097  // Clamp the calculated UF to be between 1 and the maximum unroll factor
3098  // that the target allows.
3099  if (UF > MaxUnrollSize)
3100    UF = MaxUnrollSize;
3101  else if (UF < 1)
3102    UF = 1;
3103
3104  if (Legal->getReductionVars()->size()) {
3105    DEBUG(dbgs() << "LV: Unrolling because of reductions.\n");
3106    return UF;
3107  }
3108
3109  // We want to unroll tiny loops in order to reduce the loop overhead.
3110  // We assume that the per-iteration overhead is about 1, and we use the cost
3111  // model to estimate the cost of the loop body; we unroll until that
3112  // overhead is about 5% of the cost of the loop.
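  // For example (illustrative): a loop body costing 9 yields
  // NewUF = 20 / 9 + 1 = 3, which is still clamped by the register-based UF
  // computed above.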
3113  DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << ".\n");
3114  if (LoopCost < 20) {
3115    DEBUG(dbgs() << "LV: Unrolling to reduce branch cost.\n");
3116    unsigned NewUF = 20 / LoopCost + 1;
3117    return std::min(NewUF, UF);
3118  }
3119
3120  DEBUG(dbgs() << "LV: Not unrolling.\n");
3121  return 1;
3122}
3123
3124LoopVectorizationCostModel::RegisterUsage
3125LoopVectorizationCostModel::calculateRegisterUsage() {
3126  // This function calculates the register usage by measuring the highest number
3127  // of values that are alive at a single location. Obviously, this is a very
3128  // rough estimation. We scan the loop in topological order and
3129  // assign a number to each instruction. We use RPO to ensure that defs are
3130  // met before their users. We assume that each instruction that has in-loop
3131  // users starts an interval. We record every time that an in-loop value is
3132  // used, so we have a list of the first and last occurrences of each
3133  // instruction. Next, we transpose this data structure into a multi map that
3134  // holds the list of intervals that *end* at a specific location. This multi
3135  // map allows us to perform a single linear scan. We scan the instructions
3136  // in order and record each time a new interval starts by placing it in a set.
3137  // If we find this value in the multi-map then we remove it from the set.
3138  // The max register usage is the maximum size of the set.
3139  // We also search for instructions that are defined outside the loop, but are
3140  // used inside the loop. We need this number separately from the max-interval
3141  // usage number because when we unroll, loop-invariant values do not take
3142  // more registers.
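  // For example (an illustrative straight-line loop body):
  //   t0 = a + b
  //   t1 = t0 + c
  //   t2 = t0 * t1
  // t0 stays live until its last use in t2, so while visiting t2 both t0 and
  // t1 have open intervals and MaxLocalUsers is 2; a, b and c (defined
  // outside the loop) are counted in LoopInvariantRegs instead.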
3143  LoopBlocksDFS DFS(TheLoop);
3144  DFS.perform(LI);
3145
3146  RegisterUsage R;
3147  R.NumInstructions = 0;
3148
3149  // Each 'key' in the map opens a new interval. The values
3150  // of the map are the index of the 'last seen' usage of the
3151  // instruction that is the key.
3152  typedef DenseMap<Instruction*, unsigned> IntervalMap;
3153  // Maps an index to its instruction.
3154  DenseMap<unsigned, Instruction*> IdxToInstr;
3155  // Marks the end of each interval.
3156  IntervalMap EndPoint;
3157  // Saves the set of instructions that are used in the loop.
3158  SmallSet<Instruction*, 8> Ends;
3159  // Saves the list of values that are used in the loop but are
3160  // defined outside the loop, such as arguments and constants.
3161  SmallPtrSet<Value*, 8> LoopInvariants;
3162
3163  unsigned Index = 0;
3164  for (LoopBlocksDFS::RPOIterator bb = DFS.beginRPO(),
3165       be = DFS.endRPO(); bb != be; ++bb) {
3166    R.NumInstructions += (*bb)->size();
3167    for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
3168         ++it) {
3169      Instruction *I = it;
3170      IdxToInstr[Index++] = I;
3171
3172      // Save the end location of each USE.
3173      for (unsigned i = 0; i < I->getNumOperands(); ++i) {
3174        Value *U = I->getOperand(i);
3175        Instruction *Instr = dyn_cast<Instruction>(U);
3176
3177        // Ignore non-instruction values such as arguments, constants, etc.
3178        if (!Instr) continue;
3179
3180        // If this instruction is outside the loop then record it and continue.
3181        if (!TheLoop->contains(Instr)) {
3182          LoopInvariants.insert(Instr);
3183          continue;
3184        }
3185
3186        // Overwrite previous end points.
3187        EndPoint[Instr] = Index;
3188        Ends.insert(Instr);
3189      }
3190    }
3191  }
3192
3193  // Saves the list of intervals that end with the index in 'key'.
3194  typedef SmallVector<Instruction*, 2> InstrList;
3195  DenseMap<unsigned, InstrList> TransposeEnds;
3196
3197  // Transpose the EndPoints to a list of values that end at each index.
3198  for (IntervalMap::iterator it = EndPoint.begin(), e = EndPoint.end();
3199       it != e; ++it)
3200    TransposeEnds[it->second].push_back(it->first);
3201
3202  SmallSet<Instruction*, 8> OpenIntervals;
3203  unsigned MaxUsage = 0;
3204
3205
3206  DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
3207  for (unsigned int i = 0; i < Index; ++i) {
3208    Instruction *I = IdxToInstr[i];
3209    // Ignore instructions that are never used within the loop.
3210    if (!Ends.count(I)) continue;
3211
3212    // Remove all of the instructions that end at this location.
3213    InstrList &List = TransposeEnds[i];
3214    for (unsigned int j=0, e = List.size(); j < e; ++j)
3215      OpenIntervals.erase(List[j]);
3216
3217    // Count the number of live intervals.
3218    MaxUsage = std::max(MaxUsage, OpenIntervals.size());
3219
3220    DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " <<
3221          OpenIntervals.size() <<"\n");
3222
3223    // Add the current instruction to the list of open intervals.
3224    OpenIntervals.insert(I);
3225  }
3226
3227  unsigned Invariant = LoopInvariants.size();
3228  DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsage << "\n");
3229  DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << "\n");
3230  DEBUG(dbgs() << "LV(REG): LoopSize: " << R.NumInstructions << "\n");
3231
3232  R.LoopInvariantRegs = Invariant;
3233  R.MaxLocalUsers = MaxUsage;
3234  return R;
3235}
3236
3237unsigned LoopVectorizationCostModel::expectedCost(unsigned VF) {
3238  unsigned Cost = 0;
3239
3240  // For each block.
3241  for (Loop::block_iterator bb = TheLoop->block_begin(),
3242       be = TheLoop->block_end(); bb != be; ++bb) {
3243    unsigned BlockCost = 0;
3244    BasicBlock *BB = *bb;
3245
3246    // For each instruction in the old loop.
3247    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
3248      unsigned C = getInstructionCost(it, VF);
3249      BlockCost += C;
3250      DEBUG(dbgs() << "LV: Found an estimated cost of " << C << " for VF " <<
3251            VF << " for instruction: " << *it << "\n");
3252    }
3253
3254    // We assume that if-converted blocks have a 50% chance of being executed.
3255    // When the code is scalar, some of the blocks are avoided due to control flow.
3256    // When the code is vectorized we execute all code paths.
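    // For example (illustrative): a predicated block whose instructions sum
    // to a cost of 10 contributes only 5 to the scalar (VF == 1) estimate.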
3257    if (Legal->blockNeedsPredication(*bb) && VF == 1)
3258      BlockCost /= 2;
3259
3260    Cost += BlockCost;
3261  }
3262
3263  return Cost;
3264}
3265
3266unsigned
3267LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
3268  // If we know that this instruction will remain uniform, check the cost of
3269  // the scalar version.
3270  if (Legal->isUniformAfterVectorization(I))
3271    VF = 1;
3272
3273  Type *RetTy = I->getType();
3274  Type *VectorTy = ToVectorTy(RetTy, VF);
3275
3276  // TODO: We need to estimate the cost of intrinsic calls.
3277  switch (I->getOpcode()) {
3278  case Instruction::GetElementPtr:
3279    // We mark this instruction as zero-cost because the cost of GEPs in
3280    // vectorized code depends on whether the corresponding memory instruction
3281    // is scalarized or not. Therefore, we handle GEPs with the memory
3282    // instruction cost.
3283    return 0;
3284  case Instruction::Br: {
3285    return TTI.getCFInstrCost(I->getOpcode());
3286  }
3287  case Instruction::PHI:
3288    // TODO: If-converted IFs become selects.
3289    return 0;
3290  case Instruction::Add:
3291  case Instruction::FAdd:
3292  case Instruction::Sub:
3293  case Instruction::FSub:
3294  case Instruction::Mul:
3295  case Instruction::FMul:
3296  case Instruction::UDiv:
3297  case Instruction::SDiv:
3298  case Instruction::FDiv:
3299  case Instruction::URem:
3300  case Instruction::SRem:
3301  case Instruction::FRem:
3302  case Instruction::Shl:
3303  case Instruction::LShr:
3304  case Instruction::AShr:
3305  case Instruction::And:
3306  case Instruction::Or:
3307  case Instruction::Xor:
3308    return TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy);
3309  case Instruction::Select: {
3310    SelectInst *SI = cast<SelectInst>(I);
3311    const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
3312    bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
3313    Type *CondTy = SI->getCondition()->getType();
3314    if (ScalarCond)
3315      CondTy = VectorType::get(CondTy, VF);
3316
3317    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy);
3318  }
3319  case Instruction::ICmp:
3320  case Instruction::FCmp: {
3321    Type *ValTy = I->getOperand(0)->getType();
3322    VectorTy = ToVectorTy(ValTy, VF);
3323    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy);
3324  }
3325  case Instruction::Store:
3326  case Instruction::Load: {
3327    StoreInst *SI = dyn_cast<StoreInst>(I);
3328    LoadInst *LI = dyn_cast<LoadInst>(I);
3329    Type *ValTy = (SI ? SI->getValueOperand()->getType() :
3330                   LI->getType());
3331    VectorTy = ToVectorTy(ValTy, VF);
3332
3333    unsigned Alignment = SI ? SI->getAlignment() : LI->getAlignment();
3334    unsigned AS = SI ? SI->getPointerAddressSpace() :
3335      LI->getPointerAddressSpace();
3336    Value *Ptr = SI ? SI->getPointerOperand() : LI->getPointerOperand();
3337    // We add the cost of address computation here instead of with the gep
3338    // instruction because only here do we know whether the operation is
3339    // scalarized.
3340    if (VF == 1)
3341      return TTI.getAddressComputationCost(VectorTy) +
3342        TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
3343
3344    // Scalarized loads/stores.
3345    int Stride = Legal->isConsecutivePtr(Ptr);
3346    bool Reverse = Stride < 0;
3347    if (Stride == 0) {
3348      unsigned Cost = 0;
3349      // The cost of extracting from the value vector and pointer vector.
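      // For example (illustrative): a scalarized store at VF == 4 pays for
      // four pointer extracts, four value extracts, four address
      // computations and four scalar stores.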
3350      Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
3351      for (unsigned i = 0; i < VF; ++i) {
3352        //  The cost of extracting the pointer operand.
3353        Cost += TTI.getVectorInstrCost(Instruction::ExtractElement, PtrTy, i);
3354        // In case of STORE, the cost of ExtractElement from the vector.
3355        // In case of LOAD, the cost of InsertElement into the returned
3356        // vector.
3357        Cost += TTI.getVectorInstrCost(SI ? Instruction::ExtractElement :
3358                                            Instruction::InsertElement,
3359                                            VectorTy, i);
3360      }
3361
3362      // The cost of the scalar loads/stores.
3363      Cost += VF * TTI.getAddressComputationCost(ValTy->getScalarType());
3364      Cost += VF * TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
3365                                       Alignment, AS);
3366      return Cost;
3367    }
3368
3369    // Wide load/stores.
3370    unsigned Cost = TTI.getAddressComputationCost(VectorTy);
3371    Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
3372
3373    if (Reverse)
3374      Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse,
3375                                  VectorTy, 0);
3376    return Cost;
3377  }
3378  case Instruction::ZExt:
3379  case Instruction::SExt:
3380  case Instruction::FPToUI:
3381  case Instruction::FPToSI:
3382  case Instruction::FPExt:
3383  case Instruction::PtrToInt:
3384  case Instruction::IntToPtr:
3385  case Instruction::SIToFP:
3386  case Instruction::UIToFP:
3387  case Instruction::Trunc:
3388  case Instruction::FPTrunc:
3389  case Instruction::BitCast: {
3390    // We optimize the truncation of induction variables.
3391    // The cost of such a truncation is the same as the scalar operation.
3392    if (I->getOpcode() == Instruction::Trunc &&
3393        Legal->isInductionVariable(I->getOperand(0)))
3394      return TTI.getCastInstrCost(I->getOpcode(), I->getType(),
3395                                  I->getOperand(0)->getType());
3396
3397    Type *SrcVecTy = ToVectorTy(I->getOperand(0)->getType(), VF);
3398    return TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy);
3399  }
3400  case Instruction::Call: {
3401    CallInst *CI = cast<CallInst>(I);
3402    Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
3403    assert(ID && "Not an intrinsic call!");
3404    Type *RetTy = ToVectorTy(CI->getType(), VF);
3405    SmallVector<Type*, 4> Tys;
3406    for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i)
3407      Tys.push_back(ToVectorTy(CI->getArgOperand(i)->getType(), VF));
3408    return TTI.getIntrinsicInstrCost(ID, RetTy, Tys);
3409  }
3410  default: {
3411    // We are scalarizing the instruction. Return the cost of the scalar
3412    // instruction, plus the cost of inserting the results into and
3413    // extracting the operands from vector elements, times the vector width.
3414    unsigned Cost = 0;
3415
3416    if (!RetTy->isVoidTy() && VF != 1) {
3417      unsigned InsCost = TTI.getVectorInstrCost(Instruction::InsertElement,
3418                                                VectorTy);
3419      unsigned ExtCost = TTI.getVectorInstrCost(Instruction::ExtractElement,
3420                                                VectorTy);
3421
3422      // The cost of inserting the results plus extracting each one of the
3423      // operands.
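      // For example (illustrative): at VF == 4, an instruction with two
      // operands adds 4 * (InsCost + 2 * ExtCost) of packing overhead on top
      // of the scalarized operations themselves.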
3424      Cost += VF * (InsCost + ExtCost * I->getNumOperands());
3425    }
3426
3427    // The cost of executing VF copies of the scalar instruction. This opcode
3428    // is unknown. Assume that it is the same as 'mul'.
3429    Cost += VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy);
3430    return Cost;
3431  }
3432  }// end of switch.
3433}
3434
3435Type* LoopVectorizationCostModel::ToVectorTy(Type *Scalar, unsigned VF) {
3436  if (Scalar->isVoidTy() || VF == 1)
3437    return Scalar;
3438  return VectorType::get(Scalar, VF);
3439}
3440
3441char LoopVectorize::ID = 0;
3442static const char lv_name[] = "Loop Vectorization";
3443INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
3444INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
3445INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
3446INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
3447INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
3448INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
3449
3450namespace llvm {
3451  Pass *createLoopVectorizePass() {
3452    return new LoopVectorize();
3453  }
3454}
3455
3456bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
3457  // Check for a store.
3458  if (StoreInst *ST = dyn_cast<StoreInst>(Inst))
3459    return Legal->isConsecutivePtr(ST->getPointerOperand()) != 0;
3460
3461  // Check for a load.
3462  if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
3463    return Legal->isConsecutivePtr(LI->getPointerOperand()) != 0;
3464
3465  return false;
3466}
3467