SeparateConstOffsetFromGEP.cpp revision 4c5e43da7792f75567b693105cc53e3f1992ad98
//===-- SeparateConstOffsetFromGEP.cpp - ------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loop unrolling may create many similar GEPs for array accesses.
// e.g., a 2-level loop
//
// float a[32][32]; // global variable
//
// for (int i = 0; i < 2; ++i) {
//   for (int j = 0; j < 2; ++j) {
//     ...
//     ... = a[x + i][y + j];
//     ...
//   }
// }
//
// will probably be unrolled to:
//
// gep %a, 0, %x, %y; load
// gep %a, 0, %x, %y + 1; load
// gep %a, 0, %x + 1, %y; load
// gep %a, 0, %x + 1, %y + 1; load
//
// LLVM's GVN does not use partial redundancy elimination yet, and is thus
// unable to reuse (gep %a, 0, %x, %y). As a result, this missed optimization
// incurs a significant slowdown on targets with limited addressing modes. For
// instance, because the PTX target does not support the reg+reg addressing
// mode, the NVPTX backend emits PTX code that literally computes the pointer
// address of each GEP, wasting many registers. It emits the following PTX for
// the first load and similar PTX for the other loads.
//
// mov.u32         %r1, %x;
// mov.u32         %r2, %y;
// mul.wide.u32    %rl2, %r1, 128;
// mov.u64         %rl3, a;
// add.s64         %rl4, %rl3, %rl2;
// mul.wide.u32    %rl5, %r2, 4;
// add.s64         %rl6, %rl4, %rl5;
// ld.global.f32   %f1, [%rl6];
//
// To reduce register pressure, the optimization implemented in this file
// merges the common part of a group of GEPs, so that we can compute each
// pointer address by adding a simple offset to the common part, saving many
// registers.
//
// It works by splitting each GEP into a variadic base and a constant offset.
// The variadic base can be computed once and reused by multiple GEPs, and the
// constant offsets can be nicely folded into the reg+immediate addressing mode
// (supported by most targets) without using any extra register.
//
// For instance, we transform the four GEPs and four loads in the above example
// into:
//
// base = gep a, 0, x, y
// load base
// load base + 1  * sizeof(float)
// load base + 32 * sizeof(float)
// load base + 33 * sizeof(float)
//
// Given the transformed IR, a backend that supports the reg+immediate
// addressing mode can easily fold the pointer arithmetic into the loads. For
// example, the NVPTX backend can easily fold the pointer arithmetic into the
// ld.global.f32 instructions, and the resultant PTX uses far fewer registers.
//
// mov.u32         %r1, %x;
// mov.u32         %r2, %y;
// mul.wide.u32    %rl2, %r1, 128;
// mov.u64         %rl3, a;
// add.s64         %rl4, %rl3, %rl2;
// mul.wide.u32    %rl5, %r2, 4;
// add.s64         %rl6, %rl4, %rl5;
// ld.global.f32   %f1, [%rl6]; // so far the same as unoptimized PTX
// ld.global.f32   %f2, [%rl6+4]; // much better
// ld.global.f32   %f3, [%rl6+128]; // much better
// ld.global.f32   %f4, [%rl6+132]; // much better
//
// Another improvement enabled by the LowerGEP flag is to lower a GEP with
// multiple indices to either multiple GEPs with a single index or arithmetic
// operations (depending on whether the target uses alias analysis in codegen).
// This transformation has the following benefits:
// (1) It can always extract the constants from indices into structure types.
// (2) After such lowering, there are more optimization opportunities such as
//     CSE, LICM and CGP.
//
// E.g., the following GEPs have multiple indices:
//  BB1:
//    %p = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j1, i32 3
//    load %p
//    ...
//  BB2:
//    %p2 = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j2, i32 2
//    load %p2
//    ...
//
// In this form, we cannot CSE the common part computed from index "i64 %i".
// Lowering the GEPs makes such CSE possible.
// If the target does not use alias analysis in codegen, this pass will
// lower a GEP with multiple indices into arithmetic operations:
//  BB1:
//    %1 = ptrtoint [10 x %struct]* %ptr to i64    ; CSE opportunity
//    %2 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//    %3 = add i64 %1, %2                          ; CSE opportunity
//    %4 = mul i64 %j1, length_of_struct
//    %5 = add i64 %3, %4
//    %6 = add i64 %5, struct_field_3              ; Constant offset
//    %p = inttoptr i64 %6 to i32*
//    load %p
//    ...
//  BB2:
//    %7 = ptrtoint [10 x %struct]* %ptr to i64    ; CSE opportunity
//    %8 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//    %9 = add i64 %7, %8                          ; CSE opportunity
//    %10 = mul i64 %j2, length_of_struct
//    %11 = add i64 %9, %10
//    %12 = add i64 %11, struct_field_2            ; Constant offset
//    %p2 = inttoptr i64 %12 to i32*
//    load %p2
//    ...
//
// If the target uses alias analysis in codegen, this pass will lower a GEP
// with multiple indices into multiple GEPs with a single index:
//  BB1:
//    %1 = bitcast [10 x %struct]* %ptr to i8*     ; CSE opportunity
//    %2 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//    %3 = getelementptr i8* %1, i64 %2            ; CSE opportunity
//    %4 = mul i64 %j1, length_of_struct
//    %5 = getelementptr i8* %3, i64 %4
//    %6 = getelementptr i8* %5, struct_field_3    ; Constant offset
//    %p = bitcast i8* %6 to i32*
//    load %p
//    ...
//  BB2:
//    %7 = bitcast [10 x %struct]* %ptr to i8*     ; CSE opportunity
//    %8 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//    %9 = getelementptr i8* %7, i64 %8            ; CSE opportunity
//    %10 = mul i64 %j2, length_of_struct
//    %11 = getelementptr i8* %9, i64 %10
//    %12 = getelementptr i8* %11, struct_field_2  ; Constant offset
//    %p2 = bitcast i8* %12 to i32*
//    load %p2
//    ...
//
// Lowering GEPs can also benefit other passes such as LICM and CGP.
// LICM (Loop Invariant Code Motion) cannot hoist/sink a GEP with multiple
// indices if any of those indices is loop-variant. If we lower such a GEP
// into invariant parts and variant parts, LICM can hoist/sink the invariant
// parts.
// CGP (CodeGen Prepare) tries to sink address calculations that match the
// target's addressing modes. A GEP with multiple indices may not match and
// will not be sunk. If we lower such a GEP into smaller parts, CGP may sink
// some of them, so we end up with a better addressing mode.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

static cl::opt<bool> DisableSeparateConstOffsetFromGEP(
    "disable-separate-const-offset-from-gep", cl::init(false),
    cl::desc("Do not separate the constant offset from a GEP instruction"),
    cl::Hidden);

namespace {

/// \brief A helper class for separating a constant offset from a GEP index.
///
/// In real programs, a GEP index may be more complicated than a simple
/// addition of something and a constant integer which can be trivially split.
/// For example, to split ((a << 3) | 5) + b, we need to search deeper for the
/// constant offset, so that we can separate the index into (a << 3) + b and 5.
///
/// Therefore, this class looks into the expression that computes a given GEP
/// index, and tries to find a constant integer that can be hoisted to the
/// outermost level of the expression as an addition. Not every constant in an
/// expression can be hoisted out; e.g., we cannot transform (b * (a + 5)) to
/// (b * a + 5); nor can we transform (3 * (a + 5)) to (3 * a + 5). In the
/// latter case, however, -instcombine probably already optimized
/// (3 * (a + 5)) to (3 * a + 15).
class ConstantOffsetExtractor {
 public:
  /// Extracts a constant offset from the given GEP index. It returns the
  /// new index representing the remainder (equal to the original index minus
  /// the constant offset), or nullptr if we cannot extract a constant offset.
  /// \p Idx    The given GEP index
  /// \p GEP    The given GEP
  static Value *Extract(Value *Idx, GetElementPtrInst *GEP);
  /// Looks for a constant offset from the given GEP index without extracting
  /// it. It returns the numeric value of the extracted constant offset (0 if
  /// failed). The meanings of the arguments are the same as in Extract.
  static int64_t Find(Value *Idx, GetElementPtrInst *GEP);

 private:
  ConstantOffsetExtractor(Instruction *InsertionPt) : IP(InsertionPt) {}
  /// Searches the expression that computes V for a non-zero constant C s.t.
  /// V can be reassociated into the form V' + C. If the search is successful,
  /// returns C and updates UserChain as a def-use chain from C to V;
  /// otherwise, UserChain is empty.
  ///
  /// \p V            The given expression
  /// \p SignExtended Whether V will be sign-extended in the computation of the
  ///                 GEP index
  /// \p ZeroExtended Whether V will be zero-extended in the computation of the
  ///                 GEP index
  /// \p NonNegative  Whether V is guaranteed to be non-negative. For example,
  ///                 an index of an inbounds GEP is guaranteed to be
  ///                 non-negative. Leveraging this, we can better split
  ///                 inbounds GEPs.
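  ///
  /// An illustrative sketch (not part of the original documentation): given
  /// the index ((a << 3) | 5) with no surrounding extensions, find returns 5
  /// and sets UserChain to [5, (a << 3) | 5], because (a << 3) and 5 have no
  /// common bits, so the "or" behaves like an "add".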
  APInt find(Value *V, bool SignExtended, bool ZeroExtended, bool NonNegative);
  /// A helper function to look into both operands of a binary operator.
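  ///
  /// For example (an illustrative sketch): searching "a - (b + 5)" finds the
  /// constant 5 in the right operand of the sub, so the returned offset is
  /// negated to -5.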
  APInt findInEitherOperand(BinaryOperator *BO, bool SignExtended,
                            bool ZeroExtended);
  /// After finding the constant offset C from the GEP index I, we build a new
  /// index I' s.t. I' + C = I. This function builds and returns the new
  /// index I' according to UserChain produced by function "find".
  ///
  /// The building conceptually takes two steps:
  /// 1) iteratively distribute s/zext towards the leaves of the expression
  ///    tree that computes I
  /// 2) reassociate the expression tree to the form I' + C.
  ///
  /// For example, to extract the 5 from sext(a + (b + 5)), we first distribute
  /// sext to a, b and 5 so that we have
  ///   sext(a) + (sext(b) + 5).
  /// Then, we reassociate it to
  ///   (sext(a) + sext(b)) + 5.
  /// Given this form, we know I' is sext(a) + sext(b).
  Value *rebuildWithoutConstOffset();
  /// After the first step of rebuilding the GEP index without the constant
  /// offset, distribute s/zext to the operands of all operators in UserChain.
  /// e.g., zext(sext(a + (b + 5))) (assuming no overflow) =>
  /// zext(sext(a)) + (zext(sext(b)) + zext(sext(5))).
  ///
  /// The function also updates UserChain to point to new subexpressions after
  /// distributing s/zext. e.g., the old UserChain of the above example is
  /// 5 -> b + 5 -> a + (b + 5) -> sext(...) -> zext(sext(...)),
  /// and the new UserChain is
  /// zext(sext(5)) -> zext(sext(b)) + zext(sext(5)) ->
  ///   zext(sext(a)) + (zext(sext(b)) + zext(sext(5))).
  ///
  /// \p ChainIndex The index to UserChain. ChainIndex is initially
  ///               UserChain.size() - 1, and is decremented during
  ///               the recursion.
  Value *distributeExtsAndCloneChain(unsigned ChainIndex);
  /// Reassociates the GEP index to the form I' + C and returns I'.
  Value *removeConstOffset(unsigned ChainIndex);
  /// A helper function to apply ExtInsts, a list of s/zext, to value V.
  /// e.g., if ExtInsts = [sext i32 to i64, zext i16 to i32], this function
  /// returns "sext i32 (zext i16 V to i32) to i64".
  Value *applyExts(Value *V);

  /// Returns true if LHS and RHS have no bits in common, i.e.,
  /// (LHS & RHS) == 0.
  bool NoCommonBits(Value *LHS, Value *RHS) const;
  /// Computes which bits are known to be one or zero.
  /// \p KnownOne Mask of all bits that are known to be one.
  /// \p KnownZero Mask of all bits that are known to be zero.
  void ComputeKnownBits(Value *V, APInt &KnownOne, APInt &KnownZero) const;
  /// A helper function that returns whether we can trace into the operands
  /// of binary operator BO for a constant offset.
  ///
  /// \p SignExtended Whether BO is surrounded by sext
  /// \p ZeroExtended Whether BO is surrounded by zext
  /// \p NonNegative Whether BO is known to be non-negative, e.g., an inbounds
  ///                array index.
  bool CanTraceInto(bool SignExtended, bool ZeroExtended, BinaryOperator *BO,
                    bool NonNegative);

  /// The path from the constant offset to the old GEP index. e.g., if the GEP
  /// index is "a * b + (c + 5)". After running function find, UserChain[0] will
  /// be the constant 5, UserChain[1] will be the subexpression "c + 5", and
  /// UserChain[2] will be the entire expression "a * b + (c + 5)".
  ///
  /// This path helps to rebuild the new GEP index.
  SmallVector<User *, 8> UserChain;
  /// A data structure used in rebuildWithoutConstOffset. Contains all
  /// sext/zext instructions along UserChain.
  SmallVector<CastInst *, 16> ExtInsts;
  Instruction *IP;  ///< Insertion position of cloned instructions.
};

/// \brief A pass that tries to split every GEP in the function into a variadic
/// base and a constant offset. It is a FunctionPass because searching for the
/// constant offset may inspect other basic blocks.
class SeparateConstOffsetFromGEP : public FunctionPass {
 public:
  static char ID;
  SeparateConstOffsetFromGEP(const TargetMachine *TM = nullptr,
                             bool LowerGEP = false)
      : FunctionPass(ID), TM(TM), LowerGEP(LowerGEP) {
    initializeSeparateConstOffsetFromGEPPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }

  bool runOnFunction(Function &F) override;

 private:
  /// Tries to split the given GEP into a variadic base and a constant offset,
  /// and returns true if the splitting succeeds.
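  ///
  /// For example (an illustrative sketch of the splitting step):
  ///   %p = gep inbounds float* %a, i64 (%i + 5)
  /// becomes
  ///   %base = gep float* %a, i64 %i    ; the variadic base
  ///   %p = gep float* %base, i64 5     ; reg+immediate-friendly offset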
  bool splitGEP(GetElementPtrInst *GEP);
  /// Lowers a GEP with multiple indices into multiple GEPs with a single
  /// index. Function splitGEP has already split the original GEP into a
  /// variadic part and a constant offset (i.e., AccumulativeByteOffset). This
  /// function lowers the variadic part into a set of GEPs with a single index
  /// and applies AccumulativeByteOffset to it.
  /// \p Variadic                  The variadic part of the original GEP.
  /// \p AccumulativeByteOffset    The constant offset.
  void lowerToSingleIndexGEPs(GetElementPtrInst *Variadic,
                              int64_t AccumulativeByteOffset);
  /// Lowers a GEP with multiple indices into a ptrtoint + arithmetic +
  /// inttoptr form. Function splitGEP has already split the original GEP into
  /// a variadic part and a constant offset (i.e., AccumulativeByteOffset).
  /// This function lowers the variadic part into a set of arithmetic
  /// operations and applies AccumulativeByteOffset to it.
  /// \p Variadic                  The variadic part of the original GEP.
  /// \p AccumulativeByteOffset    The constant offset.
  void lowerToArithmetics(GetElementPtrInst *Variadic,
                          int64_t AccumulativeByteOffset);
  /// Finds the constant offset within each index and accumulates them. If
  /// LowerGEP is true, it searches the indices of both sequential and
  /// structure types; otherwise it searches only the indices of sequential
  /// types. The output NeedsExtraction indicates whether we successfully
  /// found a non-zero constant offset.
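  ///
  /// For example (an illustrative sketch reusing names from the file header):
  /// given
  ///   gep [10 x %struct]* %ptr, i64 %i, i64 (%j1 + 1), i32 3
  /// with LowerGEP enabled, this returns
  ///   1 * length_of_struct + struct_field_3
  /// and sets NeedsExtraction to true.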
  int64_t accumulateByteOffset(GetElementPtrInst *GEP, bool &NeedsExtraction);
  /// Canonicalizes array indices to pointer-size integers. This helps to
  /// simplify the logic of splitting a GEP. For example, if a + b is a
  /// pointer-size integer, we have
  ///   gep base, a + b = gep (gep base, a), b
  /// However, this equality may not hold if the size of a + b is smaller than
  /// the pointer size, because LLVM conceptually sign-extends GEP indices to
  /// pointer size before computing the address
  /// (http://llvm.org/docs/LangRef.html#id181).
  ///
  /// This canonicalization is very likely already done in clang and
  /// instcombine. Therefore, the program will probably remain the same.
  ///
  /// Returns true if the module changes.
  ///
  /// Verified in @i32_add in split-gep.ll
  bool canonicalizeArrayIndicesToPointerSize(GetElementPtrInst *GEP);

  const TargetMachine *TM;
  /// Whether to lower a GEP with multiple indices into arithmetic operations
  /// or multiple GEPs with a single index.
  bool LowerGEP;
};
}  // anonymous namespace

char SeparateConstOffsetFromGEP::ID = 0;
INITIALIZE_PASS_BEGIN(
    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE", false,
    false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(
    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE", false,
    false)

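// A hypothetical usage sketch (not from the original file): a target that
// benefits from this pass can schedule it in its pass pipeline via the
// factory function below, e.g.:
//   PM.add(createSeparateConstOffsetFromGEPPass(TM, /*LowerGEP=*/true));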
FunctionPass *
llvm::createSeparateConstOffsetFromGEPPass(const TargetMachine *TM,
                                           bool LowerGEP) {
  return new SeparateConstOffsetFromGEP(TM, LowerGEP);
}

bool ConstantOffsetExtractor::CanTraceInto(bool SignExtended,
                                           bool ZeroExtended,
                                           BinaryOperator *BO,
                                           bool NonNegative) {
  // We only consider ADD, SUB and OR, because a non-zero constant found in
  // expressions composed of these operations can be easily hoisted as a
  // constant offset by reassociation.
  if (BO->getOpcode() != Instruction::Add &&
      BO->getOpcode() != Instruction::Sub &&
      BO->getOpcode() != Instruction::Or) {
    return false;
  }

  Value *LHS = BO->getOperand(0), *RHS = BO->getOperand(1);
  // Do not trace into "or" unless it is equivalent to "add". If LHS and RHS
  // don't have common bits, (LHS | RHS) is equivalent to (LHS + RHS).
  if (BO->getOpcode() == Instruction::Or && !NoCommonBits(LHS, RHS))
    return false;

  // In addition, tracing into BO requires that its surrounding s/zext (if
  // any) is distributable to both operands.
  //
  // Suppose BO = A op B.
  //  SignExtended | ZeroExtended | Distributable?
  // --------------+--------------+----------------------------------
  //       0       |      0       | true because no s/zext exists
  //       0       |      1       | zext(BO) == zext(A) op zext(B)
  //       1       |      0       | sext(BO) == sext(A) op sext(B)
  //       1       |      1       | zext(sext(BO)) ==
  //               |              |     zext(sext(A)) op zext(sext(B))
  if (BO->getOpcode() == Instruction::Add && !ZeroExtended && NonNegative) {
    // If a + b >= 0 and (a >= 0 or b >= 0), then
    //   sext(a + b) = sext(a) + sext(b)
    // even if the addition is not marked nsw.
    //
    // Leveraging this invariant, we can trace into an sext'ed inbounds GEP
    // index if the constant offset is non-negative.
    //
    // Verified in @sext_add in split-gep.ll.
    if (ConstantInt *ConstLHS = dyn_cast<ConstantInt>(LHS)) {
      if (!ConstLHS->isNegative())
        return true;
    }
    if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(RHS)) {
      if (!ConstRHS->isNegative())
        return true;
    }
  }

  // sext (add/sub nsw A, B) == add/sub nsw (sext A), (sext B)
  // zext (add/sub nuw A, B) == add/sub nuw (zext A), (zext B)
  if (BO->getOpcode() == Instruction::Add ||
      BO->getOpcode() == Instruction::Sub) {
    if (SignExtended && !BO->hasNoSignedWrap())
      return false;
    if (ZeroExtended && !BO->hasNoUnsignedWrap())
      return false;
  }

  return true;
}

APInt ConstantOffsetExtractor::findInEitherOperand(BinaryOperator *BO,
                                                   bool SignExtended,
                                                   bool ZeroExtended) {
  // BO being non-negative does not shed light on whether its operands are
  // non-negative. Clear the NonNegative flag here.
  APInt ConstantOffset = find(BO->getOperand(0), SignExtended, ZeroExtended,
                              /* NonNegative */ false);
  // If we found a constant offset in the left operand, stop and return that.
  // This shortcut might cause us to miss opportunities of combining the
  // constant offsets in both operands, e.g., (a + 4) + (b + 5) => (a + b) + 9.
  // However, such cases are probably already handled by -instcombine,
  // given this pass runs after the standard optimizations.
  if (ConstantOffset != 0) return ConstantOffset;
  ConstantOffset = find(BO->getOperand(1), SignExtended, ZeroExtended,
                        /* NonNegative */ false);
  // If BO is a sub operator, negate the constant offset found in the right
  // operand.
  if (BO->getOpcode() == Instruction::Sub)
    ConstantOffset = -ConstantOffset;
  return ConstantOffset;
}

APInt ConstantOffsetExtractor::find(Value *V, bool SignExtended,
                                    bool ZeroExtended, bool NonNegative) {
  // TODO(jingyue): We could trace into integer/pointer casts, such as
  // inttoptr, ptrtoint, bitcast, and addrspacecast. We choose to handle only
  // integers because it gives good enough results for our benchmarks.
  unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();

  // We cannot do much with Values that are not a User, such as an Argument.
  User *U = dyn_cast<User>(V);
  if (U == nullptr) return APInt(BitWidth, 0);

  APInt ConstantOffset(BitWidth, 0);
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // Hooray, we found it!
    ConstantOffset = CI->getValue();
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) {
    // Trace into subexpressions for more hoisting opportunities.
    if (CanTraceInto(SignExtended, ZeroExtended, BO, NonNegative)) {
      ConstantOffset = findInEitherOperand(BO, SignExtended, ZeroExtended);
    }
  } else if (isa<SExtInst>(V)) {
    ConstantOffset = find(U->getOperand(0), /* SignExtended */ true,
                          ZeroExtended, NonNegative).sext(BitWidth);
  } else if (isa<ZExtInst>(V)) {
    // As an optimization, we can clear the SignExtended flag because
    // sext(zext(a)) = zext(a). Verified in @sext_zext in split-gep.ll.
    //
    // Clear the NonNegative flag, because zext(a) >= 0 does not imply a >= 0.
    ConstantOffset =
        find(U->getOperand(0), /* SignExtended */ false,
             /* ZeroExtended */ true, /* NonNegative */ false).zext(BitWidth);
  }

  // If we found a non-zero constant offset, add it to the path for
  // rebuildWithoutConstOffset. Zero is a valid constant offset, but doesn't
  // help this optimization.
  if (ConstantOffset != 0)
    UserChain.push_back(U);
  return ConstantOffset;
}

Value *ConstantOffsetExtractor::applyExts(Value *V) {
  Value *Current = V;
  // ExtInsts is built in the use-def order. Therefore, we apply them to V
  // in reverse order.
  for (auto I = ExtInsts.rbegin(), E = ExtInsts.rend(); I != E; ++I) {
    if (Constant *C = dyn_cast<Constant>(Current)) {
      // If Current is a constant, apply s/zext using ConstantExpr::getCast.
      // ConstantExpr::getCast emits a ConstantInt if C is a ConstantInt.
      Current = ConstantExpr::getCast((*I)->getOpcode(), C, (*I)->getType());
    } else {
      Instruction *Ext = (*I)->clone();
      Ext->setOperand(0, Current);
      Ext->insertBefore(IP);
      Current = Ext;
    }
  }
  return Current;
}

Value *ConstantOffsetExtractor::rebuildWithoutConstOffset() {
  distributeExtsAndCloneChain(UserChain.size() - 1);
  // Remove all nullptrs (which were s/zext instructions) from UserChain.
  unsigned NewSize = 0;
  for (auto I = UserChain.begin(), E = UserChain.end(); I != E; ++I) {
    if (*I != nullptr) {
      UserChain[NewSize] = *I;
      NewSize++;
    }
  }
  UserChain.resize(NewSize);
  return removeConstOffset(UserChain.size() - 1);
}

Value *
ConstantOffsetExtractor::distributeExtsAndCloneChain(unsigned ChainIndex) {
  User *U = UserChain[ChainIndex];
  if (ChainIndex == 0) {
    assert(isa<ConstantInt>(U));
    // If U is a ConstantInt, applyExts will return a ConstantInt as well.
    return UserChain[ChainIndex] = cast<ConstantInt>(applyExts(U));
  }

  if (CastInst *Cast = dyn_cast<CastInst>(U)) {
    assert((isa<SExtInst>(Cast) || isa<ZExtInst>(Cast)) &&
           "We only traced into two types of CastInst: sext and zext");
    ExtInsts.push_back(Cast);
    UserChain[ChainIndex] = nullptr;
    return distributeExtsAndCloneChain(ChainIndex - 1);
  }

  // Function find only traces into BinaryOperators and CastInsts.
  BinaryOperator *BO = cast<BinaryOperator>(U);
  // OpNo = which operand of BO is UserChain[ChainIndex - 1]
  unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
  Value *TheOther = applyExts(BO->getOperand(1 - OpNo));
  Value *NextInChain = distributeExtsAndCloneChain(ChainIndex - 1);

  BinaryOperator *NewBO = nullptr;
  if (OpNo == 0) {
    NewBO = BinaryOperator::Create(BO->getOpcode(), NextInChain, TheOther,
                                   BO->getName(), IP);
  } else {
    NewBO = BinaryOperator::Create(BO->getOpcode(), TheOther, NextInChain,
                                   BO->getName(), IP);
  }
  return UserChain[ChainIndex] = NewBO;
}

Value *ConstantOffsetExtractor::removeConstOffset(unsigned ChainIndex) {
  if (ChainIndex == 0) {
    assert(isa<ConstantInt>(UserChain[ChainIndex]));
    return ConstantInt::getNullValue(UserChain[ChainIndex]->getType());
  }

  BinaryOperator *BO = cast<BinaryOperator>(UserChain[ChainIndex]);
  unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
  assert(BO->getOperand(OpNo) == UserChain[ChainIndex - 1]);
  Value *NextInChain = removeConstOffset(ChainIndex - 1);
  Value *TheOther = BO->getOperand(1 - OpNo);

  // If NextInChain is 0 and not the LHS of a sub, we can simplify the
  // sub-expression to be just TheOther.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(NextInChain)) {
    if (CI->isZero() && !(BO->getOpcode() == Instruction::Sub && OpNo == 0))
      return TheOther;
  }

  if (BO->getOpcode() == Instruction::Or) {
    // Rebuild "or" as "add", because "or" may be invalid for the new
    // expression.
    //
    // For instance, given
    //   a | (b + 5) where a and b + 5 have no common bits,
    // we can extract 5 as the constant offset.
    //
    // However, reusing the "or" in the new index would give us
    //   (a | b) + 5
    // which does not equal a | (b + 5).
    //
    // Replacing the "or" with "add" is fine, because
    //   a | (b + 5) = a + (b + 5) = (a + b) + 5
    if (OpNo == 0) {
      return BinaryOperator::CreateAdd(NextInChain, TheOther, BO->getName(),
                                       IP);
    } else {
      return BinaryOperator::CreateAdd(TheOther, NextInChain, BO->getName(),
                                       IP);
    }
  }

  // We can reuse BO in this case, because the new expression shares the same
  // instruction type and BO is used at most once.
  assert(BO->getNumUses() <= 1 &&
         "distributeExtsAndCloneChain clones each BinaryOperator in "
         "UserChain, so no one should be used more than "
         "once");
  BO->setOperand(OpNo, NextInChain);
  BO->setHasNoSignedWrap(false);
  BO->setHasNoUnsignedWrap(false);
  // Make sure it appears after all instructions we've inserted so far.
  BO->moveBefore(IP);
  return BO;
}

Value *ConstantOffsetExtractor::Extract(Value *Idx, GetElementPtrInst *GEP) {
  ConstantOffsetExtractor Extractor(GEP);
  // Find a non-zero constant offset first.
  APInt ConstantOffset =
      Extractor.find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
                     GEP->isInBounds());
  if (ConstantOffset == 0)
    return nullptr;
  // Separate the constant offset from the GEP index.
  return Extractor.rebuildWithoutConstOffset();
}

int64_t ConstantOffsetExtractor::Find(Value *Idx, GetElementPtrInst *GEP) {
  // If Idx is an index of an inbounds GEP, Idx is guaranteed to be
  // non-negative.
  return ConstantOffsetExtractor(GEP)
      .find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
            GEP->isInBounds())
      .getSExtValue();
}

void ConstantOffsetExtractor::ComputeKnownBits(Value *V, APInt &KnownOne,
                                               APInt &KnownZero) const {
  IntegerType *IT = cast<IntegerType>(V->getType());
  KnownOne = APInt(IT->getBitWidth(), 0);
  KnownZero = APInt(IT->getBitWidth(), 0);
  const DataLayout &DL = IP->getModule()->getDataLayout();
  llvm::computeKnownBits(V, KnownZero, KnownOne, DL, 0);
}

bool ConstantOffsetExtractor::NoCommonBits(Value *LHS, Value *RHS) const {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  APInt LHSKnownOne, LHSKnownZero, RHSKnownOne, RHSKnownZero;
  ComputeKnownBits(LHS, LHSKnownOne, LHSKnownZero);
  ComputeKnownBits(RHS, RHSKnownOne, RHSKnownZero);
  return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
}

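// For instance (an illustrative sketch on a target with 64-bit pointers),
// the canonicalization below turns
//   %p = getelementptr float* %a, i32 %i
// into
//   %idxprom = sext i32 %i to i64
//   %p = getelementptr float* %a, i64 %idxprom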
bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToPointerSize(
    GetElementPtrInst *GEP) {
  bool Changed = false;
  const DataLayout &DL = GEP->getModule()->getDataLayout();
  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
       I != E; ++I, ++GTI) {
    // Skip struct member indices, which must be i32.
    if (isa<SequentialType>(*GTI)) {
      if ((*I)->getType() != IntPtrTy) {
        *I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP);
        Changed = true;
      }
    }
  }
  return Changed;
}

int64_t
SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
                                                 bool &NeedsExtraction) {
  NeedsExtraction = false;
  int64_t AccumulativeByteOffset = 0;
  gep_type_iterator GTI = gep_type_begin(*GEP);
  const DataLayout &DL = GEP->getModule()->getDataLayout();
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      // Tries to extract a constant offset from this GEP index.
      int64_t ConstantOffset =
          ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP);
      if (ConstantOffset != 0) {
        NeedsExtraction = true;
        // A GEP may have multiple indices.  We accumulate the extracted
        // constant offset to a byte offset, and later offset the remainder of
        // the original GEP with this byte offset.
        AccumulativeByteOffset +=
            ConstantOffset * DL.getTypeAllocSize(GTI.getIndexedType());
      }
    } else if (LowerGEP) {
      StructType *StTy = cast<StructType>(*GTI);
      uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
      // Skip field 0 as the offset is always 0.
      if (Field != 0) {
        NeedsExtraction = true;
        AccumulativeByteOffset +=
            DL.getStructLayout(StTy)->getElementOffset(Field);
      }
    }
  }
  return AccumulativeByteOffset;
}

void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
    GetElementPtrInst *Variadic, int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  const DataLayout &DL = Variadic->getModule()->getDataLayout();
  Type *IntPtrTy = DL.getIntPtrType(Variadic->getType());

  Type *I8PtrTy =
      Builder.getInt8PtrTy(Variadic->getType()->getPointerAddressSpace());
  Value *ResultPtr = Variadic->getOperand(0);
  if (ResultPtr->getType() != I8PtrTy)
    ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);

  gep_type_iterator GTI = gep_type_begin(*Variadic);
  // Create an ugly GEP for each sequential index. We don't create GEPs for
  // structure indices, as they are accumulated in the constant offset index.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      Value *Idx = Variadic->getOperand(I);
      // Skip zero indices.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
        if (CI->isZero())
          continue;

      APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(),
                                DL.getTypeAllocSize(GTI.getIndexedType()));
      // Scale the index by element size.
      if (ElementSize != 1) {
        if (ElementSize.isPowerOf2()) {
          Idx = Builder.CreateShl(
              Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2()));
        } else {
          Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize));
        }
      }
      // Create an ugly GEP with a single index for each index.
      ResultPtr = Builder.CreateGEP(ResultPtr, Idx, "uglygep");
    }
  }

  // Create a GEP with the constant offset index.
  if (AccumulativeByteOffset != 0) {
    Value *Offset = ConstantInt::get(IntPtrTy, AccumulativeByteOffset);
    ResultPtr = Builder.CreateGEP(ResultPtr, Offset, "uglygep");
  }
  if (ResultPtr->getType() != Variadic->getType())
    ResultPtr = Builder.CreateBitCast(ResultPtr, Variadic->getType());

  Variadic->replaceAllUsesWith(ResultPtr);
  Variadic->eraseFromParent();
}

void
SeparateConstOffsetFromGEP::lowerToArithmetics(GetElementPtrInst *Variadic,
                                               int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  const DataLayout &DL = Variadic->getModule()->getDataLayout();
  Type *IntPtrTy = DL.getIntPtrType(Variadic->getType());

  Value *ResultPtr = Builder.CreatePtrToInt(Variadic->getOperand(0), IntPtrTy);
  gep_type_iterator GTI = gep_type_begin(*Variadic);
  // Create ADD/SHL/MUL arithmetic operations for each sequential index. We
  // don't create arithmetic operations for structure indices, as they are
  // accumulated in the constant offset index.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      Value *Idx = Variadic->getOperand(I);
      // Skip zero indices.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
        if (CI->isZero())
          continue;

      APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(),
                                DL.getTypeAllocSize(GTI.getIndexedType()));
      // Scale the index by element size.
      if (ElementSize != 1) {
        if (ElementSize.isPowerOf2()) {
          Idx = Builder.CreateShl(
              Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2()));
        } else {
          Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize));
        }
      }
      // Create an ADD for each index.
      ResultPtr = Builder.CreateAdd(ResultPtr, Idx);
    }
  }

  // Create an ADD for the constant offset index.
  if (AccumulativeByteOffset != 0) {
    ResultPtr = Builder.CreateAdd(
        ResultPtr, ConstantInt::get(IntPtrTy, AccumulativeByteOffset));
  }

  ResultPtr = Builder.CreateIntToPtr(ResultPtr, Variadic->getType());
  Variadic->replaceAllUsesWith(ResultPtr);
  Variadic->eraseFromParent();
}

bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
  // Skip vector GEPs.
  if (GEP->getType()->isVectorTy())
    return false;

  // The backend can already nicely handle the case where all indices are
  // constant.
  if (GEP->hasAllConstantIndices())
    return false;

  bool Changed = canonicalizeArrayIndicesToPointerSize(GEP);

  bool NeedsExtraction;
  int64_t AccumulativeByteOffset = accumulateByteOffset(GEP, NeedsExtraction);

  if (!NeedsExtraction)
    return Changed;
  // If LowerGEP is disabled, before really splitting the GEP, check whether the
  // backend supports the addressing mode we are about to produce. If not, this
  // splitting probably won't be beneficial.
  // If LowerGEP is enabled, even when the extracted constant offset does not
  // match the addressing mode, we can still optimize the other lowered parts
  // of the variadic indices. Therefore, we don't check for addressing modes
  // in that case.
  if (!LowerGEP) {
    TargetTransformInfo &TTI =
        getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
            *GEP->getParent()->getParent());
    if (!TTI.isLegalAddressingMode(GEP->getType()->getElementType(),
                                   /*BaseGV=*/nullptr, AccumulativeByteOffset,
                                   /*HasBaseReg=*/true, /*Scale=*/0)) {
      return Changed;
    }
  }

  // Remove the constant offset in each sequential index. The resultant GEP
  // computes the variadic base.
  // Notice that we don't remove struct field indices here. If LowerGEP is
  // disabled, a structure index is not accumulated and we still use the old
  // one. If LowerGEP is enabled, a structure index is accumulated in the
  // constant offset. LowerToSingleIndexGEPs or lowerToArithmetics will later
  // handle the constant offset and won't need a new structure index.
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      // Splits this GEP index into a variadic part and a constant offset, and
      // uses the variadic part as the new index.
      Value *NewIdx = ConstantOffsetExtractor::Extract(GEP->getOperand(I), GEP);
      if (NewIdx != nullptr) {
        GEP->setOperand(I, NewIdx);
      }
    }
  }

  // Clear the inbounds attribute because the new index may be off-bound.
  // e.g.,
  //
  // b = add i64 a, 5
  // addr = gep inbounds float* p, i64 b
  //
  // is transformed to:
  //
  // addr2 = gep float* p, i64 a
  // addr = gep float* addr2, i64 5
  //
  // If a is -4, although the old index b is in bounds, the new index a is
  // off-bound. http://llvm.org/docs/LangRef.html#id181 says "if the
  // inbounds keyword is not present, the offsets are added to the base
  // address with silently-wrapping two's complement arithmetic".
  // Therefore, the final code will be semantically equivalent.
  //
  // TODO(jingyue): do some range analysis to keep as many inbounds as
  // possible. GEPs with inbounds are more friendly to alias analysis.
  GEP->setIsInBounds(false);

  // Lowers a GEP to either GEPs with a single index or arithmetic operations.
  if (LowerGEP) {
    // As currently BasicAA does not analyze ptrtoint/inttoptr, do not lower to
    // arithmetic operations if the target uses alias analysis in codegen.
    if (TM && TM->getSubtargetImpl(*GEP->getParent()->getParent())->useAA())
      lowerToSingleIndexGEPs(GEP, AccumulativeByteOffset);
    else
      lowerToArithmetics(GEP, AccumulativeByteOffset);
    return true;
  }

  // No need to create another GEP if the accumulative byte offset is 0.
  if (AccumulativeByteOffset == 0)
    return true;

  // Offsets the base with the accumulative byte offset.
  //
  //   %gep                        ; the base
  //   ... %gep ...
  //
  // => add the offset
  //
  //   %gep2                       ; clone of %gep
  //   %new.gep = gep %gep2, <offset / sizeof(*%gep)>
  //   %gep                        ; will be removed
  //   ... %gep ...
  //
  // => replace all uses of %gep with %new.gep and remove %gep
  //
  //   %gep2                       ; clone of %gep
  //   %new.gep = gep %gep2, <offset / sizeof(*%gep)>
  //   ... %new.gep ...
  //
  // If AccumulativeByteOffset is not a multiple of sizeof(*%gep), we emit an
  // uglygep (http://llvm.org/docs/GetElementPtr.html#what-s-an-uglygep):
  // bitcast %gep2 to i8*, add the offset, and bitcast the result back to the
  // type of %gep.
  //
  //   %gep2                       ; clone of %gep
  //   %0       = bitcast %gep2 to i8*
  //   %uglygep = gep %0, <offset>
  //   %new.gep = bitcast %uglygep to <type of %gep>
  //   ... %new.gep ...
  Instruction *NewGEP = GEP->clone();
  NewGEP->insertBefore(GEP);

  // Per the ANSI C standard, signed / unsigned = unsigned and
  // signed % unsigned = unsigned. Therefore, we cast ElementTypeSizeOfGEP,
  // which DataLayout reports as an unsigned value, to a signed type, so that
  // the division and modulo below, which involve the signed
  // AccumulativeByteOffset, are performed in signed arithmetic.
  const DataLayout &DL = GEP->getModule()->getDataLayout();
  int64_t ElementTypeSizeOfGEP = static_cast<int64_t>(
      DL.getTypeAllocSize(GEP->getType()->getElementType()));
  Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
  if (AccumulativeByteOffset % ElementTypeSizeOfGEP == 0) {
    // Very likely. As long as %gep is naturally aligned, the byte offset we
    // extracted should be a multiple of sizeof(*%gep).
    int64_t Index = AccumulativeByteOffset / ElementTypeSizeOfGEP;
    NewGEP = GetElementPtrInst::Create(GEP->getResultElementType(), NewGEP,
                                       ConstantInt::get(IntPtrTy, Index, true),
                                       GEP->getName(), GEP);
  } else {
    // Unlikely but possible. For example,
    // #pragma pack(1)
    // struct S {
    //   int a[3];
    //   int64 b[8];
    // };
    // #pragma pack()
    //
    // Suppose the gep before extraction is &s[i + 1].b[j + 3]. After
    // extraction, it becomes &s[i].b[j] and AccumulativeByteOffset is
    // sizeof(S) + 3 * sizeof(int64) = 100, which is not a multiple of
    // sizeof(int64).
    //
    // Emit an uglygep in this case.
    Type *I8PtrTy = Type::getInt8PtrTy(GEP->getContext(),
                                       GEP->getPointerAddressSpace());
    NewGEP = new BitCastInst(NewGEP, I8PtrTy, "", GEP);
    NewGEP = GetElementPtrInst::Create(
        Type::getInt8Ty(GEP->getContext()), NewGEP,
        ConstantInt::get(IntPtrTy, AccumulativeByteOffset, true), "uglygep",
        GEP);
    if (GEP->getType() != I8PtrTy)
      NewGEP = new BitCastInst(NewGEP, GEP->getType(), GEP->getName(), GEP);
  }

  GEP->replaceAllUsesWith(NewGEP);
  GEP->eraseFromParent();

  return true;
}

bool SeparateConstOffsetFromGEP::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  if (DisableSeparateConstOffsetFromGEP)
    return false;

  bool Changed = false;
  for (Function::iterator B = F.begin(), BE = F.end(); B != BE; ++B) {
    for (BasicBlock::iterator I = B->begin(), IE = B->end(); I != IE; ) {
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I++)) {
        Changed |= splitGEP(GEP);
      }
      // No need to split GEP ConstantExprs because all their indices are
      // constant already.
    }
  }
  return Changed;
}
