bounds_check_elimination.cc revision 19b5021a49285627485675ef31276b8194269600
1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "bounds_check_elimination.h"
18
19#include <limits>
20
21#include "base/arena_containers.h"
22#include "induction_var_range.h"
23#include "side_effects_analysis.h"
24#include "nodes.h"
25
26namespace art {
27
28class MonotonicValueRange;
29
30/**
31 * A value bound is represented as a pair of value and constant,
32 * e.g. array.length - 1.
33 */
class ValueBound : public ValueObject {
 public:
  // Builds the symbolic bound "instruction + constant". When `instruction` is
  // itself an int constant, the bound is folded into a pure constant
  // (instruction_ == nullptr) provided the fold cannot overflow/underflow.
  ValueBound(HInstruction* instruction, int32_t constant) {
    if (instruction != nullptr && instruction->IsIntConstant()) {
      // Normalize ValueBound with constant instruction.
      int32_t instr_const = instruction->AsIntConstant()->GetValue();
      if (!WouldAddOverflowOrUnderflow(instr_const, constant)) {
        instruction_ = nullptr;
        constant_ = instr_const + constant;
        return;
      }
    }
    instruction_ = instruction;
    constant_ = constant;
  }

  // Return whether (left + right) overflows or underflows.
  static bool WouldAddOverflowOrUnderflow(int32_t left, int32_t right) {
    if (right == 0) {
      return false;
    }
    if ((right > 0) && (left <= (std::numeric_limits<int32_t>::max() - right))) {
      // No overflow.
      return false;
    }
    if ((right < 0) && (left >= (std::numeric_limits<int32_t>::min() - right))) {
      // No underflow.
      return false;
    }
    return true;
  }

  // Return true if instruction can be expressed as "left_instruction + right_constant".
  // Peels off nested Add/Sub-with-constant chains (e.g. ((x + 2) - 5)) as long
  // as the accumulated constant does not overflow.
  static bool IsAddOrSubAConstant(HInstruction* instruction,
                                  /* out */ HInstruction** left_instruction,
                                  /* out */ int32_t* right_constant) {
    HInstruction* left_so_far = nullptr;
    int32_t right_so_far = 0;
    while (instruction->IsAdd() || instruction->IsSub()) {
      HBinaryOperation* bin_op = instruction->AsBinaryOperation();
      HInstruction* left = bin_op->GetLeft();
      HInstruction* right = bin_op->GetRight();
      if (right->IsIntConstant()) {
        int32_t v = right->AsIntConstant()->GetValue();
        // Subtraction of a constant is addition of its negation.
        int32_t c = instruction->IsAdd() ? v : -v;
        if (!WouldAddOverflowOrUnderflow(right_so_far, c)) {
          instruction = left;
          left_so_far = left;
          right_so_far += c;
          continue;
        }
      }
      break;
    }
    // Return result: either false and "null+0" or true and "instr+constant".
    *left_instruction = left_so_far;
    *right_constant = right_so_far;
    return left_so_far != nullptr;
  }

  // Expresses any instruction as a value bound.
  static ValueBound AsValueBound(HInstruction* instruction) {
    if (instruction->IsIntConstant()) {
      return ValueBound(nullptr, instruction->AsIntConstant()->GetValue());
    }
    HInstruction *left;
    int32_t right;
    if (IsAddOrSubAConstant(instruction, &left, &right)) {
      return ValueBound(left, right);
    }
    // Fall back to the instruction itself with offset 0.
    return ValueBound(instruction, 0);
  }

  // Try to detect useful value bound format from an instruction, e.g.
  // a constant or array length related value. `*found` reports whether a
  // useful bound was recognized; on failure Max() is returned as a placeholder.
  static ValueBound DetectValueBoundFromValue(HInstruction* instruction, /* out */ bool* found) {
    DCHECK(instruction != nullptr);
    if (instruction->IsIntConstant()) {
      *found = true;
      return ValueBound(nullptr, instruction->AsIntConstant()->GetValue());
    }

    if (instruction->IsArrayLength()) {
      *found = true;
      return ValueBound(instruction, 0);
    }
    // Try to detect (array.length + c) format.
    HInstruction *left;
    int32_t right;
    if (IsAddOrSubAConstant(instruction, &left, &right)) {
      if (left->IsArrayLength()) {
        *found = true;
        return ValueBound(left, right);
      }
    }

    // No useful bound detected.
    *found = false;
    return ValueBound::Max();
  }

  HInstruction* GetInstruction() const { return instruction_; }
  int32_t GetConstant() const { return constant_; }

  bool IsRelatedToArrayLength() const {
    // Some bounds are created with HNewArray* as the instruction instead
    // of HArrayLength*. They are treated the same.
    return (instruction_ != nullptr) &&
           (instruction_->IsArrayLength() || instruction_->IsNewArray());
  }

  // A bound is a pure constant iff it carries no instruction component.
  bool IsConstant() const {
    return instruction_ == nullptr;
  }

  static ValueBound Min() { return ValueBound(nullptr, std::numeric_limits<int32_t>::min()); }
  static ValueBound Max() { return ValueBound(nullptr, std::numeric_limits<int32_t>::max()); }

  bool Equals(ValueBound bound) const {
    return instruction_ == bound.instruction_ && constant_ == bound.constant_;
  }

  /*
   * Hunt "under the hood" of array lengths (leading to array references),
   * null checks (also leading to array references), and new arrays
   * (leading to the actual length). This makes it more likely related
   * instructions become actually comparable.
   */
  static HInstruction* HuntForDeclaration(HInstruction* instruction) {
    while (instruction->IsArrayLength() ||
           instruction->IsNullCheck() ||
           instruction->IsNewArray()) {
      instruction = instruction->InputAt(0);
    }
    return instruction;
  }

  // Whether two instruction components denote the same underlying value,
  // after stripping array-length/null-check/new-array wrappers.
  static bool Equal(HInstruction* instruction1, HInstruction* instruction2) {
    if (instruction1 == instruction2) {
      return true;
    }
    if (instruction1 == nullptr || instruction2 == nullptr) {
      return false;
    }
    instruction1 = HuntForDeclaration(instruction1);
    instruction2 = HuntForDeclaration(instruction2);
    return instruction1 == instruction2;
  }

  // Returns if it's certain this->bound >= `bound`.
  bool GreaterThanOrEqualTo(ValueBound bound) const {
    if (Equal(instruction_, bound.instruction_)) {
      return constant_ >= bound.constant_;
    }
    // Not comparable. Just return false.
    return false;
  }

  // Returns if it's certain this->bound <= `bound`.
  bool LessThanOrEqualTo(ValueBound bound) const {
    if (Equal(instruction_, bound.instruction_)) {
      return constant_ <= bound.constant_;
    }
    // Not comparable. Just return false.
    return false;
  }

  // Returns if it's certain this->bound > `bound`.
  bool GreaterThan(ValueBound bound) const {
    if (Equal(instruction_, bound.instruction_)) {
      return constant_ > bound.constant_;
    }
    // Not comparable. Just return false.
    return false;
  }

  // Returns if it's certain this->bound < `bound`.
  bool LessThan(ValueBound bound) const {
    if (Equal(instruction_, bound.instruction_)) {
      return constant_ < bound.constant_;
    }
    // Not comparable. Just return false.
    return false;
  }

  // Try to narrow lower bound. Returns the greatest of the two if possible.
  // Pick one if they are not comparable.
  static ValueBound NarrowLowerBound(ValueBound bound1, ValueBound bound2) {
    if (bound1.GreaterThanOrEqualTo(bound2)) {
      return bound1;
    }
    if (bound2.GreaterThanOrEqualTo(bound1)) {
      return bound2;
    }

    // Not comparable. Just pick one. We may lose some info, but that's ok.
    // Favor constant as lower bound.
    return bound1.IsConstant() ? bound1 : bound2;
  }

  // Try to narrow upper bound. Returns the lowest of the two if possible.
  // Pick one if they are not comparable.
  static ValueBound NarrowUpperBound(ValueBound bound1, ValueBound bound2) {
    if (bound1.LessThanOrEqualTo(bound2)) {
      return bound1;
    }
    if (bound2.LessThanOrEqualTo(bound1)) {
      return bound2;
    }

    // Not comparable. Just pick one. We may lose some info, but that's ok.
    // Favor array length as upper bound.
    return bound1.IsRelatedToArrayLength() ? bound1 : bound2;
  }

  // Add a constant to a ValueBound.
  // `overflow` or `underflow` will return whether the resulting bound may
  // overflow or underflow an int. On possible overflow/underflow the bound
  // is conservatively widened to Max()/Min().
  ValueBound Add(int32_t c, /* out */ bool* overflow, /* out */ bool* underflow) const {
    *overflow = *underflow = false;
    if (c == 0) {
      return *this;
    }

    int32_t new_constant;
    if (c > 0) {
      if (constant_ > (std::numeric_limits<int32_t>::max() - c)) {
        *overflow = true;
        return Max();
      }

      new_constant = constant_ + c;
      // (array.length + non-positive-constant) won't overflow an int.
      if (IsConstant() || (IsRelatedToArrayLength() && new_constant <= 0)) {
        return ValueBound(instruction_, new_constant);
      }
      // Be conservative.
      *overflow = true;
      return Max();
    } else {
      if (constant_ < (std::numeric_limits<int32_t>::min() - c)) {
        *underflow = true;
        return Min();
      }

      new_constant = constant_ + c;
      // Regardless of the value new_constant, (array.length+new_constant) will
      // never underflow since array.length is no less than 0.
      if (IsConstant() || IsRelatedToArrayLength()) {
        return ValueBound(instruction_, new_constant);
      }
      // Be conservative.
      *underflow = true;
      return Min();
    }
  }

 private:
  HInstruction* instruction_;  // Symbolic component; nullptr for pure constants.
  int32_t constant_;           // Constant offset added to the symbolic component.
};
295
296/**
297 * Represent a range of lower bound and upper bound, both being inclusive.
298 * Currently a ValueRange may be generated as a result of the following:
299 * comparisons related to array bounds, array bounds check, add/sub on top
300 * of an existing value range, NewArray or a loop phi corresponding to an
301 * incrementing/decrementing array index (MonotonicValueRange).
302 */
303class ValueRange : public ArenaObject<kArenaAllocBoundsCheckElimination> {
304 public:
305  ValueRange(ArenaAllocator* allocator, ValueBound lower, ValueBound upper)
306      : allocator_(allocator), lower_(lower), upper_(upper) {}
307
308  virtual ~ValueRange() {}
309
310  virtual MonotonicValueRange* AsMonotonicValueRange() { return nullptr; }
311  bool IsMonotonicValueRange() {
312    return AsMonotonicValueRange() != nullptr;
313  }
314
315  ArenaAllocator* GetAllocator() const { return allocator_; }
316  ValueBound GetLower() const { return lower_; }
317  ValueBound GetUpper() const { return upper_; }
318
319  bool IsConstantValueRange() { return lower_.IsConstant() && upper_.IsConstant(); }
320
321  // If it's certain that this value range fits in other_range.
322  virtual bool FitsIn(ValueRange* other_range) const {
323    if (other_range == nullptr) {
324      return true;
325    }
326    DCHECK(!other_range->IsMonotonicValueRange());
327    return lower_.GreaterThanOrEqualTo(other_range->lower_) &&
328           upper_.LessThanOrEqualTo(other_range->upper_);
329  }
330
331  // Returns the intersection of this and range.
332  // If it's not possible to do intersection because some
333  // bounds are not comparable, it's ok to pick either bound.
334  virtual ValueRange* Narrow(ValueRange* range) {
335    if (range == nullptr) {
336      return this;
337    }
338
339    if (range->IsMonotonicValueRange()) {
340      return this;
341    }
342
343    return new (allocator_) ValueRange(
344        allocator_,
345        ValueBound::NarrowLowerBound(lower_, range->lower_),
346        ValueBound::NarrowUpperBound(upper_, range->upper_));
347  }
348
349  // Shift a range by a constant.
350  ValueRange* Add(int32_t constant) const {
351    bool overflow, underflow;
352    ValueBound lower = lower_.Add(constant, &overflow, &underflow);
353    if (underflow) {
354      // Lower bound underflow will wrap around to positive values
355      // and invalidate the upper bound.
356      return nullptr;
357    }
358    ValueBound upper = upper_.Add(constant, &overflow, &underflow);
359    if (overflow) {
360      // Upper bound overflow will wrap around to negative values
361      // and invalidate the lower bound.
362      return nullptr;
363    }
364    return new (allocator_) ValueRange(allocator_, lower, upper);
365  }
366
367 private:
368  ArenaAllocator* const allocator_;
369  const ValueBound lower_;  // inclusive
370  const ValueBound upper_;  // inclusive
371
372  DISALLOW_COPY_AND_ASSIGN(ValueRange);
373};
374
375/**
376 * A monotonically incrementing/decrementing value range, e.g.
377 * the variable i in "for (int i=0; i<array.length; i++)".
378 * Special care needs to be taken to account for overflow/underflow
379 * of such value ranges.
380 */
class MonotonicValueRange : public ValueRange {
 public:
  // `induction_variable` is the loop-header phi; `initial` its out-of-loop
  // input; `increment` the per-iteration step; `bound` extra info on initial_.
  MonotonicValueRange(ArenaAllocator* allocator,
                      HPhi* induction_variable,
                      HInstruction* initial,
                      int32_t increment,
                      ValueBound bound)
      // To be conservative, give it full range [Min(), Max()] in case it's
      // used as a regular value range, due to possible overflow/underflow.
      : ValueRange(allocator, ValueBound::Min(), ValueBound::Max()),
        induction_variable_(induction_variable),
        initial_(initial),
        increment_(increment),
        bound_(bound) {}

  virtual ~MonotonicValueRange() {}

  int32_t GetIncrement() const { return increment_; }
  ValueBound GetBound() const { return bound_; }
  HBasicBlock* GetLoopHeader() const {
    DCHECK(induction_variable_->GetBlock()->IsLoopHeader());
    return induction_variable_->GetBlock();
  }

  MonotonicValueRange* AsMonotonicValueRange() OVERRIDE { return this; }

  // If it's certain that this value range fits in other_range.
  // A monotonic range is potentially unbounded, so never claims to fit.
  bool FitsIn(ValueRange* other_range) const OVERRIDE {
    if (other_range == nullptr) {
      return true;
    }
    DCHECK(!other_range->IsMonotonicValueRange());
    return false;
  }

  // Try to narrow this MonotonicValueRange given another range.
  // Ideally it will return a normal ValueRange. But due to
  // possible overflow/underflow, that may not be possible.
  ValueRange* Narrow(ValueRange* range) OVERRIDE {
    if (range == nullptr) {
      return this;
    }
    DCHECK(!range->IsMonotonicValueRange());

    if (increment_ > 0) {
      // Monotonically increasing.
      ValueBound lower = ValueBound::NarrowLowerBound(bound_, range->GetLower());
      if (!lower.IsConstant() || lower.GetConstant() == std::numeric_limits<int32_t>::min()) {
        // Lower bound isn't useful. Leave it to deoptimization.
        return this;
      }

      // We currently conservatively assume max array length is Max().
      // If we can make assumptions about the max array length, e.g. due to the max heap size,
      // divided by the element size (such as 4 bytes for each integer array), we can
      // lower this number and rule out some possible overflows.
      int32_t max_array_len = std::numeric_limits<int32_t>::max();

      // max possible integer value of range's upper value.
      int32_t upper = std::numeric_limits<int32_t>::max();
      // Try to lower upper.
      ValueBound upper_bound = range->GetUpper();
      if (upper_bound.IsConstant()) {
        upper = upper_bound.GetConstant();
      } else if (upper_bound.IsRelatedToArrayLength() && upper_bound.GetConstant() <= 0) {
        // Normal case. e.g. <= array.length - 1.
        upper = max_array_len + upper_bound.GetConstant();
      }

      // If we can prove for the last number in sequence of initial_,
      // initial_ + increment_, initial_ + 2 x increment_, ...
      // that's <= upper, (last_num_in_sequence + increment_) doesn't trigger overflow,
      // then this MonotonicValueRange is narrowed to a normal value range.

      // Be conservative first, assume last number in the sequence hits upper.
      int32_t last_num_in_sequence = upper;
      if (initial_->IsIntConstant()) {
        int32_t initial_constant = initial_->AsIntConstant()->GetValue();
        if (upper <= initial_constant) {
          last_num_in_sequence = upper;
        } else {
          // Cast to int64_t for the subtraction part to avoid int32_t overflow.
          last_num_in_sequence = initial_constant +
              ((int64_t)upper - (int64_t)initial_constant) / increment_ * increment_;
        }
      }
      if (last_num_in_sequence <= (std::numeric_limits<int32_t>::max() - increment_)) {
        // No overflow. The sequence will be stopped by the upper bound test as expected.
        return new (GetAllocator()) ValueRange(GetAllocator(), lower, range->GetUpper());
      }

      // There might be overflow. Give up narrowing.
      return this;
    } else {
      DCHECK_NE(increment_, 0);
      // Monotonically decreasing.
      ValueBound upper = ValueBound::NarrowUpperBound(bound_, range->GetUpper());
      if ((!upper.IsConstant() || upper.GetConstant() == std::numeric_limits<int32_t>::max()) &&
          !upper.IsRelatedToArrayLength()) {
        // Upper bound isn't useful. Leave it to deoptimization.
        return this;
      }

      // Need to take care of underflow. Try to prove underflow won't happen
      // for common cases. Note increment_ < 0, so (min - increment_) > min.
      if (range->GetLower().IsConstant()) {
        int32_t constant = range->GetLower().GetConstant();
        if (constant >= (std::numeric_limits<int32_t>::min() - increment_)) {
          return new (GetAllocator()) ValueRange(GetAllocator(), range->GetLower(), upper);
        }
      }

      // For non-constant lower bound, just assume might be underflow. Give up narrowing.
      return this;
    }
  }

 private:
  HPhi* const induction_variable_;  // Induction variable for this monotonic value range.
  HInstruction* const initial_;     // Initial value.
  const int32_t increment_;         // Increment for each loop iteration.
  const ValueBound bound_;          // Additional value bound info for initial_.

  DISALLOW_COPY_AND_ASSIGN(MonotonicValueRange);
};
506
507class BCEVisitor : public HGraphVisitor {
508 public:
509  // The least number of bounds checks that should be eliminated by triggering
510  // the deoptimization technique.
511  static constexpr size_t kThresholdForAddingDeoptimize = 2;
512
513  // Very large lengths are considered an anomaly. This is a threshold beyond which we don't
514  // bother to apply the deoptimization technique since it's likely, or sometimes certain,
515  // an AIOOBE will be thrown.
516  static constexpr uint32_t kMaxLengthForAddingDeoptimize =
517      std::numeric_limits<int32_t>::max() - 1024 * 1024;
518
519  // Added blocks for loop body entry test.
520  bool IsAddedBlock(HBasicBlock* block) const {
521    return block->GetBlockId() >= initial_block_size_;
522  }
523
  // All per-pass containers are arena-allocated from the graph's arena and
  // tagged kArenaAllocBoundsCheckElimination for allocation accounting.
  BCEVisitor(HGraph* graph,
             const SideEffectsAnalysis& side_effects,
             HInductionVarAnalysis* induction_analysis)
      : HGraphVisitor(graph),
        // One value-range map per block present at construction time.
        maps_(graph->GetBlocks().size(),
              ArenaSafeMap<int, ValueRange*>(
                  std::less<int>(),
                  graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
              graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
        first_index_bounds_check_map_(
            std::less<int>(),
            graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
        dynamic_bce_standby_(
            graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
        // Standby recording is on during the main pass; Finish() turns it off.
        record_dynamic_bce_standby_(true),
        early_exit_loop_(
            std::less<uint32_t>(),
            graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
        taken_test_loop_(
            std::less<uint32_t>(),
            graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
        finite_loop_(graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
        has_dom_based_dynamic_bce_(false),
        // Snapshot of the block count, used to recognize blocks added later.
        initial_block_size_(graph->GetBlocks().size()),
        side_effects_(side_effects),
        induction_range_(induction_analysis) {}
550
551  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
552    DCHECK(!IsAddedBlock(block));
553    first_index_bounds_check_map_.clear();
554    HGraphVisitor::VisitBasicBlock(block);
555    // We should never deoptimize from an osr method, otherwise we might wrongly optimize
556    // code dominated by the deoptimization.
557    if (!GetGraph()->IsCompilingOsr()) {
558      AddComparesWithDeoptimization(block);
559    }
560  }
561
562  void Finish() {
563    // Retry dynamic bce candidates on standby that are still in the graph.
564    record_dynamic_bce_standby_ = false;
565    for (HBoundsCheck* bounds_check : dynamic_bce_standby_) {
566      if (bounds_check->IsInBlock()) {
567        TryDynamicBCE(bounds_check);
568      }
569    }
570
571    // Preserve SSA structure which may have been broken by adding one or more
572    // new taken-test structures (see TransformLoopForDeoptimizationIfNeeded()).
573    InsertPhiNodes();
574
575    // Clear the loop data structures.
576    early_exit_loop_.clear();
577    taken_test_loop_.clear();
578    finite_loop_.clear();
579    dynamic_bce_standby_.clear();
580  }
581
582 private:
583  // Return the map of proven value ranges at the beginning of a basic block.
584  ArenaSafeMap<int, ValueRange*>* GetValueRangeMap(HBasicBlock* basic_block) {
585    if (IsAddedBlock(basic_block)) {
586      // Added blocks don't keep value ranges.
587      return nullptr;
588    }
589    return &maps_[basic_block->GetBlockId()];
590  }
591
592  // Traverse up the dominator tree to look for value range info.
593  ValueRange* LookupValueRange(HInstruction* instruction, HBasicBlock* basic_block) {
594    while (basic_block != nullptr) {
595      ArenaSafeMap<int, ValueRange*>* map = GetValueRangeMap(basic_block);
596      if (map != nullptr) {
597        if (map->find(instruction->GetId()) != map->end()) {
598          return map->Get(instruction->GetId());
599        }
600      } else {
601        DCHECK(IsAddedBlock(basic_block));
602      }
603      basic_block = basic_block->GetDominator();
604    }
605    // Didn't find any.
606    return nullptr;
607  }
608
  // Helper method to assign a new range to an instruction in given basic block.
  // NOTE(review): assumes `basic_block` is not an added block — for added
  // blocks GetValueRangeMap() returns null and this would dereference it.
  // Confirm callers uphold this.
  void AssignRange(HBasicBlock* basic_block, HInstruction* instruction, ValueRange* range) {
    GetValueRangeMap(basic_block)->Overwrite(instruction->GetId(), range);
  }
613
  // Narrow the value range of `instruction` at the end of `basic_block` with `range`,
  // and push the narrowed value range to `successor`.
  void ApplyRangeFromComparison(HInstruction* instruction, HBasicBlock* basic_block,
                                HBasicBlock* successor, ValueRange* range) {
    ValueRange* existing_range = LookupValueRange(instruction, basic_block);
    if (existing_range == nullptr) {
      // No prior info to narrow: record the comparison-derived range as-is.
      if (range != nullptr) {
        AssignRange(successor, instruction, range);
      }
      return;
    }
    if (existing_range->IsMonotonicValueRange()) {
      // Only loop-header phis get monotonic ranges.
      DCHECK(instruction->IsLoopHeaderPhi());
      // Make sure the comparison is in the loop header so each increment is
      // checked with a comparison.
      if (instruction->GetBlock() != basic_block) {
        return;
      }
    }
    // Intersect the existing range with the comparison-derived one.
    AssignRange(successor, instruction, existing_range->Narrow(range));
  }
635
  // Special case that we may simultaneously narrow two MonotonicValueRange's to
  // regular value ranges.
  //
  // Handles the common "two indices walking towards each other" pattern:
  // `left` increments by 1 from a constant, `right` decrements by 1 from an
  // array-length-related value, and the loop-header test compares them.
  void HandleIfBetweenTwoMonotonicValueRanges(HIf* instruction,
                                              HInstruction* left,
                                              HInstruction* right,
                                              IfCondition cond,
                                              MonotonicValueRange* left_range,
                                              MonotonicValueRange* right_range) {
    DCHECK(left->IsLoopHeaderPhi());
    DCHECK(right->IsLoopHeaderPhi());
    if (instruction->GetBlock() != left->GetBlock()) {
      // Comparison needs to be in loop header to make sure it's done after each
      // increment/decrement.
      return;
    }

    // Handle common cases which also don't have overflow/underflow concerns.
    if (left_range->GetIncrement() == 1 &&
        left_range->GetBound().IsConstant() &&
        right_range->GetIncrement() == -1 &&
        right_range->GetBound().IsRelatedToArrayLength() &&
        right_range->GetBound().GetConstant() < 0) {
      HBasicBlock* successor = nullptr;
      // Compensations adjust for strict vs. non-strict comparisons, since
      // ValueRange bounds are inclusive.
      int32_t left_compensation = 0;
      int32_t right_compensation = 0;
      if (cond == kCondLT) {
        left_compensation = -1;
        right_compensation = 1;
        successor = instruction->IfTrueSuccessor();
      } else if (cond == kCondLE) {
        successor = instruction->IfTrueSuccessor();
      } else if (cond == kCondGT) {
        successor = instruction->IfFalseSuccessor();
      } else if (cond == kCondGE) {
        left_compensation = -1;
        right_compensation = 1;
        successor = instruction->IfFalseSuccessor();
      } else {
        // We don't handle '=='/'!=' test in case left and right can cross and
        // miss each other.
        return;
      }

      if (successor != nullptr) {
        bool overflow;
        bool underflow;
        // left is bounded below by its own start and above by right's bound.
        ValueRange* new_left_range = new (GetGraph()->GetArena()) ValueRange(
            GetGraph()->GetArena(),
            left_range->GetBound(),
            right_range->GetBound().Add(left_compensation, &overflow, &underflow));
        if (!overflow && !underflow) {
          ApplyRangeFromComparison(left, instruction->GetBlock(), successor,
                                   new_left_range);
        }

        // right is bounded below by left's bound and above by its own start.
        ValueRange* new_right_range = new (GetGraph()->GetArena()) ValueRange(
            GetGraph()->GetArena(),
            left_range->GetBound().Add(right_compensation, &overflow, &underflow),
            right_range->GetBound());
        if (!overflow && !underflow) {
          ApplyRangeFromComparison(right, instruction->GetBlock(), successor,
                                   new_right_range);
        }
      }
    }
  }
702
  // Handle "if (left cmp_cond right)".
  //
  // Derives value ranges for `left` on both successors of the if, from either
  // a recognized bound on `right` (constant / array.length+c) or from
  // `right`'s own known range.
  void HandleIf(HIf* instruction, HInstruction* left, HInstruction* right, IfCondition cond) {
    HBasicBlock* block = instruction->GetBlock();

    HBasicBlock* true_successor = instruction->IfTrueSuccessor();
    // There should be no critical edge at this point.
    DCHECK_EQ(true_successor->GetPredecessors().size(), 1u);

    HBasicBlock* false_successor = instruction->IfFalseSuccessor();
    // There should be no critical edge at this point.
    DCHECK_EQ(false_successor->GetPredecessors().size(), 1u);

    ValueRange* left_range = LookupValueRange(left, block);
    MonotonicValueRange* left_monotonic_range = nullptr;
    if (left_range != nullptr) {
      left_monotonic_range = left_range->AsMonotonicValueRange();
      if (left_monotonic_range != nullptr) {
        HBasicBlock* loop_head = left_monotonic_range->GetLoopHeader();
        if (instruction->GetBlock() != loop_head) {
          // For monotonic value range, don't handle `instruction`
          // if it's not defined in the loop header.
          return;
        }
      }
    }

    bool found;
    ValueBound bound = ValueBound::DetectValueBoundFromValue(right, &found);
    // Each comparison can establish a lower bound and an upper bound
    // for the left hand side.
    ValueBound lower = bound;
    ValueBound upper = bound;
    if (!found) {
      // No constant or array.length+c format bound found.
      // For i<j, we can still use j's upper bound as i's upper bound. Same for lower.
      ValueRange* right_range = LookupValueRange(right, block);
      if (right_range != nullptr) {
        if (right_range->IsMonotonicValueRange()) {
          if (left_range != nullptr && left_range->IsMonotonicValueRange()) {
            // Both sides are induction variables: delegate to the special case.
            HandleIfBetweenTwoMonotonicValueRanges(instruction, left, right, cond,
                                                   left_range->AsMonotonicValueRange(),
                                                   right_range->AsMonotonicValueRange());
            return;
          }
        }
        lower = right_range->GetLower();
        upper = right_range->GetUpper();
      } else {
        // Nothing known about `right`: the comparison yields no usable bounds.
        lower = ValueBound::Min();
        upper = ValueBound::Max();
      }
    }

    // Note: `overflow`/`underflow` are (re)written by each Add() call below.
    bool overflow, underflow;
    if (cond == kCondLT || cond == kCondLE) {
      // True branch: left < / <= bound, so bound (adjusted) is an upper bound.
      if (!upper.Equals(ValueBound::Max())) {
        int32_t compensation = (cond == kCondLT) ? -1 : 0;  // upper bound is inclusive
        ValueBound new_upper = upper.Add(compensation, &overflow, &underflow);
        if (overflow || underflow) {
          return;
        }
        ValueRange* new_range = new (GetGraph()->GetArena())
            ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper);
        ApplyRangeFromComparison(left, block, true_successor, new_range);
      }

      // False branch: the negated comparison gives a lower bound.
      // array.length as a lower bound isn't considered useful.
      if (!lower.Equals(ValueBound::Min()) && !lower.IsRelatedToArrayLength()) {
        int32_t compensation = (cond == kCondLE) ? 1 : 0;  // lower bound is inclusive
        ValueBound new_lower = lower.Add(compensation, &overflow, &underflow);
        if (overflow || underflow) {
          return;
        }
        ValueRange* new_range = new (GetGraph()->GetArena())
            ValueRange(GetGraph()->GetArena(), new_lower, ValueBound::Max());
        ApplyRangeFromComparison(left, block, false_successor, new_range);
      }
    } else if (cond == kCondGT || cond == kCondGE) {
      // True branch: left > / >= bound, so bound (adjusted) is a lower bound.
      // array.length as a lower bound isn't considered useful.
      if (!lower.Equals(ValueBound::Min()) && !lower.IsRelatedToArrayLength()) {
        int32_t compensation = (cond == kCondGT) ? 1 : 0;  // lower bound is inclusive
        ValueBound new_lower = lower.Add(compensation, &overflow, &underflow);
        if (overflow || underflow) {
          return;
        }
        ValueRange* new_range = new (GetGraph()->GetArena())
            ValueRange(GetGraph()->GetArena(), new_lower, ValueBound::Max());
        ApplyRangeFromComparison(left, block, true_successor, new_range);
      }

      // False branch: the negated comparison gives an upper bound.
      if (!upper.Equals(ValueBound::Max())) {
        int32_t compensation = (cond == kCondGE) ? -1 : 0;  // upper bound is inclusive
        ValueBound new_upper = upper.Add(compensation, &overflow, &underflow);
        if (overflow || underflow) {
          return;
        }
        ValueRange* new_range = new (GetGraph()->GetArena())
            ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper);
        ApplyRangeFromComparison(left, block, false_successor, new_range);
      }
    }
  }
805
  // Tries to eliminate one bounds check, in this order:
  //  1. statically, using the dominator-based value range of the index,
  //  2. statically, using induction variable analysis,
  //  3. dynamically, via loop-based or dominator-based deoptimization.
  // For constant indices it also strengthens the recorded range of
  // array_length: a surviving a[c] access proves array.length >= c + 1.
  void VisitBoundsCheck(HBoundsCheck* bounds_check) OVERRIDE {
    HBasicBlock* block = bounds_check->GetBlock();
    HInstruction* index = bounds_check->InputAt(0);
    HInstruction* array_length = bounds_check->InputAt(1);
    DCHECK(array_length->IsIntConstant() ||
           array_length->IsArrayLength() ||
           array_length->IsPhi());
    bool try_dynamic_bce = true;

    // Analyze index range.
    if (!index->IsIntConstant()) {
      // Non-constant index. A valid index lies in [0, array_length - 1].
      ValueBound lower = ValueBound(nullptr, 0);        // constant 0
      ValueBound upper = ValueBound(array_length, -1);  // array_length - 1
      ValueRange array_range(GetGraph()->GetArena(), lower, upper);
      // Try index range obtained by dominator-based analysis.
      ValueRange* index_range = LookupValueRange(index, block);
      if (index_range != nullptr && index_range->FitsIn(&array_range)) {
        ReplaceInstruction(bounds_check, index);
        return;
      }
      // Try index range obtained by induction variable analysis.
      // Disables dynamic bce if OOB is certain.
      if (InductionRangeFitsIn(&array_range, bounds_check, index, &try_dynamic_bce)) {
        ReplaceInstruction(bounds_check, index);
        return;
      }
    } else {
      // Constant index.
      int32_t constant = index->AsIntConstant()->GetValue();
      if (constant < 0) {
        // Will always throw exception.
        return;
      } else if (array_length->IsIntConstant()) {
        // Both index and length are constants: decide the check right here.
        if (constant < array_length->AsIntConstant()->GetValue()) {
          ReplaceInstruction(bounds_check, index);
        }
        return;
      }
      // Analyze array length range.
      DCHECK(array_length->IsArrayLength());
      ValueRange* existing_range = LookupValueRange(array_length, block);
      if (existing_range != nullptr) {
        ValueBound lower = existing_range->GetLower();
        DCHECK(lower.IsConstant());
        if (constant < lower.GetConstant()) {
          ReplaceInstruction(bounds_check, index);
          return;
        } else {
          // Existing range isn't strong enough to eliminate the bounds check.
          // Fall through to update the array_length range with info from this
          // bounds check.
        }
      }
      // Once we have an array access like 'array[5] = 1', we record array.length >= 6.
      // We currently don't do it for non-constant index since a valid array[i] can't prove
      // a valid array[i-1] yet due to the lower bound side.
      if (constant == std::numeric_limits<int32_t>::max()) {
        // Max() as an index will definitely throw AIOOBE.
        return;
      } else {
        // Record array.length in [constant + 1, Max()] for dominated checks.
        ValueBound lower = ValueBound(nullptr, constant + 1);
        ValueBound upper = ValueBound::Max();
        ValueRange* range = new (GetGraph()->GetArena())
            ValueRange(GetGraph()->GetArena(), lower, upper);
        AssignRange(block, array_length, range);
      }
    }

    // If static analysis fails, and OOB is not certain, try dynamic elimination.
    if (try_dynamic_bce) {
      // Try loop-based dynamic elimination.
      if (TryDynamicBCE(bounds_check)) {
        return;
      }
      // Prepare dominator-based dynamic elimination.
      if (first_index_bounds_check_map_.find(array_length->GetId()) ==
          first_index_bounds_check_map_.end()) {
        // Remember the first bounds check against each array_length. That bounds check
        // instruction has an associated HEnvironment where we may add an HDeoptimize
        // to eliminate subsequent bounds checks against the same array_length.
        first_index_bounds_check_map_.Put(array_length->GetId(), bounds_check);
      }
    }
  }
891
892  static bool HasSameInputAtBackEdges(HPhi* phi) {
893    DCHECK(phi->IsLoopHeaderPhi());
894    // Start with input 1. Input 0 is from the incoming block.
895    HInstruction* input1 = phi->InputAt(1);
896    DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge(
897        *phi->GetBlock()->GetPredecessors()[1]));
898    for (size_t i = 2, e = phi->InputCount(); i < e; ++i) {
899      DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge(
900          *phi->GetBlock()->GetPredecessors()[i]));
901      if (input1 != phi->InputAt(i)) {
902        return false;
903      }
904    }
905    return true;
906  }
907
  // Recognizes the induction pattern phi = [initial_value, phi + increment]
  // on an int-typed loop header phi (all back edges must carry the same
  // input) and records a range for the phi:
  //  - increment == 0: a fixed [initial_value, initial_value] range,
  //  - otherwise: a MonotonicValueRange anchored at a bound derived from
  //    initial_value (or Min()/Max() when nothing better is known).
  void VisitPhi(HPhi* phi) OVERRIDE {
    if (phi->IsLoopHeaderPhi()
        && (phi->GetType() == Primitive::kPrimInt)
        && HasSameInputAtBackEdges(phi)) {
      HInstruction* instruction = phi->InputAt(1);
      HInstruction *left;
      int32_t increment;
      if (ValueBound::IsAddOrSubAConstant(instruction, &left, &increment)) {
        // Only a self-referencing update (phi = phi +/- c) forms an induction.
        if (left == phi) {
          HInstruction* initial_value = phi->InputAt(0);
          ValueRange* range = nullptr;
          if (increment == 0) {
            // Add constant 0. It's really a fixed value.
            range = new (GetGraph()->GetArena()) ValueRange(
                GetGraph()->GetArena(),
                ValueBound(initial_value, 0),
                ValueBound(initial_value, 0));
          } else {
            // Monotonically increasing/decreasing.
            bool found;
            ValueBound bound = ValueBound::DetectValueBoundFromValue(
                initial_value, &found);
            if (!found) {
              // No constant or array.length+c bound found.
              // For i=j, we can still use j's upper bound as i's upper bound.
              // Same for lower.
              ValueRange* initial_range = LookupValueRange(initial_value, phi->GetBlock());
              if (initial_range != nullptr) {
                // Increasing induction keeps its lower bound; decreasing keeps
                // its upper bound.
                bound = increment > 0 ? initial_range->GetLower() :
                                        initial_range->GetUpper();
              } else {
                bound = increment > 0 ? ValueBound::Min() : ValueBound::Max();
              }
            }
            range = new (GetGraph()->GetArena()) MonotonicValueRange(
                GetGraph()->GetArena(),
                phi,
                initial_value,
                increment,
                bound);
          }
          AssignRange(phi->GetBlock(), phi, range);
        }
      }
    }
  }
954
955  void VisitIf(HIf* instruction) OVERRIDE {
956    if (instruction->InputAt(0)->IsCondition()) {
957      HCondition* cond = instruction->InputAt(0)->AsCondition();
958      IfCondition cmp = cond->GetCondition();
959      if (cmp == kCondGT || cmp == kCondGE ||
960          cmp == kCondLT || cmp == kCondLE) {
961        HInstruction* left = cond->GetLeft();
962        HInstruction* right = cond->GetRight();
963        HandleIf(instruction, left, right, cmp);
964      }
965    }
966  }
967
968  void VisitAdd(HAdd* add) OVERRIDE {
969    HInstruction* right = add->GetRight();
970    if (right->IsIntConstant()) {
971      ValueRange* left_range = LookupValueRange(add->GetLeft(), add->GetBlock());
972      if (left_range == nullptr) {
973        return;
974      }
975      ValueRange* range = left_range->Add(right->AsIntConstant()->GetValue());
976      if (range != nullptr) {
977        AssignRange(add->GetBlock(), add, range);
978      }
979    }
980  }
981
  // Handles subtraction in two ways: (1) like VisitAdd when the right operand
  // is a constant, and (2) the triangular nested-loop pattern
  // (array.length + c - v), deriving a range bounded by array.length.
  void VisitSub(HSub* sub) OVERRIDE {
    HInstruction* left = sub->GetLeft();
    HInstruction* right = sub->GetRight();
    if (right->IsIntConstant()) {
      ValueRange* left_range = LookupValueRange(left, sub->GetBlock());
      if (left_range == nullptr) {
        return;
      }
      // Subtracting a constant is adding its negation.
      ValueRange* range = left_range->Add(-right->AsIntConstant()->GetValue());
      if (range != nullptr) {
        AssignRange(sub->GetBlock(), sub, range);
        return;
      }
    }

    // Here we are interested in the typical triangular case of nested loops,
    // such as the inner loop 'for (int j=0; j<array.length-i; j++)' where i
    // is the index for outer loop. In this case, we know j is bounded by array.length-1.

    // Try to handle (array.length - i) or (array.length + c - i) format.
    HInstruction* left_of_left;  // left input of left.
    int32_t right_const = 0;
    if (ValueBound::IsAddOrSubAConstant(left, &left_of_left, &right_const)) {
      left = left_of_left;
    }
    // The value of left input of the sub equals (left + right_const).

    if (left->IsArrayLength()) {
      HInstruction* array_length = left->AsArrayLength();
      ValueRange* right_range = LookupValueRange(right, sub->GetBlock());
      if (right_range != nullptr) {
        ValueBound lower = right_range->GetLower();
        ValueBound upper = right_range->GetUpper();
        if (lower.IsConstant() && upper.IsRelatedToArrayLength()) {
          HInstruction* upper_inst = upper.GetInstruction();
          // Make sure it's the same array.
          if (ValueBound::Equal(array_length, upper_inst)) {
            int32_t c0 = right_const;
            int32_t c1 = lower.GetConstant();
            int32_t c2 = upper.GetConstant();
            // (array.length + c0 - v) where v is in [c1, array.length + c2]
            // gets [c0 - c2, array.length + c0 - c1] as its value range.
            if (!ValueBound::WouldAddOverflowOrUnderflow(c0, -c2) &&
                !ValueBound::WouldAddOverflowOrUnderflow(c0, -c1)) {
              if ((c0 - c1) <= 0) {
                // array.length + (c0 - c1) won't overflow/underflow.
                ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
                    GetGraph()->GetArena(),
                    ValueBound(nullptr, right_const - upper.GetConstant()),
                    ValueBound(array_length, right_const - lower.GetConstant()));
                AssignRange(sub->GetBlock(), sub, range);
              }
            }
          }
        }
      }
    }
  }
1040
  // Shared handler for HDiv/HShr/HUShr: detects array.length (optionally
  // offset by a constant c <= 1) divided or shifted right by a factor of two
  // or more, whose result always fits in [Min(), array.length].
  void FindAndHandlePartialArrayLength(HBinaryOperation* instruction) {
    DCHECK(instruction->IsDiv() || instruction->IsShr() || instruction->IsUShr());
    HInstruction* right = instruction->GetRight();
    int32_t right_const;
    if (right->IsIntConstant()) {
      right_const = right->AsIntConstant()->GetValue();
      // Detect division by two or more.
      // (For shifts, any shift amount >= 1 halves the value at least once.)
      if ((instruction->IsDiv() && right_const <= 1) ||
          (instruction->IsShr() && right_const < 1) ||
          (instruction->IsUShr() && right_const < 1)) {
        return;
      }
    } else {
      return;
    }

    // Try to handle array.length/2 or (array.length-1)/2 format.
    HInstruction* left = instruction->GetLeft();
    HInstruction* left_of_left;  // left input of left.
    int32_t c = 0;
    if (ValueBound::IsAddOrSubAConstant(left, &left_of_left, &c)) {
      left = left_of_left;
    }
    // The value of left input of instruction equals (left + c).

    // (array_length + 1) or smaller divided by two or more
    // always generate a value in [Min(), array_length].
    // This is true even if array_length is Max().
    if (left->IsArrayLength() && c <= 1) {
      if (instruction->IsUShr() && c < 0) {
        // Make sure for unsigned shift, left side is not negative.
        // e.g. if array_length is 2, ((array_length - 3) >>> 2) is way bigger
        // than array_length.
        return;
      }
      ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
          GetGraph()->GetArena(),
          ValueBound(nullptr, std::numeric_limits<int32_t>::min()),
          ValueBound(left, 0));
      AssignRange(instruction->GetBlock(), instruction, range);
    }
  }
1083
  // Division of (array.length + c) by a constant >= 2 is bounded above by
  // array.length; delegate to the shared handler.
  void VisitDiv(HDiv* div) OVERRIDE {
    FindAndHandlePartialArrayLength(div);
  }
1087
  // Arithmetic right shift of (array.length + c) by a constant >= 1 is
  // bounded above by array.length; delegate to the shared handler.
  void VisitShr(HShr* shr) OVERRIDE {
    FindAndHandlePartialArrayLength(shr);
  }
1091
  // Unsigned right shift: like VisitShr, but the shared handler additionally
  // rejects a possibly-negative left side (where >>> yields a huge value).
  void VisitUShr(HUShr* ushr) OVERRIDE {
    FindAndHandlePartialArrayLength(ushr);
  }
1095
1096  void VisitAnd(HAnd* instruction) OVERRIDE {
1097    if (instruction->GetRight()->IsIntConstant()) {
1098      int32_t constant = instruction->GetRight()->AsIntConstant()->GetValue();
1099      if (constant > 0) {
1100        // constant serves as a mask so any number masked with it
1101        // gets a [0, constant] value range.
1102        ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
1103            GetGraph()->GetArena(),
1104            ValueBound(nullptr, 0),
1105            ValueBound(nullptr, constant));
1106        AssignRange(instruction->GetBlock(), instruction, range);
1107      }
1108    }
1109  }
1110
1111  void VisitNewArray(HNewArray* new_array) OVERRIDE {
1112    HInstruction* len = new_array->InputAt(0);
1113    if (!len->IsIntConstant()) {
1114      HInstruction *left;
1115      int32_t right_const;
1116      if (ValueBound::IsAddOrSubAConstant(len, &left, &right_const)) {
1117        // (left + right_const) is used as size to new the array.
1118        // We record "-right_const <= left <= new_array - right_const";
1119        ValueBound lower = ValueBound(nullptr, -right_const);
1120        // We use new_array for the bound instead of new_array.length,
1121        // which isn't available as an instruction yet. new_array will
1122        // be treated the same as new_array.length when it's used in a ValueBound.
1123        ValueBound upper = ValueBound(new_array, -right_const);
1124        ValueRange* range = new (GetGraph()->GetArena())
1125            ValueRange(GetGraph()->GetArena(), lower, upper);
1126        ValueRange* existing_range = LookupValueRange(left, new_array->GetBlock());
1127        if (existing_range != nullptr) {
1128          range = existing_range->Narrow(range);
1129        }
1130        AssignRange(new_array->GetBlock(), left, range);
1131      }
1132    }
1133  }
1134
1135  /**
1136    * After null/bounds checks are eliminated, some invariant array references
1137    * may be exposed underneath which can be hoisted out of the loop to the
1138    * preheader or, in combination with dynamic bce, the deoptimization block.
1139    *
1140    * for (int i = 0; i < n; i++) {
1141    *                                <-------+
1142    *   for (int j = 0; j < n; j++)          |
1143    *     a[i][j] = 0;               --a[i]--+
1144    * }
1145    *
1146    * Note: this optimization is no longer applied after dominator-based dynamic deoptimization
1147    * has occurred (see AddCompareWithDeoptimization()), since in those cases it would be
1148    * unsafe to hoist array references across their deoptimization instruction inside a loop.
1149    */
1150  void VisitArrayGet(HArrayGet* array_get) OVERRIDE {
1151    if (!has_dom_based_dynamic_bce_ && array_get->IsInLoop()) {
1152      HLoopInformation* loop = array_get->GetBlock()->GetLoopInformation();
1153      if (loop->IsDefinedOutOfTheLoop(array_get->InputAt(0)) &&
1154          loop->IsDefinedOutOfTheLoop(array_get->InputAt(1))) {
1155        SideEffects loop_effects = side_effects_.GetLoopEffects(loop->GetHeader());
1156        if (!array_get->GetSideEffects().MayDependOn(loop_effects)) {
1157          HoistToPreHeaderOrDeoptBlock(loop, array_get);
1158        }
1159      }
1160    }
1161  }
1162
  // Perform dominator-based dynamic elimination on suitable set of bounds checks.
  // Inserts deoptimizing guards at the first bounds check recorded for this
  // array_length, covering all indices in [base + min_c, base + max_c]
  // (base == nullptr denotes pure constant indices, so only max_c is tested).
  void AddCompareWithDeoptimization(HBasicBlock* block,
                                    HInstruction* array_length,
                                    HInstruction* base,
                                    int32_t min_c, int32_t max_c) {
    HBoundsCheck* bounds_check =
        first_index_bounds_check_map_.Get(array_length->GetId())->AsBoundsCheck();
    // Construct deoptimization on single or double bounds on range [base-min_c,base+max_c],
    // for example either for a[0]..a[3] just 3 or for a[base-1]..a[base+3] both base-1
    // and base+3, since we made the assumption any in between value may occur too.
    static_assert(kMaxLengthForAddingDeoptimize < std::numeric_limits<int32_t>::max(),
                  "Incorrect max length may be subject to arithmetic wrap-around");
    HInstruction* upper = GetGraph()->GetIntConstant(max_c);
    if (base == nullptr) {
      // Constant indices only: a non-negative min_c makes the lower test moot.
      DCHECK_GE(min_c, 0);
    } else {
      // Materialize base+min_c and base+max_c, then guard against unsigned
      // wrap-around with: if (lower > upper) deoptimize;
      HInstruction* lower = new (GetGraph()->GetArena())
          HAdd(Primitive::kPrimInt, base, GetGraph()->GetIntConstant(min_c));
      upper = new (GetGraph()->GetArena()) HAdd(Primitive::kPrimInt, base, upper);
      block->InsertInstructionBefore(lower, bounds_check);
      block->InsertInstructionBefore(upper, bounds_check);
      InsertDeoptInBlock(bounds_check, new (GetGraph()->GetArena()) HAbove(lower, upper));
    }
    // Unsigned comparison covers both index < 0 and index >= length at once.
    InsertDeoptInBlock(bounds_check, new (GetGraph()->GetArena()) HAboveOrEqual(upper, array_length));
    // Flag that this kind of deoptimization has occurred.
    has_dom_based_dynamic_bce_ = true;
  }
1190
  // Attempt dominator-based dynamic elimination on remaining candidates.
  // For each array_length with a recorded first bounds check, gathers all
  // related checks of the form a[base + constant] dominated by this block and,
  // if profitable, replaces them with a single deoptimizing guard.
  void AddComparesWithDeoptimization(HBasicBlock* block) {
    for (const auto& entry : first_index_bounds_check_map_) {
      HBoundsCheck* bounds_check = entry.second;
      HInstruction* index = bounds_check->InputAt(0);
      HInstruction* array_length = bounds_check->InputAt(1);
      if (!array_length->IsArrayLength()) {
        continue;  // disregard phis and constants
      }
      // Collect all bounds checks that are still there and that are related as "a[base + constant]"
      // for a base instruction (possibly absent) and various constants. Note that no attempt
      // is made to partition the set into matching subsets (viz. a[0], a[1] and a[base+1] and
      // a[base+2] are considered as one set).
      // TODO: would such a partitioning be worthwhile?
      ValueBound value = ValueBound::AsValueBound(index);
      HInstruction* base = value.GetInstruction();
      // min_c/max_c track the smallest/largest constant offset seen so far.
      int32_t min_c = base == nullptr ? 0 : value.GetConstant();
      int32_t max_c = value.GetConstant();
      ArenaVector<HBoundsCheck*> candidates(
          GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
      ArenaVector<HBoundsCheck*> standby(
          GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
      for (const HUseListNode<HInstruction*>& use : array_length->GetUses()) {
        // Another bounds check in same or dominated block?
        HInstruction* user = use.GetUser();
        HBasicBlock* other_block = user->GetBlock();
        if (user->IsBoundsCheck() && block->Dominates(other_block)) {
          HBoundsCheck* other_bounds_check = user->AsBoundsCheck();
          HInstruction* other_index = other_bounds_check->InputAt(0);
          HInstruction* other_array_length = other_bounds_check->InputAt(1);
          ValueBound other_value = ValueBound::AsValueBound(other_index);
          if (array_length == other_array_length && base == other_value.GetInstruction()) {
            // Reject certain OOB if BoundsCheck(l, l) occurs on considered subset.
            if (array_length == other_index) {
              candidates.clear();
              standby.clear();
              break;
            }
            // Since a subsequent dominated block could be under a conditional, only accept
            // the other bounds check if it is in same block or both blocks dominate the exit.
            // TODO: we could improve this by testing proper post-dominance, or even if this
            //       constant is seen along *all* conditional paths that follow.
            HBasicBlock* exit = GetGraph()->GetExitBlock();
            if (block == user->GetBlock() ||
                (block->Dominates(exit) && other_block->Dominates(exit))) {
              int32_t other_c = other_value.GetConstant();
              min_c = std::min(min_c, other_c);
              max_c = std::max(max_c, other_c);
              candidates.push_back(other_bounds_check);
            } else {
              // Add this candidate later only if it falls into the range.
              standby.push_back(other_bounds_check);
            }
          }
        }
      }
      // Add standby candidates that fall in selected range.
      for (HBoundsCheck* other_bounds_check : standby) {
        HInstruction* other_index = other_bounds_check->InputAt(0);
        int32_t other_c = ValueBound::AsValueBound(other_index).GetConstant();
        if (min_c <= other_c && other_c <= max_c) {
          candidates.push_back(other_bounds_check);
        }
      }
      // Perform dominator-based deoptimization if it seems profitable. Note that we reject cases
      // where the distance min_c:max_c range gets close to the maximum possible array length,
      // since those cases are likely to always deopt (such situations do not necessarily go
      // OOB, though, since the programmer could rely on wrap-around from max to min).
      size_t threshold = kThresholdForAddingDeoptimize + (base == nullptr ? 0 : 1);  // extra test?
      uint32_t distance = static_cast<uint32_t>(max_c) - static_cast<uint32_t>(min_c);
      if (candidates.size() >= threshold &&
          (base != nullptr || min_c >= 0) &&  // reject certain OOB
           distance <= kMaxLengthForAddingDeoptimize) {  // reject likely/certain deopt
        AddCompareWithDeoptimization(block, array_length, base, min_c, max_c);
        for (HInstruction* other_bounds_check : candidates) {
          // Only replace if still in the graph. This avoids visiting the same
          // bounds check twice if it occurred multiple times in the use list.
          if (other_bounds_check->IsInBlock()) {
            ReplaceInstruction(other_bounds_check, other_bounds_check->InputAt(0));
          }
        }
      }
    }
  }
1275
1276  /**
1277   * Returns true if static range analysis based on induction variables can determine the bounds
1278   * check on the given array range is always satisfied with the computed index range. The output
1279   * parameter try_dynamic_bce is set to false if OOB is certain.
1280   */
1281  bool InductionRangeFitsIn(ValueRange* array_range,
1282                            HInstruction* context,
1283                            HInstruction* index,
1284                            bool* try_dynamic_bce) {
1285    InductionVarRange::Value v1;
1286    InductionVarRange::Value v2;
1287    bool needs_finite_test = false;
1288    if (induction_range_.GetInductionRange(context, index, &v1, &v2, &needs_finite_test)) {
1289      do {
1290        if (v1.is_known && (v1.a_constant == 0 || v1.a_constant == 1) &&
1291            v2.is_known && (v2.a_constant == 0 || v2.a_constant == 1)) {
1292          DCHECK(v1.a_constant == 1 || v1.instruction == nullptr);
1293          DCHECK(v2.a_constant == 1 || v2.instruction == nullptr);
1294          ValueRange index_range(GetGraph()->GetArena(),
1295                                 ValueBound(v1.instruction, v1.b_constant),
1296                                 ValueBound(v2.instruction, v2.b_constant));
1297          // If analysis reveals a certain OOB, disable dynamic BCE.
1298          if (index_range.GetLower().LessThan(array_range->GetLower()) ||
1299              index_range.GetUpper().GreaterThan(array_range->GetUpper())) {
1300            *try_dynamic_bce = false;
1301            return false;
1302          }
1303          // Use analysis for static bce only if loop is finite.
1304          if (!needs_finite_test && index_range.FitsIn(array_range)) {
1305            return true;
1306          }
1307        }
1308      } while (induction_range_.RefineOuter(&v1, &v2));
1309    }
1310    return false;
1311  }
1312
1313  /**
1314   * When the compiler fails to remove a bounds check statically, we try to remove the bounds
1315   * check dynamically by adding runtime tests that trigger a deoptimization in case bounds
1316   * will go out of range (we want to be rather certain of that given the slowdown of
1317   * deoptimization). If no deoptimization occurs, the loop is executed with all corresponding
1318   * bounds checks and related null checks removed.
1319   */
1320  bool TryDynamicBCE(HBoundsCheck* instruction) {
1321    HLoopInformation* loop = instruction->GetBlock()->GetLoopInformation();
1322    HInstruction* index = instruction->InputAt(0);
1323    HInstruction* length = instruction->InputAt(1);
1324    // If dynamic bounds check elimination seems profitable and is possible, then proceed.
1325    bool needs_finite_test = false;
1326    bool needs_taken_test = false;
1327    if (DynamicBCESeemsProfitable(loop, instruction->GetBlock()) &&
1328        induction_range_.CanGenerateCode(
1329            instruction, index, &needs_finite_test, &needs_taken_test) &&
1330        CanHandleInfiniteLoop(loop, instruction, index, needs_finite_test) &&
1331        CanHandleLength(loop, length, needs_taken_test)) {  // do this test last (may code gen)
1332      HInstruction* lower = nullptr;
1333      HInstruction* upper = nullptr;
1334      // Generate the following unsigned comparisons
1335      //     if (lower > upper)   deoptimize;
1336      //     if (upper >= length) deoptimize;
1337      // or, for a non-induction index, just the unsigned comparison on its 'upper' value
1338      //     if (upper >= length) deoptimize;
1339      // as runtime test. By restricting dynamic bce to unit strides (with a maximum of 32-bit
1340      // iterations) and by not combining access (e.g. a[i], a[i-3], a[i+5] etc.), these tests
1341      // correctly guard against any possible OOB (including arithmetic wrap-around cases).
1342      TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
1343      HBasicBlock* block = GetPreHeader(loop, instruction);
1344      induction_range_.GenerateRangeCode(instruction, index, GetGraph(), block, &lower, &upper);
1345      if (lower != nullptr) {
1346        InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAbove(lower, upper));
1347      }
1348      InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAboveOrEqual(upper, length));
1349      ReplaceInstruction(instruction, index);
1350      return true;
1351    }
1352    return false;
1353  }
1354
1355  /**
1356   * Returns true if heuristics indicate that dynamic bce may be profitable.
1357   */
1358  bool DynamicBCESeemsProfitable(HLoopInformation* loop, HBasicBlock* block) {
1359    if (loop != nullptr) {
1360      // The loop preheader of an irreducible loop does not dominate all the blocks in
1361      // the loop. We would need to find the common dominator of all blocks in the loop.
1362      if (loop->IsIrreducible()) {
1363        return false;
1364      }
1365      // We should never deoptimize from an osr method, otherwise we might wrongly optimize
1366      // code dominated by the deoptimization.
1367      if (GetGraph()->IsCompilingOsr()) {
1368        return false;
1369      }
1370      // A try boundary preheader is hard to handle.
1371      // TODO: remove this restriction.
1372      if (loop->GetPreHeader()->GetLastInstruction()->IsTryBoundary()) {
1373        return false;
1374      }
1375      // Does loop have early-exits? If so, the full range may not be covered by the loop
1376      // at runtime and testing the range may apply deoptimization unnecessarily.
1377      if (IsEarlyExitLoop(loop)) {
1378        return false;
1379      }
1380      // Does the current basic block dominate all back edges? If not,
1381      // don't apply dynamic bce to something that may not be executed.
1382      for (HBasicBlock* back_edge : loop->GetBackEdges()) {
1383        if (!block->Dominates(back_edge)) {
1384          return false;
1385        }
1386      }
1387      // Success!
1388      return true;
1389    }
1390    return false;
1391  }
1392
1393  /**
1394   * Returns true if the loop has early exits, which implies it may not cover
1395   * the full range computed by range analysis based on induction variables.
1396   */
1397  bool IsEarlyExitLoop(HLoopInformation* loop) {
1398    const uint32_t loop_id = loop->GetHeader()->GetBlockId();
1399    // If loop has been analyzed earlier for early-exit, don't repeat the analysis.
1400    auto it = early_exit_loop_.find(loop_id);
1401    if (it != early_exit_loop_.end()) {
1402      return it->second;
1403    }
1404    // First time early-exit analysis for this loop. Since analysis requires scanning
1405    // the full loop-body, results of the analysis is stored for subsequent queries.
1406    HBlocksInLoopReversePostOrderIterator it_loop(*loop);
1407    for (it_loop.Advance(); !it_loop.Done(); it_loop.Advance()) {
1408      for (HBasicBlock* successor : it_loop.Current()->GetSuccessors()) {
1409        if (!loop->Contains(*successor)) {
1410          early_exit_loop_.Put(loop_id, true);
1411          return true;
1412        }
1413      }
1414    }
1415    early_exit_loop_.Put(loop_id, false);
1416    return false;
1417  }
1418
1419  /**
1420   * Returns true if the array length is already loop invariant, or can be made so
1421   * by handling the null check under the hood of the array length operation.
1422   */
1423  bool CanHandleLength(HLoopInformation* loop, HInstruction* length, bool needs_taken_test) {
1424    if (loop->IsDefinedOutOfTheLoop(length)) {
1425      return true;
1426    } else if (length->IsArrayLength() && length->GetBlock()->GetLoopInformation() == loop) {
1427      if (CanHandleNullCheck(loop, length->InputAt(0), needs_taken_test)) {
1428        HoistToPreHeaderOrDeoptBlock(loop, length);
1429        return true;
1430      }
1431    }
1432    return false;
1433  }
1434
1435  /**
1436   * Returns true if the null check is already loop invariant, or can be made so
1437   * by generating a deoptimization test.
1438   */
1439  bool CanHandleNullCheck(HLoopInformation* loop, HInstruction* check, bool needs_taken_test) {
1440    if (loop->IsDefinedOutOfTheLoop(check)) {
1441      return true;
1442    } else if (check->IsNullCheck() && check->GetBlock()->GetLoopInformation() == loop) {
1443      HInstruction* array = check->InputAt(0);
1444      if (loop->IsDefinedOutOfTheLoop(array)) {
1445        // Generate: if (array == null) deoptimize;
1446        TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
1447        HBasicBlock* block = GetPreHeader(loop, check);
1448        HInstruction* cond =
1449            new (GetGraph()->GetArena()) HEqual(array, GetGraph()->GetNullConstant());
1450        InsertDeoptInLoop(loop, block, cond);
1451        ReplaceInstruction(check, array);
1452        return true;
1453      }
1454    }
1455    return false;
1456  }
1457
1458  /**
1459   * Returns true if compiler can apply dynamic bce to loops that may be infinite
1460   * (e.g. for (int i = 0; i <= U; i++) with U = MAX_INT), which would invalidate
1461   * the range analysis evaluation code by "overshooting" the computed range.
1462   * Since deoptimization would be a bad choice, and there is no other version
1463   * of the loop to use, dynamic bce in such cases is only allowed if other tests
1464   * ensure the loop is finite.
1465   */
1466  bool CanHandleInfiniteLoop(
1467      HLoopInformation* loop, HBoundsCheck* check, HInstruction* index, bool needs_infinite_test) {
1468    if (needs_infinite_test) {
1469      // If we already forced the loop to be finite, allow directly.
1470      const uint32_t loop_id = loop->GetHeader()->GetBlockId();
1471      if (finite_loop_.find(loop_id) != finite_loop_.end()) {
1472        return true;
1473      }
1474      // Otherwise, allow dynamic bce if the index (which is necessarily an induction at
1475      // this point) is the direct loop index (viz. a[i]), since then the runtime tests
1476      // ensure upper bound cannot cause an infinite loop.
1477      HInstruction* control = loop->GetHeader()->GetLastInstruction();
1478      if (control->IsIf()) {
1479        HInstruction* if_expr = control->AsIf()->InputAt(0);
1480        if (if_expr->IsCondition()) {
1481          HCondition* condition = if_expr->AsCondition();
1482          if (index == condition->InputAt(0) ||
1483              index == condition->InputAt(1)) {
1484            finite_loop_.insert(loop_id);
1485            return true;
1486          }
1487        }
1488      }
1489      // If bounds check made it this far, it is worthwhile to check later if
1490      // the loop was forced finite by another candidate.
1491      if (record_dynamic_bce_standby_) {
1492        dynamic_bce_standby_.push_back(check);
1493      }
1494      return false;
1495    }
1496    return true;
1497  }
1498
1499  /**
1500   * Returns appropriate preheader for the loop, depending on whether the
1501   * instruction appears in the loop header or proper loop-body.
1502   */
1503  HBasicBlock* GetPreHeader(HLoopInformation* loop, HInstruction* instruction) {
1504    // Use preheader unless there is an earlier generated deoptimization block since
1505    // hoisted expressions may depend on and/or used by the deoptimization tests.
1506    HBasicBlock* header = loop->GetHeader();
1507    const uint32_t loop_id = header->GetBlockId();
1508    auto it = taken_test_loop_.find(loop_id);
1509    if (it != taken_test_loop_.end()) {
1510      HBasicBlock* block = it->second;
1511      // If always taken, keep it that way by returning the original preheader,
1512      // which can be found by following the predecessor of the true-block twice.
1513      if (instruction->GetBlock() == header) {
1514        return block->GetSinglePredecessor()->GetSinglePredecessor();
1515      }
1516      return block;
1517    }
1518    return loop->GetPreHeader();
1519  }
1520
  /** Inserts a deoptimization test in a loop preheader. */
  void InsertDeoptInLoop(HLoopInformation* loop, HBasicBlock* block, HInstruction* condition) {
    HInstruction* suspend = loop->GetSuspendCheck();
    // Place the condition just before the block's terminator...
    block->InsertInstructionBefore(condition, block->GetLastInstruction());
    // ...followed by the deoptimization itself, reusing the dex pc of the
    // loop's suspend check for the new instruction.
    HDeoptimize* deoptimize =
        new (GetGraph()->GetArena()) HDeoptimize(condition, suspend->GetDexPc());
    block->InsertInstructionBefore(deoptimize, block->GetLastInstruction());
    if (suspend->HasEnvironment()) {
      // Derive the deoptimization's environment from the suspend check's,
      // with loop header phis adjusted for use outside the loop body.
      deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
          suspend->GetEnvironment(), loop->GetHeader());
    }
  }
1533
1534  /** Inserts a deoptimization test right before a bounds check. */
1535  void InsertDeoptInBlock(HBoundsCheck* bounds_check, HInstruction* condition) {
1536    HBasicBlock* block = bounds_check->GetBlock();
1537    block->InsertInstructionBefore(condition, bounds_check);
1538    HDeoptimize* deoptimize =
1539        new (GetGraph()->GetArena()) HDeoptimize(condition, bounds_check->GetDexPc());
1540    block->InsertInstructionBefore(deoptimize, bounds_check);
1541    deoptimize->CopyEnvironmentFrom(bounds_check->GetEnvironment());
1542  }
1543
1544  /** Hoists instruction out of the loop to preheader or deoptimization block. */
1545  void HoistToPreHeaderOrDeoptBlock(HLoopInformation* loop, HInstruction* instruction) {
1546    HBasicBlock* block = GetPreHeader(loop, instruction);
1547    DCHECK(!instruction->HasEnvironment());
1548    instruction->MoveBefore(block->GetLastInstruction());
1549  }
1550
1551  /**
1552   * Adds a new taken-test structure to a loop if needed and not already done.
1553   * The taken-test protects range analysis evaluation code to avoid any
1554   * deoptimization caused by incorrect trip-count evaluation in non-taken loops.
1555   *
1556   *          old_preheader
1557   *               |
1558   *            if_block          <- taken-test protects deoptimization block
1559   *            /      \
1560   *     true_block  false_block  <- deoptimizations/invariants are placed in true_block
1561   *            \       /
1562   *          new_preheader       <- may require phi nodes to preserve SSA structure
1563   *                |
1564   *             header
1565   *
1566   * For example, this loop:
1567   *
1568   *   for (int i = lower; i < upper; i++) {
1569   *     array[i] = 0;
1570   *   }
1571   *
1572   * will be transformed to:
1573   *
1574   *   if (lower < upper) {
1575   *     if (array == null) deoptimize;
1576   *     array_length = array.length;
1577   *     if (lower > upper)         deoptimize;  // unsigned
1578   *     if (upper >= array_length) deoptimize;  // unsigned
1579   *   } else {
1580   *     array_length = 0;
1581   *   }
1582   *   for (int i = lower; i < upper; i++) {
1583   *     // Loop without null check and bounds check, and any array.length replaced with array_length.
1584   *     array[i] = 0;
1585   *   }
1586   */
1587  void TransformLoopForDeoptimizationIfNeeded(HLoopInformation* loop, bool needs_taken_test) {
1588    // Not needed (can use preheader) or already done (can reuse)?
1589    const uint32_t loop_id = loop->GetHeader()->GetBlockId();
1590    if (!needs_taken_test || taken_test_loop_.find(loop_id) != taken_test_loop_.end()) {
1591      return;
1592    }
1593
1594    // Generate top test structure.
1595    HBasicBlock* header = loop->GetHeader();
1596    GetGraph()->TransformLoopHeaderForBCE(header);
1597    HBasicBlock* new_preheader = loop->GetPreHeader();
1598    HBasicBlock* if_block = new_preheader->GetDominator();
1599    HBasicBlock* true_block = if_block->GetSuccessors()[0];  // True successor.
1600    HBasicBlock* false_block = if_block->GetSuccessors()[1];  // False successor.
1601
1602    // Goto instructions.
1603    true_block->AddInstruction(new (GetGraph()->GetArena()) HGoto());
1604    false_block->AddInstruction(new (GetGraph()->GetArena()) HGoto());
1605    new_preheader->AddInstruction(new (GetGraph()->GetArena()) HGoto());
1606
1607    // Insert the taken-test to see if the loop body is entered. If the
1608    // loop isn't entered at all, it jumps around the deoptimization block.
1609    if_block->AddInstruction(new (GetGraph()->GetArena()) HGoto());  // placeholder
1610    HInstruction* condition = nullptr;
1611    induction_range_.GenerateTakenTest(header->GetLastInstruction(),
1612                                       GetGraph(),
1613                                       if_block,
1614                                       &condition);
1615    DCHECK(condition != nullptr);
1616    if_block->RemoveInstruction(if_block->GetLastInstruction());
1617    if_block->AddInstruction(new (GetGraph()->GetArena()) HIf(condition));
1618
1619    taken_test_loop_.Put(loop_id, true_block);
1620  }
1621
1622  /**
1623   * Inserts phi nodes that preserve SSA structure in generated top test structures.
1624   * All uses of instructions in the deoptimization block that reach the loop need
1625   * a phi node in the new loop preheader to fix the dominance relation.
1626   *
1627   * Example:
1628   *           if_block
1629   *            /      \
1630   *         x_0 = ..  false_block
1631   *            \       /
1632   *           x_1 = phi(x_0, null)   <- synthetic phi
1633   *               |
1634   *          new_preheader
1635   */
1636  void InsertPhiNodes() {
1637    // Scan all new deoptimization blocks.
1638    for (auto it1 = taken_test_loop_.begin(); it1 != taken_test_loop_.end(); ++it1) {
1639      HBasicBlock* true_block = it1->second;
1640      HBasicBlock* new_preheader = true_block->GetSingleSuccessor();
1641      // Scan all instructions in a new deoptimization block.
1642      for (HInstructionIterator it(true_block->GetInstructions()); !it.Done(); it.Advance()) {
1643        HInstruction* instruction = it.Current();
1644        Primitive::Type type = instruction->GetType();
1645        HPhi* phi = nullptr;
1646        // Scan all uses of an instruction and replace each later use with a phi node.
1647        const HUseList<HInstruction*>& uses = instruction->GetUses();
1648        for (auto it2 = uses.begin(), end2 = uses.end(); it2 != end2; /* ++it2 below */) {
1649          HInstruction* user = it2->GetUser();
1650          size_t index = it2->GetIndex();
1651          // Increment `it2` now because `*it2` may disappear thanks to user->ReplaceInput().
1652          ++it2;
1653          if (user->GetBlock() != true_block) {
1654            if (phi == nullptr) {
1655              phi = NewPhi(new_preheader, instruction, type);
1656            }
1657            user->ReplaceInput(phi, index);  // Removes the use node from the list.
1658          }
1659        }
1660        // Scan all environment uses of an instruction and replace each later use with a phi node.
1661        const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
1662        for (auto it2 = env_uses.begin(), end2 = env_uses.end(); it2 != end2; /* ++it2 below */) {
1663          HEnvironment* user = it2->GetUser();
1664          size_t index = it2->GetIndex();
1665          // Increment `it2` now because `*it2` may disappear thanks to user->RemoveAsUserOfInput().
1666          ++it2;
1667          if (user->GetHolder()->GetBlock() != true_block) {
1668            if (phi == nullptr) {
1669              phi = NewPhi(new_preheader, instruction, type);
1670            }
1671            user->RemoveAsUserOfInput(index);
1672            user->SetRawEnvAt(index, phi);
1673            phi->AddEnvUseAt(user, index);
1674          }
1675        }
1676      }
1677    }
1678  }
1679
1680  /**
1681   * Construct a phi(instruction, 0) in the new preheader to fix the dominance relation.
1682   * These are synthetic phi nodes without a virtual register.
1683   */
1684  HPhi* NewPhi(HBasicBlock* new_preheader,
1685               HInstruction* instruction,
1686               Primitive::Type type) {
1687    HGraph* graph = GetGraph();
1688    HInstruction* zero;
1689    switch (type) {
1690      case Primitive::kPrimNot: zero = graph->GetNullConstant(); break;
1691      case Primitive::kPrimFloat: zero = graph->GetFloatConstant(0); break;
1692      case Primitive::kPrimDouble: zero = graph->GetDoubleConstant(0); break;
1693      default: zero = graph->GetConstant(type, 0); break;
1694    }
1695    HPhi* phi = new (graph->GetArena())
1696        HPhi(graph->GetArena(), kNoRegNumber, /*number_of_inputs*/ 2, HPhi::ToPhiType(type));
1697    phi->SetRawInputAt(0, instruction);
1698    phi->SetRawInputAt(1, zero);
1699    if (type == Primitive::kPrimNot) {
1700      phi->SetReferenceTypeInfo(instruction->GetReferenceTypeInfo());
1701    }
1702    new_preheader->AddPhi(phi);
1703    return phi;
1704  }
1705
  /** Helper method to replace an instruction with another instruction. */
  static void ReplaceInstruction(HInstruction* instruction, HInstruction* replacement) {
    // Reroute all uses to the replacement first; the instruction is then dead
    // and can be removed from its block.
    instruction->ReplaceWith(replacement);
    instruction->GetBlock()->RemoveInstruction(instruction);
  }
1711
  // A set of maps, one per basic block, from instruction to range.
  ArenaVector<ArenaSafeMap<int, ValueRange*>> maps_;

  // Map an HArrayLength instruction's id to the first HBoundsCheck instruction
  // in a block that checks an index against that HArrayLength.
  ArenaSafeMap<int, HBoundsCheck*> first_index_bounds_check_map_;

  // Stand by list for dynamic bce: bounds checks to revisit once their loop
  // may have been forced finite by another candidate (see CanHandleInfiniteLoop).
  ArenaVector<HBoundsCheck*> dynamic_bce_standby_;
  // Whether rejected candidates should currently be recorded on the stand-by list.
  bool record_dynamic_bce_standby_;

  // Early-exit loop bookkeeping: loop header block id -> whether the loop
  // has an exit other than its normal back edge path.
  ArenaSafeMap<uint32_t, bool> early_exit_loop_;

  // Taken-test loop bookkeeping: loop header block id -> the generated
  // true-block that holds deoptimizations and hoisted invariants.
  ArenaSafeMap<uint32_t, HBasicBlock*> taken_test_loop_;

  // Finite loop bookkeeping: header block ids of loops forced/proven finite.
  ArenaSet<uint32_t> finite_loop_;

  // Flag that denotes whether dominator-based dynamic elimination has occurred.
  bool has_dom_based_dynamic_bce_;

  // Initial number of blocks (before any transformation added new ones).
  uint32_t initial_block_size_;

  // Side effects.
  const SideEffectsAnalysis& side_effects_;

  // Range analysis based on induction variables.
  InductionVarRange induction_range_;

  DISALLOW_COPY_AND_ASSIGN(BCEVisitor);
1745};
1746
1747void BoundsCheckElimination::Run() {
1748  if (!graph_->HasBoundsChecks()) {
1749    return;
1750  }
1751
1752  // Reverse post order guarantees a node's dominators are visited first.
1753  // We want to visit in the dominator-based order since if a value is known to
1754  // be bounded by a range at one instruction, it must be true that all uses of
1755  // that value dominated by that instruction fits in that range. Range of that
1756  // value can be narrowed further down in the dominator tree.
1757  BCEVisitor visitor(graph_, side_effects_, induction_analysis_);
1758  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
1759    HBasicBlock* current = it.Current();
1760    if (visitor.IsAddedBlock(current)) {
1761      // Skip added blocks. Their effects are already taken care of.
1762      continue;
1763    }
1764    visitor.VisitBasicBlock(current);
1765    // Skip forward to the current block in case new basic blocks were inserted
1766    // (which always appear earlier in reverse post order) to avoid visiting the
1767    // same basic block twice.
1768    for ( ; !it.Done() && it.Current() != current; it.Advance()) {
1769    }
1770  }
1771
1772  // Perform cleanup.
1773  visitor.Finish();
1774}
1775
1776}  // namespace art
1777