// bounds_check_elimination.cc — revision d59c70627cc42878cc30b46bd29ff497b4483b22
1/* 2 * Copyright (C) 2014 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "bounds_check_elimination.h" 18 19#include <limits> 20 21#include "base/arena_containers.h" 22#include "induction_var_range.h" 23#include "nodes.h" 24 25namespace art { 26 27class MonotonicValueRange; 28 29/** 30 * A value bound is represented as a pair of value and constant, 31 * e.g. array.length - 1. 32 */ 33class ValueBound : public ValueObject { 34 public: 35 ValueBound(HInstruction* instruction, int32_t constant) { 36 if (instruction != nullptr && instruction->IsIntConstant()) { 37 // Normalize ValueBound with constant instruction. 38 int32_t instr_const = instruction->AsIntConstant()->GetValue(); 39 if (!WouldAddOverflowOrUnderflow(instr_const, constant)) { 40 instruction_ = nullptr; 41 constant_ = instr_const + constant; 42 return; 43 } 44 } 45 instruction_ = instruction; 46 constant_ = constant; 47 } 48 49 // Return whether (left + right) overflows or underflows. 50 static bool WouldAddOverflowOrUnderflow(int32_t left, int32_t right) { 51 if (right == 0) { 52 return false; 53 } 54 if ((right > 0) && (left <= (std::numeric_limits<int32_t>::max() - right))) { 55 // No overflow. 56 return false; 57 } 58 if ((right < 0) && (left >= (std::numeric_limits<int32_t>::min() - right))) { 59 // No underflow. 
60 return false; 61 } 62 return true; 63 } 64 65 static bool IsAddOrSubAConstant(HInstruction* instruction, 66 HInstruction** left_instruction, 67 int* right_constant) { 68 if (instruction->IsAdd() || instruction->IsSub()) { 69 HBinaryOperation* bin_op = instruction->AsBinaryOperation(); 70 HInstruction* left = bin_op->GetLeft(); 71 HInstruction* right = bin_op->GetRight(); 72 if (right->IsIntConstant()) { 73 *left_instruction = left; 74 int32_t c = right->AsIntConstant()->GetValue(); 75 *right_constant = instruction->IsAdd() ? c : -c; 76 return true; 77 } 78 } 79 *left_instruction = nullptr; 80 *right_constant = 0; 81 return false; 82 } 83 84 // Try to detect useful value bound format from an instruction, e.g. 85 // a constant or array length related value. 86 static ValueBound DetectValueBoundFromValue(HInstruction* instruction, bool* found) { 87 DCHECK(instruction != nullptr); 88 if (instruction->IsIntConstant()) { 89 *found = true; 90 return ValueBound(nullptr, instruction->AsIntConstant()->GetValue()); 91 } 92 93 if (instruction->IsArrayLength()) { 94 *found = true; 95 return ValueBound(instruction, 0); 96 } 97 // Try to detect (array.length + c) format. 98 HInstruction *left; 99 int32_t right; 100 if (IsAddOrSubAConstant(instruction, &left, &right)) { 101 if (left->IsArrayLength()) { 102 *found = true; 103 return ValueBound(left, right); 104 } 105 } 106 107 // No useful bound detected. 108 *found = false; 109 return ValueBound::Max(); 110 } 111 112 HInstruction* GetInstruction() const { return instruction_; } 113 int32_t GetConstant() const { return constant_; } 114 115 bool IsRelatedToArrayLength() const { 116 // Some bounds are created with HNewArray* as the instruction instead 117 // of HArrayLength*. They are treated the same. 
118 return (instruction_ != nullptr) && 119 (instruction_->IsArrayLength() || instruction_->IsNewArray()); 120 } 121 122 bool IsConstant() const { 123 return instruction_ == nullptr; 124 } 125 126 static ValueBound Min() { return ValueBound(nullptr, std::numeric_limits<int32_t>::min()); } 127 static ValueBound Max() { return ValueBound(nullptr, std::numeric_limits<int32_t>::max()); } 128 129 bool Equals(ValueBound bound) const { 130 return instruction_ == bound.instruction_ && constant_ == bound.constant_; 131 } 132 133 /* 134 * Hunt "under the hood" of array lengths (leading to array references), 135 * null checks (also leading to array references), and new arrays 136 * (leading to the actual length). This makes it more likely related 137 * instructions become actually comparable. 138 */ 139 static HInstruction* HuntForDeclaration(HInstruction* instruction) { 140 while (instruction->IsArrayLength() || 141 instruction->IsNullCheck() || 142 instruction->IsNewArray()) { 143 instruction = instruction->InputAt(0); 144 } 145 return instruction; 146 } 147 148 static bool Equal(HInstruction* instruction1, HInstruction* instruction2) { 149 if (instruction1 == instruction2) { 150 return true; 151 } 152 if (instruction1 == nullptr || instruction2 == nullptr) { 153 return false; 154 } 155 instruction1 = HuntForDeclaration(instruction1); 156 instruction2 = HuntForDeclaration(instruction2); 157 return instruction1 == instruction2; 158 } 159 160 // Returns if it's certain this->bound >= `bound`. 161 bool GreaterThanOrEqualTo(ValueBound bound) const { 162 if (Equal(instruction_, bound.instruction_)) { 163 return constant_ >= bound.constant_; 164 } 165 // Not comparable. Just return false. 166 return false; 167 } 168 169 // Returns if it's certain this->bound <= `bound`. 170 bool LessThanOrEqualTo(ValueBound bound) const { 171 if (Equal(instruction_, bound.instruction_)) { 172 return constant_ <= bound.constant_; 173 } 174 // Not comparable. Just return false. 
175 return false; 176 } 177 178 // Try to narrow lower bound. Returns the greatest of the two if possible. 179 // Pick one if they are not comparable. 180 static ValueBound NarrowLowerBound(ValueBound bound1, ValueBound bound2) { 181 if (bound1.GreaterThanOrEqualTo(bound2)) { 182 return bound1; 183 } 184 if (bound2.GreaterThanOrEqualTo(bound1)) { 185 return bound2; 186 } 187 188 // Not comparable. Just pick one. We may lose some info, but that's ok. 189 // Favor constant as lower bound. 190 return bound1.IsConstant() ? bound1 : bound2; 191 } 192 193 // Try to narrow upper bound. Returns the lowest of the two if possible. 194 // Pick one if they are not comparable. 195 static ValueBound NarrowUpperBound(ValueBound bound1, ValueBound bound2) { 196 if (bound1.LessThanOrEqualTo(bound2)) { 197 return bound1; 198 } 199 if (bound2.LessThanOrEqualTo(bound1)) { 200 return bound2; 201 } 202 203 // Not comparable. Just pick one. We may lose some info, but that's ok. 204 // Favor array length as upper bound. 205 return bound1.IsRelatedToArrayLength() ? bound1 : bound2; 206 } 207 208 // Add a constant to a ValueBound. 209 // `overflow` or `underflow` will return whether the resulting bound may 210 // overflow or underflow an int. 211 ValueBound Add(int32_t c, bool* overflow, bool* underflow) const { 212 *overflow = *underflow = false; 213 if (c == 0) { 214 return *this; 215 } 216 217 int32_t new_constant; 218 if (c > 0) { 219 if (constant_ > (std::numeric_limits<int32_t>::max() - c)) { 220 *overflow = true; 221 return Max(); 222 } 223 224 new_constant = constant_ + c; 225 // (array.length + non-positive-constant) won't overflow an int. 226 if (IsConstant() || (IsRelatedToArrayLength() && new_constant <= 0)) { 227 return ValueBound(instruction_, new_constant); 228 } 229 // Be conservative. 
230 *overflow = true; 231 return Max(); 232 } else { 233 if (constant_ < (std::numeric_limits<int32_t>::min() - c)) { 234 *underflow = true; 235 return Min(); 236 } 237 238 new_constant = constant_ + c; 239 // Regardless of the value new_constant, (array.length+new_constant) will 240 // never underflow since array.length is no less than 0. 241 if (IsConstant() || IsRelatedToArrayLength()) { 242 return ValueBound(instruction_, new_constant); 243 } 244 // Be conservative. 245 *underflow = true; 246 return Min(); 247 } 248 } 249 250 private: 251 HInstruction* instruction_; 252 int32_t constant_; 253}; 254 255// Collect array access data for a loop. 256// TODO: make it work for multiple arrays inside the loop. 257class ArrayAccessInsideLoopFinder : public ValueObject { 258 public: 259 explicit ArrayAccessInsideLoopFinder(HInstruction* induction_variable) 260 : induction_variable_(induction_variable), 261 found_array_length_(nullptr), 262 offset_low_(std::numeric_limits<int32_t>::max()), 263 offset_high_(std::numeric_limits<int32_t>::min()) { 264 Run(); 265 } 266 267 HArrayLength* GetFoundArrayLength() const { return found_array_length_; } 268 bool HasFoundArrayLength() const { return found_array_length_ != nullptr; } 269 int32_t GetOffsetLow() const { return offset_low_; } 270 int32_t GetOffsetHigh() const { return offset_high_; } 271 272 // Returns if `block` that is in loop_info may exit the loop, unless it's 273 // the loop header for loop_info. 274 static bool EarlyExit(HBasicBlock* block, HLoopInformation* loop_info) { 275 DCHECK(loop_info->Contains(*block)); 276 if (block == loop_info->GetHeader()) { 277 // Loop header of loop_info. Exiting loop is normal. 278 return false; 279 } 280 for (HBasicBlock* successor : block->GetSuccessors()) { 281 if (!loop_info->Contains(*successor)) { 282 // One of the successors exits the loop. 
283 return true; 284 } 285 } 286 return false; 287 } 288 289 static bool DominatesAllBackEdges(HBasicBlock* block, HLoopInformation* loop_info) { 290 for (HBasicBlock* back_edge : loop_info->GetBackEdges()) { 291 if (!block->Dominates(back_edge)) { 292 return false; 293 } 294 } 295 return true; 296 } 297 298 void Run() { 299 HLoopInformation* loop_info = induction_variable_->GetBlock()->GetLoopInformation(); 300 HBlocksInLoopReversePostOrderIterator it_loop(*loop_info); 301 HBasicBlock* block = it_loop.Current(); 302 DCHECK(block == induction_variable_->GetBlock()); 303 // Skip loop header. Since narrowed value range of a MonotonicValueRange only 304 // applies to the loop body (after the test at the end of the loop header). 305 it_loop.Advance(); 306 for (; !it_loop.Done(); it_loop.Advance()) { 307 block = it_loop.Current(); 308 DCHECK(block->IsInLoop()); 309 if (!DominatesAllBackEdges(block, loop_info)) { 310 // In order not to trigger deoptimization unnecessarily, make sure 311 // that all array accesses collected are really executed in the loop. 312 // For array accesses in a branch inside the loop, don't collect the 313 // access. The bounds check in that branch might not be eliminated. 314 continue; 315 } 316 if (EarlyExit(block, loop_info)) { 317 // If the loop body can exit loop (like break, return, etc.), it's not guaranteed 318 // that the loop will loop through the full monotonic value range from 319 // initial_ to end_. So adding deoptimization might be too aggressive and can 320 // trigger deoptimization unnecessarily even if the loop won't actually throw 321 // AIOOBE. 
322 found_array_length_ = nullptr; 323 return; 324 } 325 for (HInstruction* instruction = block->GetFirstInstruction(); 326 instruction != nullptr; 327 instruction = instruction->GetNext()) { 328 if (!instruction->IsBoundsCheck()) { 329 continue; 330 } 331 332 HInstruction* length_value = instruction->InputAt(1); 333 if (length_value->IsIntConstant()) { 334 // TODO: may optimize for constant case. 335 continue; 336 } 337 338 if (length_value->IsPhi()) { 339 // When adding deoptimizations in outer loops, we might create 340 // a phi for the array length, and update all uses of the 341 // length in the loop to that phi. Therefore, inner loops having 342 // bounds checks on the same array will use that phi. 343 // TODO: handle these cases. 344 continue; 345 } 346 347 DCHECK(length_value->IsArrayLength()); 348 HArrayLength* array_length = length_value->AsArrayLength(); 349 350 HInstruction* array = array_length->InputAt(0); 351 if (array->IsNullCheck()) { 352 array = array->AsNullCheck()->InputAt(0); 353 } 354 if (loop_info->Contains(*array->GetBlock())) { 355 // Array is defined inside the loop. Skip. 356 continue; 357 } 358 359 if (found_array_length_ != nullptr && found_array_length_ != array_length) { 360 // There is already access for another array recorded for the loop. 361 // TODO: handle multiple arrays. 362 continue; 363 } 364 365 HInstruction* index = instruction->AsBoundsCheck()->InputAt(0); 366 HInstruction* left = index; 367 int32_t right = 0; 368 if (left == induction_variable_ || 369 (ValueBound::IsAddOrSubAConstant(index, &left, &right) && 370 left == induction_variable_)) { 371 // For patterns like array[i] or array[i + 2]. 372 if (right < offset_low_) { 373 offset_low_ = right; 374 } 375 if (right > offset_high_) { 376 offset_high_ = right; 377 } 378 } else { 379 // Access not in induction_variable/(induction_variable_ + constant) 380 // format. Skip. 381 continue; 382 } 383 // Record this array. 
384 found_array_length_ = array_length; 385 } 386 } 387 } 388 389 private: 390 // The instruction that corresponds to a MonotonicValueRange. 391 HInstruction* induction_variable_; 392 393 // The array length of the array that's accessed inside the loop body. 394 HArrayLength* found_array_length_; 395 396 // The lowest and highest constant offsets relative to induction variable 397 // instruction_ in all array accesses. 398 // If array access are: array[i-1], array[i], array[i+1], 399 // offset_low_ is -1 and offset_high is 1. 400 int32_t offset_low_; 401 int32_t offset_high_; 402 403 DISALLOW_COPY_AND_ASSIGN(ArrayAccessInsideLoopFinder); 404}; 405 406/** 407 * Represent a range of lower bound and upper bound, both being inclusive. 408 * Currently a ValueRange may be generated as a result of the following: 409 * comparisons related to array bounds, array bounds check, add/sub on top 410 * of an existing value range, NewArray or a loop phi corresponding to an 411 * incrementing/decrementing array index (MonotonicValueRange). 412 */ 413class ValueRange : public ArenaObject<kArenaAllocBoundsCheckElimination> { 414 public: 415 ValueRange(ArenaAllocator* allocator, ValueBound lower, ValueBound upper) 416 : allocator_(allocator), lower_(lower), upper_(upper) {} 417 418 virtual ~ValueRange() {} 419 420 virtual MonotonicValueRange* AsMonotonicValueRange() { return nullptr; } 421 bool IsMonotonicValueRange() { 422 return AsMonotonicValueRange() != nullptr; 423 } 424 425 ArenaAllocator* GetAllocator() const { return allocator_; } 426 ValueBound GetLower() const { return lower_; } 427 ValueBound GetUpper() const { return upper_; } 428 429 bool IsConstantValueRange() { return lower_.IsConstant() && upper_.IsConstant(); } 430 431 // If it's certain that this value range fits in other_range. 
432 virtual bool FitsIn(ValueRange* other_range) const { 433 if (other_range == nullptr) { 434 return true; 435 } 436 DCHECK(!other_range->IsMonotonicValueRange()); 437 return lower_.GreaterThanOrEqualTo(other_range->lower_) && 438 upper_.LessThanOrEqualTo(other_range->upper_); 439 } 440 441 // Returns the intersection of this and range. 442 // If it's not possible to do intersection because some 443 // bounds are not comparable, it's ok to pick either bound. 444 virtual ValueRange* Narrow(ValueRange* range) { 445 if (range == nullptr) { 446 return this; 447 } 448 449 if (range->IsMonotonicValueRange()) { 450 return this; 451 } 452 453 return new (allocator_) ValueRange( 454 allocator_, 455 ValueBound::NarrowLowerBound(lower_, range->lower_), 456 ValueBound::NarrowUpperBound(upper_, range->upper_)); 457 } 458 459 // Shift a range by a constant. 460 ValueRange* Add(int32_t constant) const { 461 bool overflow, underflow; 462 ValueBound lower = lower_.Add(constant, &overflow, &underflow); 463 if (underflow) { 464 // Lower bound underflow will wrap around to positive values 465 // and invalidate the upper bound. 466 return nullptr; 467 } 468 ValueBound upper = upper_.Add(constant, &overflow, &underflow); 469 if (overflow) { 470 // Upper bound overflow will wrap around to negative values 471 // and invalidate the lower bound. 472 return nullptr; 473 } 474 return new (allocator_) ValueRange(allocator_, lower, upper); 475 } 476 477 private: 478 ArenaAllocator* const allocator_; 479 const ValueBound lower_; // inclusive 480 const ValueBound upper_; // inclusive 481 482 DISALLOW_COPY_AND_ASSIGN(ValueRange); 483}; 484 485/** 486 * A monotonically incrementing/decrementing value range, e.g. 487 * the variable i in "for (int i=0; i<array.length; i++)". 488 * Special care needs to be taken to account for overflow/underflow 489 * of such value ranges. 
490 */ 491class MonotonicValueRange : public ValueRange { 492 public: 493 MonotonicValueRange(ArenaAllocator* allocator, 494 HPhi* induction_variable, 495 HInstruction* initial, 496 int32_t increment, 497 ValueBound bound) 498 // To be conservative, give it full range [Min(), Max()] in case it's 499 // used as a regular value range, due to possible overflow/underflow. 500 : ValueRange(allocator, ValueBound::Min(), ValueBound::Max()), 501 induction_variable_(induction_variable), 502 initial_(initial), 503 end_(nullptr), 504 inclusive_(false), 505 increment_(increment), 506 bound_(bound) {} 507 508 virtual ~MonotonicValueRange() {} 509 510 HInstruction* GetInductionVariable() const { return induction_variable_; } 511 int32_t GetIncrement() const { return increment_; } 512 ValueBound GetBound() const { return bound_; } 513 void SetEnd(HInstruction* end) { end_ = end; } 514 void SetInclusive(bool inclusive) { inclusive_ = inclusive; } 515 HBasicBlock* GetLoopHeader() const { 516 DCHECK(induction_variable_->GetBlock()->IsLoopHeader()); 517 return induction_variable_->GetBlock(); 518 } 519 520 MonotonicValueRange* AsMonotonicValueRange() OVERRIDE { return this; } 521 522 HBasicBlock* GetLoopHeaderSuccesorInLoop() { 523 HBasicBlock* header = GetLoopHeader(); 524 HInstruction* instruction = header->GetLastInstruction(); 525 DCHECK(instruction->IsIf()); 526 HIf* h_if = instruction->AsIf(); 527 HLoopInformation* loop_info = header->GetLoopInformation(); 528 bool true_successor_in_loop = loop_info->Contains(*h_if->IfTrueSuccessor()); 529 bool false_successor_in_loop = loop_info->Contains(*h_if->IfFalseSuccessor()); 530 531 // Just in case it's some strange loop structure. 532 if (true_successor_in_loop && false_successor_in_loop) { 533 return nullptr; 534 } 535 DCHECK(true_successor_in_loop || false_successor_in_loop); 536 return false_successor_in_loop ? 
h_if->IfFalseSuccessor() : h_if->IfTrueSuccessor(); 537 } 538 539 // If it's certain that this value range fits in other_range. 540 bool FitsIn(ValueRange* other_range) const OVERRIDE { 541 if (other_range == nullptr) { 542 return true; 543 } 544 DCHECK(!other_range->IsMonotonicValueRange()); 545 return false; 546 } 547 548 // Try to narrow this MonotonicValueRange given another range. 549 // Ideally it will return a normal ValueRange. But due to 550 // possible overflow/underflow, that may not be possible. 551 ValueRange* Narrow(ValueRange* range) OVERRIDE { 552 if (range == nullptr) { 553 return this; 554 } 555 DCHECK(!range->IsMonotonicValueRange()); 556 557 if (increment_ > 0) { 558 // Monotonically increasing. 559 ValueBound lower = ValueBound::NarrowLowerBound(bound_, range->GetLower()); 560 if (!lower.IsConstant() || lower.GetConstant() == std::numeric_limits<int32_t>::min()) { 561 // Lower bound isn't useful. Leave it to deoptimization. 562 return this; 563 } 564 565 // We currently conservatively assume max array length is Max(). 566 // If we can make assumptions about the max array length, e.g. due to the max heap size, 567 // divided by the element size (such as 4 bytes for each integer array), we can 568 // lower this number and rule out some possible overflows. 569 int32_t max_array_len = std::numeric_limits<int32_t>::max(); 570 571 // max possible integer value of range's upper value. 572 int32_t upper = std::numeric_limits<int32_t>::max(); 573 // Try to lower upper. 574 ValueBound upper_bound = range->GetUpper(); 575 if (upper_bound.IsConstant()) { 576 upper = upper_bound.GetConstant(); 577 } else if (upper_bound.IsRelatedToArrayLength() && upper_bound.GetConstant() <= 0) { 578 // Normal case. e.g. <= array.length - 1. 579 upper = max_array_len + upper_bound.GetConstant(); 580 } 581 582 // If we can prove for the last number in sequence of initial_, 583 // initial_ + increment_, initial_ + 2 x increment_, ... 
584 // that's <= upper, (last_num_in_sequence + increment_) doesn't trigger overflow, 585 // then this MonoticValueRange is narrowed to a normal value range. 586 587 // Be conservative first, assume last number in the sequence hits upper. 588 int32_t last_num_in_sequence = upper; 589 if (initial_->IsIntConstant()) { 590 int32_t initial_constant = initial_->AsIntConstant()->GetValue(); 591 if (upper <= initial_constant) { 592 last_num_in_sequence = upper; 593 } else { 594 // Cast to int64_t for the substraction part to avoid int32_t overflow. 595 last_num_in_sequence = initial_constant + 596 ((int64_t)upper - (int64_t)initial_constant) / increment_ * increment_; 597 } 598 } 599 if (last_num_in_sequence <= (std::numeric_limits<int32_t>::max() - increment_)) { 600 // No overflow. The sequence will be stopped by the upper bound test as expected. 601 return new (GetAllocator()) ValueRange(GetAllocator(), lower, range->GetUpper()); 602 } 603 604 // There might be overflow. Give up narrowing. 605 return this; 606 } else { 607 DCHECK_NE(increment_, 0); 608 // Monotonically decreasing. 609 ValueBound upper = ValueBound::NarrowUpperBound(bound_, range->GetUpper()); 610 if ((!upper.IsConstant() || upper.GetConstant() == std::numeric_limits<int32_t>::max()) && 611 !upper.IsRelatedToArrayLength()) { 612 // Upper bound isn't useful. Leave it to deoptimization. 613 return this; 614 } 615 616 // Need to take care of underflow. Try to prove underflow won't happen 617 // for common cases. 618 if (range->GetLower().IsConstant()) { 619 int32_t constant = range->GetLower().GetConstant(); 620 if (constant >= (std::numeric_limits<int32_t>::min() - increment_)) { 621 return new (GetAllocator()) ValueRange(GetAllocator(), range->GetLower(), upper); 622 } 623 } 624 625 // For non-constant lower bound, just assume might be underflow. Give up narrowing. 626 return this; 627 } 628 } 629 630 // Try to add HDeoptimize's in the loop pre-header first to narrow this range. 
631 // For example, this loop: 632 // 633 // for (int i = start; i < end; i++) { 634 // array[i - 1] = array[i] + array[i + 1]; 635 // } 636 // 637 // will be transformed to: 638 // 639 // int array_length_in_loop_body_if_needed; 640 // if (start >= end) { 641 // array_length_in_loop_body_if_needed = 0; 642 // } else { 643 // if (start < 1) deoptimize(); 644 // if (array == null) deoptimize(); 645 // array_length = array.length; 646 // if (end > array_length - 1) deoptimize; 647 // array_length_in_loop_body_if_needed = array_length; 648 // } 649 // for (int i = start; i < end; i++) { 650 // // No more null check and bounds check. 651 // // array.length value is replaced with array_length_in_loop_body_if_needed 652 // // in the loop body. 653 // array[i - 1] = array[i] + array[i + 1]; 654 // } 655 // 656 // We basically first go through the loop body and find those array accesses whose 657 // index is at a constant offset from the induction variable ('i' in the above example), 658 // and update offset_low and offset_high along the way. We then add the following 659 // deoptimizations in the loop pre-header (suppose end is not inclusive). 660 // if (start < -offset_low) deoptimize(); 661 // if (end >= array.length - offset_high) deoptimize(); 662 // It might be necessary to first hoist array.length (and the null check on it) out of 663 // the loop with another deoptimization. 664 // 665 // In order not to trigger deoptimization unnecessarily, we want to make a strong 666 // guarantee that no deoptimization is triggered if the loop body itself doesn't 667 // throw AIOOBE. (It's the same as saying if deoptimization is triggered, the loop 668 // body must throw AIOOBE). 669 // This is achieved by the following: 670 // 1) We only process loops that iterate through the full monotonic range from 671 // initial_ to end_. We do the following checks to make sure that's the case: 672 // a) The loop doesn't have early exit (via break, return, etc.) 
673 // b) The increment_ is 1/-1. An increment of 2, for example, may skip end_. 674 // 2) We only collect array accesses of blocks in the loop body that dominate 675 // all loop back edges, these array accesses are guaranteed to happen 676 // at each loop iteration. 677 // With 1) and 2), if the loop body doesn't throw AIOOBE, collected array accesses 678 // when the induction variable is at initial_ and end_ must be in a legal range. 679 // Since the added deoptimizations are basically checking the induction variable 680 // at initial_ and end_ values, no deoptimization will be triggered either. 681 // 682 // A special case is the loop body isn't entered at all. In that case, we may still 683 // add deoptimization due to the analysis described above. In order not to trigger 684 // deoptimization, we do a test between initial_ and end_ first and skip over 685 // the added deoptimization. 686 ValueRange* NarrowWithDeoptimization() { 687 if (increment_ != 1 && increment_ != -1) { 688 // In order not to trigger deoptimization unnecessarily, we want to 689 // make sure the loop iterates through the full range from initial_ to 690 // end_ so that boundaries are covered by the loop. An increment of 2, 691 // for example, may skip end_. 692 return this; 693 } 694 695 if (end_ == nullptr) { 696 // No full info to add deoptimization. 697 return this; 698 } 699 700 HBasicBlock* header = induction_variable_->GetBlock(); 701 DCHECK(header->IsLoopHeader()); 702 HBasicBlock* pre_header = header->GetLoopInformation()->GetPreHeader(); 703 if (!initial_->GetBlock()->Dominates(pre_header) || 704 !end_->GetBlock()->Dominates(pre_header)) { 705 // Can't add a check in loop pre-header if the value isn't available there. 706 return this; 707 } 708 709 ArrayAccessInsideLoopFinder finder(induction_variable_); 710 711 if (!finder.HasFoundArrayLength()) { 712 // No array access was found inside the loop that can benefit 713 // from deoptimization. 
714 return this; 715 } 716 717 if (!AddDeoptimization(finder)) { 718 return this; 719 } 720 721 // After added deoptimizations, induction variable fits in 722 // [-offset_low, array.length-1-offset_high], adjusted with collected offsets. 723 ValueBound lower = ValueBound(0, -finder.GetOffsetLow()); 724 ValueBound upper = ValueBound(finder.GetFoundArrayLength(), -1 - finder.GetOffsetHigh()); 725 // We've narrowed the range after added deoptimizations. 726 return new (GetAllocator()) ValueRange(GetAllocator(), lower, upper); 727 } 728 729 // Returns true if adding a (constant >= value) check for deoptimization 730 // is allowed and will benefit compiled code. 731 bool CanAddDeoptimizationConstant(HInstruction* value, int32_t constant, bool* is_proven) { 732 *is_proven = false; 733 HBasicBlock* header = induction_variable_->GetBlock(); 734 DCHECK(header->IsLoopHeader()); 735 HBasicBlock* pre_header = header->GetLoopInformation()->GetPreHeader(); 736 DCHECK(value->GetBlock()->Dominates(pre_header)); 737 738 // See if we can prove the relationship first. 739 if (value->IsIntConstant()) { 740 if (value->AsIntConstant()->GetValue() >= constant) { 741 // Already true. 742 *is_proven = true; 743 return true; 744 } else { 745 // May throw exception. Don't add deoptimization. 746 // Keep bounds checks in the loops. 747 return false; 748 } 749 } 750 // Can benefit from deoptimization. 751 return true; 752 } 753 754 // Try to filter out cases that the loop entry test will never be true. 
755 bool LoopEntryTestUseful() { 756 if (initial_->IsIntConstant() && end_->IsIntConstant()) { 757 int32_t initial_val = initial_->AsIntConstant()->GetValue(); 758 int32_t end_val = end_->AsIntConstant()->GetValue(); 759 if (increment_ == 1) { 760 if (inclusive_) { 761 return initial_val > end_val; 762 } else { 763 return initial_val >= end_val; 764 } 765 } else { 766 DCHECK_EQ(increment_, -1); 767 if (inclusive_) { 768 return initial_val < end_val; 769 } else { 770 return initial_val <= end_val; 771 } 772 } 773 } 774 return true; 775 } 776 777 // Returns the block for adding deoptimization. 778 HBasicBlock* TransformLoopForDeoptimizationIfNeeded() { 779 HBasicBlock* header = induction_variable_->GetBlock(); 780 DCHECK(header->IsLoopHeader()); 781 HBasicBlock* pre_header = header->GetLoopInformation()->GetPreHeader(); 782 // Deoptimization is only added when both initial_ and end_ are defined 783 // before the loop. 784 DCHECK(initial_->GetBlock()->Dominates(pre_header)); 785 DCHECK(end_->GetBlock()->Dominates(pre_header)); 786 787 // If it can be proven the loop body is definitely entered (unless exception 788 // is thrown in the loop header for which triggering deoptimization is fine), 789 // there is no need for tranforming the loop. In that case, deoptimization 790 // will just be added in the loop pre-header. 791 if (!LoopEntryTestUseful()) { 792 return pre_header; 793 } 794 795 HGraph* graph = header->GetGraph(); 796 graph->TransformLoopHeaderForBCE(header); 797 HBasicBlock* new_pre_header = header->GetDominator(); 798 DCHECK(new_pre_header == header->GetLoopInformation()->GetPreHeader()); 799 HBasicBlock* if_block = new_pre_header->GetDominator(); 800 HBasicBlock* dummy_block = if_block->GetSuccessors()[0]; // True successor. 801 HBasicBlock* deopt_block = if_block->GetSuccessors()[1]; // False successor. 
802 803 dummy_block->AddInstruction(new (graph->GetArena()) HGoto()); 804 deopt_block->AddInstruction(new (graph->GetArena()) HGoto()); 805 new_pre_header->AddInstruction(new (graph->GetArena()) HGoto()); 806 return deopt_block; 807 } 808 809 // Adds a test between initial_ and end_ to see if the loop body is entered. 810 // If the loop body isn't entered at all, it jumps to the loop pre-header (after 811 // transformation) to avoid any deoptimization. 812 void AddLoopBodyEntryTest() { 813 HBasicBlock* header = induction_variable_->GetBlock(); 814 DCHECK(header->IsLoopHeader()); 815 HBasicBlock* pre_header = header->GetLoopInformation()->GetPreHeader(); 816 HBasicBlock* if_block = pre_header->GetDominator(); 817 HGraph* graph = header->GetGraph(); 818 819 HCondition* cond; 820 if (increment_ == 1) { 821 if (inclusive_) { 822 cond = new (graph->GetArena()) HGreaterThan(initial_, end_); 823 } else { 824 cond = new (graph->GetArena()) HGreaterThanOrEqual(initial_, end_); 825 } 826 } else { 827 DCHECK_EQ(increment_, -1); 828 if (inclusive_) { 829 cond = new (graph->GetArena()) HLessThan(initial_, end_); 830 } else { 831 cond = new (graph->GetArena()) HLessThanOrEqual(initial_, end_); 832 } 833 } 834 HIf* h_if = new (graph->GetArena()) HIf(cond); 835 if_block->AddInstruction(cond); 836 if_block->AddInstruction(h_if); 837 } 838 839 // Adds a check that (value >= constant), and HDeoptimize otherwise. 
// Emits, in deopt_block, a (value < constant) test that triggers an HDeoptimize,
// i.e. compiled code continues only when (value >= constant) holds.
// `loop_entry_test_block_added` tells whether the loop transformation inserted an
// extra entry-test block (which changes where deopt_block sits relative to the
// pre-header); it is only used for the structural DCHECKs here.
void AddDeoptimizationConstant(HInstruction* value,
                               int32_t constant,
                               HBasicBlock* deopt_block,
                               bool loop_entry_test_block_added) {
  HBasicBlock* header = induction_variable_->GetBlock();
  DCHECK(header->IsLoopHeader());
  HBasicBlock* pre_header = header->GetDominator();
  if (loop_entry_test_block_added) {
    // With the entry test in place, deopt_block feeds into the pre-header.
    DCHECK(deopt_block->GetSuccessors()[0] == pre_header);
  } else {
    // Without it, the deoptimization checks live in the pre-header itself.
    DCHECK(deopt_block == pre_header);
  }
  HGraph* graph = header->GetGraph();
  HSuspendCheck* suspend_check = header->GetLoopInformation()->GetSuspendCheck();
  if (loop_entry_test_block_added) {
    DCHECK_EQ(deopt_block, header->GetDominator()->GetDominator()->GetSuccessors()[1]);
  }

  HIntConstant* const_instr = graph->GetIntConstant(constant);
  // Deoptimize when (value < constant), i.e. when the assumed invariant fails.
  HCondition* cond = new (graph->GetArena()) HLessThan(value, const_instr);
  HDeoptimize* deoptimize = new (graph->GetArena())
      HDeoptimize(cond, suspend_check->GetDexPc());
  // Insert before the block's terminator (the Goto added by the transformation).
  deopt_block->InsertInstructionBefore(cond, deopt_block->GetLastInstruction());
  deopt_block->InsertInstructionBefore(deoptimize, deopt_block->GetLastInstruction());
  // The deopt reuses the loop's suspend-check environment, adjusted for loop phis,
  // so the interpreter can resume with correct values.
  deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
      suspend_check->GetEnvironment(), header);
}

// Returns true if adding a (value <= array_length + offset) check for deoptimization
// is allowed and will benefit compiled code.
bool CanAddDeoptimizationArrayLength(HInstruction* value,
                                     HArrayLength* array_length,
                                     int32_t offset,
                                     bool* is_proven) {
  // *is_proven is set when the relationship already holds statically, so no
  // runtime check is needed at all.
  *is_proven = false;
  HBasicBlock* header = induction_variable_->GetBlock();
  DCHECK(header->IsLoopHeader());
  HBasicBlock* pre_header = header->GetLoopInformation()->GetPreHeader();
  DCHECK(value->GetBlock()->Dominates(pre_header));

  if (array_length->GetBlock() == header) {
    // array_length_in_loop_body_if_needed only has correct value when the loop
    // body is entered. We bail out in this case. Usually array_length defined
    // in the loop header is already hoisted by licm.
    return false;
  } else {
    // array_length is defined either before the loop header already, or in
    // the loop body since it's used in the loop body. If it's defined in the loop body,
    // a phi array_length_in_loop_body_if_needed is used to replace it. In that case,
    // all the uses of array_length must be dominated by its definition in the loop
    // body. array_length_in_loop_body_if_needed is guaranteed to be the same as
    // array_length once the loop body is entered so all the uses of the phi will
    // use the correct value.
  }

  if (offset > 0) {
    // There might be overflow issue.
    // TODO: handle this, possibly with some distance relationship between
    // offset_low and offset_high, or using another deoptimization to make
    // sure (array_length + offset) doesn't overflow.
    return false;
  }

  // See if we can prove the relationship first.
  if (value == array_length) {
    if (offset >= 0) {
      // Already true.
      *is_proven = true;
      return true;
    } else {
      // May throw exception. Don't add deoptimization.
      // Keep bounds checks in the loops.
      return false;
    }
  }
  // Can benefit from deoptimization.
  return true;
}

// Adds a check that (value <= array_length + offset), and HDeoptimize otherwise.
// Emits, in deopt_block, a (value > array_length + offset) test that triggers an
// HDeoptimize, i.e. compiled code continues only when
// (value <= array_length + offset) holds. If array_length is defined inside the
// loop body, it (and its null check, if any) is first hoisted into deopt_block.
void AddDeoptimizationArrayLength(HInstruction* value,
                                  HArrayLength* array_length,
                                  int32_t offset,
                                  HBasicBlock* deopt_block,
                                  bool loop_entry_test_block_added) {
  HBasicBlock* header = induction_variable_->GetBlock();
  DCHECK(header->IsLoopHeader());
  HBasicBlock* pre_header = header->GetDominator();
  if (loop_entry_test_block_added) {
    DCHECK(deopt_block->GetSuccessors()[0] == pre_header);
  } else {
    DCHECK(deopt_block == pre_header);
  }
  HGraph* graph = header->GetGraph();
  HSuspendCheck* suspend_check = header->GetLoopInformation()->GetSuspendCheck();

  // We may need to hoist null-check and array_length out of loop first.
  if (!array_length->GetBlock()->Dominates(deopt_block)) {
    // array_length must be defined in the loop body.
    DCHECK(header->GetLoopInformation()->Contains(*array_length->GetBlock()));
    DCHECK(array_length->GetBlock() != header);

    HInstruction* array = array_length->InputAt(0);
    HNullCheck* null_check = array->AsNullCheck();
    if (null_check != nullptr) {
      // Look through the null check to the actual array reference.
      array = null_check->InputAt(0);
    }
    // We've already made sure the array is defined before the loop when collecting
    // array accesses for the loop.
    DCHECK(array->GetBlock()->Dominates(deopt_block));
    if (null_check != nullptr && !null_check->GetBlock()->Dominates(deopt_block)) {
      // Hoist null check out of loop with a deoptimization.
      HNullConstant* null_constant = graph->GetNullConstant();
      HCondition* null_check_cond = new (graph->GetArena()) HEqual(array, null_constant);
      // TODO: for one dex_pc, share the same deoptimization slow path.
      HDeoptimize* null_check_deoptimize = new (graph->GetArena())
          HDeoptimize(null_check_cond, suspend_check->GetDexPc());
      deopt_block->InsertInstructionBefore(
          null_check_cond, deopt_block->GetLastInstruction());
      deopt_block->InsertInstructionBefore(
          null_check_deoptimize, deopt_block->GetLastInstruction());
      // Eliminate null check in the loop.
      null_check->ReplaceWith(array);
      null_check->GetBlock()->RemoveInstruction(null_check);
      null_check_deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
          suspend_check->GetEnvironment(), header);
    }

    // Recreate the array-length read at the deopt point, using the (now
    // null-checked or provably non-null) array.
    HArrayLength* new_array_length
        = new (graph->GetArena()) HArrayLength(array, array->GetDexPc());
    deopt_block->InsertInstructionBefore(new_array_length, deopt_block->GetLastInstruction());

    if (loop_entry_test_block_added) {
      // Replace array_length defined inside the loop body with a phi
      // array_length_in_loop_body_if_needed. This is a synthetic phi so there is
      // no vreg number for it.
      HPhi* phi = new (graph->GetArena()) HPhi(
          graph->GetArena(), kNoRegNumber, 2, Primitive::kPrimInt);
      // Set to 0 if the loop body isn't entered.
      phi->SetRawInputAt(0, graph->GetIntConstant(0));
      // Set to array.length if the loop body is entered.
      phi->SetRawInputAt(1, new_array_length);
      pre_header->AddPhi(phi);
      array_length->ReplaceWith(phi);
      // Make sure phi is only used after the loop body is entered.
      if (kIsDebugBuild) {
        for (HUseIterator<HInstruction*> it(phi->GetUses());
             !it.Done();
             it.Advance()) {
          HInstruction* user = it.Current()->GetUser();
          DCHECK(GetLoopHeaderSuccesorInLoop()->Dominates(user->GetBlock()));
        }
      }
    } else {
      array_length->ReplaceWith(new_array_length);
    }

    array_length->GetBlock()->RemoveInstruction(array_length);
    // Use new_array_length for deopt.
    array_length = new_array_length;
  }

  HInstruction* added = array_length;
  if (offset != 0) {
    HIntConstant* offset_instr = graph->GetIntConstant(offset);
    added = new (graph->GetArena()) HAdd(Primitive::kPrimInt, array_length, offset_instr);
    deopt_block->InsertInstructionBefore(added, deopt_block->GetLastInstruction());
  }
  // Deoptimize when (value > array_length + offset).
  HCondition* cond = new (graph->GetArena()) HGreaterThan(value, added);
  HDeoptimize* deopt = new (graph->GetArena()) HDeoptimize(cond, suspend_check->GetDexPc());
  deopt_block->InsertInstructionBefore(cond, deopt_block->GetLastInstruction());
  deopt_block->InsertInstructionBefore(deopt, deopt_block->GetLastInstruction());
  deopt->CopyEnvironmentFromWithLoopPhiAdjustment(suspend_check->GetEnvironment(), header);
}

// Adds deoptimizations in loop pre-header with the collected array access
// data so that value ranges can be established in loop body.
// Returns true if deoptimizations are successfully added, or if it's proven
// it's not necessary.
bool AddDeoptimization(const ArrayAccessInsideLoopFinder& finder) {
  int32_t offset_low = finder.GetOffsetLow();
  int32_t offset_high = finder.GetOffsetHigh();
  HArrayLength* array_length = finder.GetFoundArrayLength();

  HBasicBlock* pre_header =
      induction_variable_->GetBlock()->GetLoopInformation()->GetPreHeader();
  if (!initial_->GetBlock()->Dominates(pre_header) ||
      !end_->GetBlock()->Dominates(pre_header)) {
    // Can't move initial_ or end_ into pre_header for comparisons.
    return false;
  }

  HBasicBlock* deopt_block;
  bool loop_entry_test_block_added = false;
  bool is_constant_proven, is_length_proven;

  // Pick which end of the iteration range is checked against the constant
  // (lower bound of index range) and which against array.length (upper bound),
  // depending on the iteration direction.
  HInstruction* const_comparing_instruction;
  int32_t const_compared_to;
  HInstruction* array_length_comparing_instruction;
  int32_t array_length_offset;
  if (increment_ == 1) {
    // Increasing from initial_ to end_.
    const_comparing_instruction = initial_;
    const_compared_to = -offset_low;
    array_length_comparing_instruction = end_;
    // If end_ is inclusive, the last index reaches one further, hence the -1.
    array_length_offset = inclusive_ ? -offset_high - 1 : -offset_high;
  } else {
    // Decreasing from initial_ to end_.
    const_comparing_instruction = end_;
    const_compared_to = inclusive_ ? -offset_low : -offset_low - 1;
    array_length_comparing_instruction = initial_;
    array_length_offset = -offset_high - 1;
  }

  if (CanAddDeoptimizationConstant(const_comparing_instruction,
                                   const_compared_to,
                                   &is_constant_proven) &&
      CanAddDeoptimizationArrayLength(array_length_comparing_instruction,
                                      array_length,
                                      array_length_offset,
                                      &is_length_proven)) {
    if (!is_constant_proven || !is_length_proven) {
      // At least one runtime check is needed: transform the loop so there is a
      // dedicated block for deoptimization checks.
      deopt_block = TransformLoopForDeoptimizationIfNeeded();
      loop_entry_test_block_added = (deopt_block != pre_header);
      if (loop_entry_test_block_added) {
        // Loop body may be entered.
        AddLoopBodyEntryTest();
      }
    }
    if (!is_constant_proven) {
      AddDeoptimizationConstant(const_comparing_instruction,
                                const_compared_to,
                                deopt_block,
                                loop_entry_test_block_added);
    }
    if (!is_length_proven) {
      AddDeoptimizationArrayLength(array_length_comparing_instruction,
                                   array_length,
                                   array_length_offset,
                                   deopt_block,
                                   loop_entry_test_block_added);
    }
    return true;
  }
  return false;
}

private:
HPhi* const induction_variable_;  // Induction variable for this monotonic value range.
HInstruction* const initial_;     // Initial value.
HInstruction* end_;               // End value.
bool inclusive_;                  // Whether end value is inclusive.
const int32_t increment_;         // Increment for each loop iteration.
const ValueBound bound_;          // Additional value bound info for initial_.

  DISALLOW_COPY_AND_ASSIGN(MonotonicValueRange);
};

// Graph visitor that performs the actual bounds-check elimination: it tracks
// per-block value ranges for instructions and removes HBoundsCheck nodes whose
// index range provably fits within [0, array.length - 1].
class BCEVisitor : public HGraphVisitor {
 public:
  // The least number of bounds checks that should be eliminated by triggering
  // the deoptimization technique.
  static constexpr size_t kThresholdForAddingDeoptimize = 2;

  // Very large constant index is considered as an anomaly. This is a threshold
  // beyond which we don't bother to apply the deoptimization technique since
  // it's likely some AIOOBE will be thrown.
  static constexpr int32_t kMaxConstantForAddingDeoptimize =
      std::numeric_limits<int32_t>::max() - 1024 * 1024;

  // Added blocks for loop body entry test.
  // Blocks created by this pass get ids at/after the original block count.
  bool IsAddedBlock(HBasicBlock* block) const {
    return block->GetBlockId() >= initial_block_size_;
  }

  BCEVisitor(HGraph* graph, HInductionVarAnalysis* induction_analysis)
      : HGraphVisitor(graph),
        // One value-range map per pre-existing block, all arena-allocated.
        maps_(graph->GetBlocks().size(),
              ArenaSafeMap<int, ValueRange*>(
                  std::less<int>(),
                  graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
              graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
        first_constant_index_bounds_check_map_(
            std::less<int>(),
            graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
        need_to_revisit_block_(false),
        // Snapshot the block count so later-added blocks can be recognized.
        initial_block_size_(graph->GetBlocks().size()),
        induction_range_(induction_analysis) {}

  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
    DCHECK(!IsAddedBlock(block));
    first_constant_index_bounds_check_map_.clear();
    HGraphVisitor::VisitBasicBlock(block);
    if (need_to_revisit_block_) {
      // A compare-with-deoptimization opportunity was found during the first
      // pass; apply it, then re-run the block with fresh range info.
      AddComparesWithDeoptimization(block);
      need_to_revisit_block_ = false;
      first_constant_index_bounds_check_map_.clear();
      GetValueRangeMap(block)->clear();
      HGraphVisitor::VisitBasicBlock(block);
    }
  }

 private:
  // Return the map of proven value ranges at the beginning of a basic block.
  ArenaSafeMap<int, ValueRange*>* GetValueRangeMap(HBasicBlock* basic_block) {
    if (IsAddedBlock(basic_block)) {
      // Added blocks don't keep value ranges.
      return nullptr;
    }
    uint32_t block_id = basic_block->GetBlockId();
    return &maps_[block_id];
  }

  // Traverse up the dominator tree to look for value range info.
  // Returns nullptr if no range for `instruction` is recorded on any dominator.
  ValueRange* LookupValueRange(HInstruction* instruction, HBasicBlock* basic_block) {
    while (basic_block != nullptr) {
      ArenaSafeMap<int, ValueRange*>* map = GetValueRangeMap(basic_block);
      if (map != nullptr) {
        if (map->find(instruction->GetId()) != map->end()) {
          return map->Get(instruction->GetId());
        }
      } else {
        // Only pass-added blocks have no range map; skip over them.
        DCHECK(IsAddedBlock(basic_block));
      }
      basic_block = basic_block->GetDominator();
    }
    // Didn't find any.
    return nullptr;
  }

  // Return the range resulting from induction variable analysis of "instruction" when the value
  // is used from "context", for example, an index used from a bounds-check inside a loop body.
  ValueRange* LookupInductionRange(HInstruction* context, HInstruction* instruction) {
    InductionVarRange::Value v1;
    InductionVarRange::Value v2;
    bool needs_finite_test = false;
    induction_range_.GetInductionRange(context, instruction, &v1, &v2, &needs_finite_test);
    // Only affine results with coefficient 0 or 1 can be expressed as a
    // ValueBound (instruction + constant); anything else is discarded.
    if (v1.is_known && (v1.a_constant == 0 || v1.a_constant == 1) &&
        v2.is_known && (v2.a_constant == 0 || v2.a_constant == 1)) {
      DCHECK(v1.a_constant == 1 || v1.instruction == nullptr);
      DCHECK(v2.a_constant == 1 || v2.instruction == nullptr);
      ValueBound low = ValueBound(v1.instruction, v1.b_constant);
      ValueBound up = ValueBound(v2.instruction, v2.b_constant);
      return new (GetGraph()->GetArena()) ValueRange(GetGraph()->GetArena(), low, up);
    }
    // Didn't find anything useful.
    return nullptr;
  }

  // Narrow the value range of `instruction` at the end of `basic_block` with `range`,
  // and push the narrowed value range to `successor`.
  void ApplyRangeFromComparison(HInstruction* instruction, HBasicBlock* basic_block,
                                HBasicBlock* successor, ValueRange* range) {
    ValueRange* existing_range = LookupValueRange(instruction, basic_block);
    if (existing_range == nullptr) {
      if (range != nullptr) {
        // No prior info: the comparison's range is all we know in `successor`.
        GetValueRangeMap(successor)->Overwrite(instruction->GetId(), range);
      }
      return;
    }
    if (existing_range->IsMonotonicValueRange()) {
      DCHECK(instruction->IsLoopHeaderPhi());
      // Make sure the comparison is in the loop header so each increment is
      // checked with a comparison.
      if (instruction->GetBlock() != basic_block) {
        return;
      }
    }
    ValueRange* narrowed_range = existing_range->Narrow(range);
    GetValueRangeMap(successor)->Overwrite(instruction->GetId(), narrowed_range);
  }

  // Special case that we may simultaneously narrow two MonotonicValueRange's to
  // regular value ranges.
  void HandleIfBetweenTwoMonotonicValueRanges(HIf* instruction,
                                              HInstruction* left,
                                              HInstruction* right,
                                              IfCondition cond,
                                              MonotonicValueRange* left_range,
                                              MonotonicValueRange* right_range) {
    DCHECK(left->IsLoopHeaderPhi());
    DCHECK(right->IsLoopHeaderPhi());
    if (instruction->GetBlock() != left->GetBlock()) {
      // Comparison needs to be in loop header to make sure it's done after each
      // increment/decrement.
      return;
    }

    // Handle common cases which also don't have overflow/underflow concerns.
    // Pattern: `left` counts up from a constant while `right` counts down from
    // something below array.length, so the two ranges close in on each other.
    if (left_range->GetIncrement() == 1 &&
        left_range->GetBound().IsConstant() &&
        right_range->GetIncrement() == -1 &&
        right_range->GetBound().IsRelatedToArrayLength() &&
        right_range->GetBound().GetConstant() < 0) {
      HBasicBlock* successor = nullptr;
      int32_t left_compensation = 0;
      int32_t right_compensation = 0;
      // Compensations convert strict comparisons into inclusive bounds.
      if (cond == kCondLT) {
        left_compensation = -1;
        right_compensation = 1;
        successor = instruction->IfTrueSuccessor();
      } else if (cond == kCondLE) {
        successor = instruction->IfTrueSuccessor();
      } else if (cond == kCondGT) {
        successor = instruction->IfFalseSuccessor();
      } else if (cond == kCondGE) {
        left_compensation = -1;
        right_compensation = 1;
        successor = instruction->IfFalseSuccessor();
      } else {
        // We don't handle '=='/'!=' test in case left and right can cross and
        // miss each other.
        return;
      }

      if (successor != nullptr) {
        bool overflow;
        bool underflow;
        // left is bounded below by its own constant start, and above by
        // right's (compensated) bound.
        ValueRange* new_left_range = new (GetGraph()->GetArena()) ValueRange(
            GetGraph()->GetArena(),
            left_range->GetBound(),
            right_range->GetBound().Add(left_compensation, &overflow, &underflow));
        if (!overflow && !underflow) {
          ApplyRangeFromComparison(left, instruction->GetBlock(), successor,
                                   new_left_range);
        }

        // Symmetrically, right is bounded below by left's (compensated) start
        // and above by its own bound.
        ValueRange* new_right_range = new (GetGraph()->GetArena()) ValueRange(
            GetGraph()->GetArena(),
            left_range->GetBound().Add(right_compensation, &overflow, &underflow),
            right_range->GetBound());
        if (!overflow && !underflow) {
          ApplyRangeFromComparison(right, instruction->GetBlock(), successor,
                                   new_right_range);
        }
      }
    }
  }

  // Handle "if (left cmp_cond right)".
1279 void HandleIf(HIf* instruction, HInstruction* left, HInstruction* right, IfCondition cond) { 1280 HBasicBlock* block = instruction->GetBlock(); 1281 1282 HBasicBlock* true_successor = instruction->IfTrueSuccessor(); 1283 // There should be no critical edge at this point. 1284 DCHECK_EQ(true_successor->GetPredecessors().size(), 1u); 1285 1286 HBasicBlock* false_successor = instruction->IfFalseSuccessor(); 1287 // There should be no critical edge at this point. 1288 DCHECK_EQ(false_successor->GetPredecessors().size(), 1u); 1289 1290 ValueRange* left_range = LookupValueRange(left, block); 1291 MonotonicValueRange* left_monotonic_range = nullptr; 1292 if (left_range != nullptr) { 1293 left_monotonic_range = left_range->AsMonotonicValueRange(); 1294 if (left_monotonic_range != nullptr) { 1295 HBasicBlock* loop_head = left_monotonic_range->GetLoopHeader(); 1296 if (instruction->GetBlock() != loop_head) { 1297 // For monotonic value range, don't handle `instruction` 1298 // if it's not defined in the loop header. 1299 return; 1300 } 1301 } 1302 } 1303 1304 bool found; 1305 ValueBound bound = ValueBound::DetectValueBoundFromValue(right, &found); 1306 // Each comparison can establish a lower bound and an upper bound 1307 // for the left hand side. 1308 ValueBound lower = bound; 1309 ValueBound upper = bound; 1310 if (!found) { 1311 // No constant or array.length+c format bound found. 1312 // For i<j, we can still use j's upper bound as i's upper bound. Same for lower. 
1313 ValueRange* right_range = LookupValueRange(right, block); 1314 if (right_range != nullptr) { 1315 if (right_range->IsMonotonicValueRange()) { 1316 if (left_range != nullptr && left_range->IsMonotonicValueRange()) { 1317 HandleIfBetweenTwoMonotonicValueRanges(instruction, left, right, cond, 1318 left_range->AsMonotonicValueRange(), 1319 right_range->AsMonotonicValueRange()); 1320 return; 1321 } 1322 } 1323 lower = right_range->GetLower(); 1324 upper = right_range->GetUpper(); 1325 } else { 1326 lower = ValueBound::Min(); 1327 upper = ValueBound::Max(); 1328 } 1329 } 1330 1331 bool overflow, underflow; 1332 if (cond == kCondLT || cond == kCondLE) { 1333 if (left_monotonic_range != nullptr) { 1334 // Update the info for monotonic value range. 1335 if (left_monotonic_range->GetInductionVariable() == left && 1336 left_monotonic_range->GetIncrement() < 0 && 1337 block == left_monotonic_range->GetLoopHeader() && 1338 instruction->IfFalseSuccessor()->GetLoopInformation() == block->GetLoopInformation()) { 1339 left_monotonic_range->SetEnd(right); 1340 left_monotonic_range->SetInclusive(cond == kCondLT); 1341 } 1342 } 1343 1344 if (!upper.Equals(ValueBound::Max())) { 1345 int32_t compensation = (cond == kCondLT) ? -1 : 0; // upper bound is inclusive 1346 ValueBound new_upper = upper.Add(compensation, &overflow, &underflow); 1347 if (overflow || underflow) { 1348 return; 1349 } 1350 ValueRange* new_range = new (GetGraph()->GetArena()) 1351 ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper); 1352 ApplyRangeFromComparison(left, block, true_successor, new_range); 1353 } 1354 1355 // array.length as a lower bound isn't considered useful. 1356 if (!lower.Equals(ValueBound::Min()) && !lower.IsRelatedToArrayLength()) { 1357 int32_t compensation = (cond == kCondLE) ? 
1 : 0; // lower bound is inclusive 1358 ValueBound new_lower = lower.Add(compensation, &overflow, &underflow); 1359 if (overflow || underflow) { 1360 return; 1361 } 1362 ValueRange* new_range = new (GetGraph()->GetArena()) 1363 ValueRange(GetGraph()->GetArena(), new_lower, ValueBound::Max()); 1364 ApplyRangeFromComparison(left, block, false_successor, new_range); 1365 } 1366 } else if (cond == kCondGT || cond == kCondGE) { 1367 if (left_monotonic_range != nullptr) { 1368 // Update the info for monotonic value range. 1369 if (left_monotonic_range->GetInductionVariable() == left && 1370 left_monotonic_range->GetIncrement() > 0 && 1371 block == left_monotonic_range->GetLoopHeader() && 1372 instruction->IfFalseSuccessor()->GetLoopInformation() == block->GetLoopInformation()) { 1373 left_monotonic_range->SetEnd(right); 1374 left_monotonic_range->SetInclusive(cond == kCondGT); 1375 } 1376 } 1377 1378 // array.length as a lower bound isn't considered useful. 1379 if (!lower.Equals(ValueBound::Min()) && !lower.IsRelatedToArrayLength()) { 1380 int32_t compensation = (cond == kCondGT) ? 1 : 0; // lower bound is inclusive 1381 ValueBound new_lower = lower.Add(compensation, &overflow, &underflow); 1382 if (overflow || underflow) { 1383 return; 1384 } 1385 ValueRange* new_range = new (GetGraph()->GetArena()) 1386 ValueRange(GetGraph()->GetArena(), new_lower, ValueBound::Max()); 1387 ApplyRangeFromComparison(left, block, true_successor, new_range); 1388 } 1389 1390 if (!upper.Equals(ValueBound::Max())) { 1391 int32_t compensation = (cond == kCondGE) ? 
-1 : 0; // upper bound is inclusive 1392 ValueBound new_upper = upper.Add(compensation, &overflow, &underflow); 1393 if (overflow || underflow) { 1394 return; 1395 } 1396 ValueRange* new_range = new (GetGraph()->GetArena()) 1397 ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper); 1398 ApplyRangeFromComparison(left, block, false_successor, new_range); 1399 } 1400 } 1401 } 1402 1403 void VisitBoundsCheck(HBoundsCheck* bounds_check) { 1404 HBasicBlock* block = bounds_check->GetBlock(); 1405 HInstruction* index = bounds_check->InputAt(0); 1406 HInstruction* array_length = bounds_check->InputAt(1); 1407 DCHECK(array_length->IsIntConstant() || 1408 array_length->IsArrayLength() || 1409 array_length->IsPhi()); 1410 1411 if (array_length->IsPhi()) { 1412 // Input 1 of the phi contains the real array.length once the loop body is 1413 // entered. That value will be used for bound analysis. The graph is still 1414 // strictly in SSA form. 1415 array_length = array_length->AsPhi()->InputAt(1)->AsArrayLength(); 1416 } 1417 1418 if (!index->IsIntConstant()) { 1419 ValueBound lower = ValueBound(nullptr, 0); // constant 0 1420 ValueBound upper = ValueBound(array_length, -1); // array_length - 1 1421 ValueRange array_range(GetGraph()->GetArena(), lower, upper); 1422 // Try range obtained by local analysis. 1423 ValueRange* index_range = LookupValueRange(index, block); 1424 if (index_range != nullptr && index_range->FitsIn(&array_range)) { 1425 ReplaceBoundsCheck(bounds_check, index); 1426 return; 1427 } 1428 // Try range obtained by induction variable analysis. 1429 index_range = LookupInductionRange(bounds_check, index); 1430 if (index_range != nullptr && index_range->FitsIn(&array_range)) { 1431 ReplaceBoundsCheck(bounds_check, index); 1432 return; 1433 } 1434 } else { 1435 int32_t constant = index->AsIntConstant()->GetValue(); 1436 if (constant < 0) { 1437 // Will always throw exception. 
1438 return; 1439 } 1440 if (array_length->IsIntConstant()) { 1441 if (constant < array_length->AsIntConstant()->GetValue()) { 1442 ReplaceBoundsCheck(bounds_check, index); 1443 } 1444 return; 1445 } 1446 1447 DCHECK(array_length->IsArrayLength()); 1448 ValueRange* existing_range = LookupValueRange(array_length, block); 1449 if (existing_range != nullptr) { 1450 ValueBound lower = existing_range->GetLower(); 1451 DCHECK(lower.IsConstant()); 1452 if (constant < lower.GetConstant()) { 1453 ReplaceBoundsCheck(bounds_check, index); 1454 return; 1455 } else { 1456 // Existing range isn't strong enough to eliminate the bounds check. 1457 // Fall through to update the array_length range with info from this 1458 // bounds check. 1459 } 1460 } 1461 1462 if (first_constant_index_bounds_check_map_.find(array_length->GetId()) == 1463 first_constant_index_bounds_check_map_.end()) { 1464 // Remember the first bounds check against array_length of a constant index. 1465 // That bounds check instruction has an associated HEnvironment where we 1466 // may add an HDeoptimize to eliminate bounds checks of constant indices 1467 // against array_length. 1468 first_constant_index_bounds_check_map_.Put(array_length->GetId(), bounds_check); 1469 } else { 1470 // We've seen it at least twice. It's beneficial to introduce a compare with 1471 // deoptimization fallback to eliminate the bounds checks. 1472 need_to_revisit_block_ = true; 1473 } 1474 1475 // Once we have an array access like 'array[5] = 1', we record array.length >= 6. 1476 // We currently don't do it for non-constant index since a valid array[i] can't prove 1477 // a valid array[i-1] yet due to the lower bound side. 1478 if (constant == std::numeric_limits<int32_t>::max()) { 1479 // Max() as an index will definitely throw AIOOBE. 
1480 return; 1481 } 1482 ValueBound lower = ValueBound(nullptr, constant + 1); 1483 ValueBound upper = ValueBound::Max(); 1484 ValueRange* range = new (GetGraph()->GetArena()) 1485 ValueRange(GetGraph()->GetArena(), lower, upper); 1486 GetValueRangeMap(block)->Overwrite(array_length->GetId(), range); 1487 } 1488 } 1489 1490 void ReplaceBoundsCheck(HInstruction* bounds_check, HInstruction* index) { 1491 bounds_check->ReplaceWith(index); 1492 bounds_check->GetBlock()->RemoveInstruction(bounds_check); 1493 } 1494 1495 static bool HasSameInputAtBackEdges(HPhi* phi) { 1496 DCHECK(phi->IsLoopHeaderPhi()); 1497 // Start with input 1. Input 0 is from the incoming block. 1498 HInstruction* input1 = phi->InputAt(1); 1499 DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge( 1500 *phi->GetBlock()->GetPredecessors()[1])); 1501 for (size_t i = 2, e = phi->InputCount(); i < e; ++i) { 1502 DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge( 1503 *phi->GetBlock()->GetPredecessors()[i])); 1504 if (input1 != phi->InputAt(i)) { 1505 return false; 1506 } 1507 } 1508 return true; 1509 } 1510 1511 void VisitPhi(HPhi* phi) { 1512 if (phi->IsLoopHeaderPhi() 1513 && (phi->GetType() == Primitive::kPrimInt) 1514 && HasSameInputAtBackEdges(phi)) { 1515 HInstruction* instruction = phi->InputAt(1); 1516 HInstruction *left; 1517 int32_t increment; 1518 if (ValueBound::IsAddOrSubAConstant(instruction, &left, &increment)) { 1519 if (left == phi) { 1520 HInstruction* initial_value = phi->InputAt(0); 1521 ValueRange* range = nullptr; 1522 if (increment == 0) { 1523 // Add constant 0. It's really a fixed value. 1524 range = new (GetGraph()->GetArena()) ValueRange( 1525 GetGraph()->GetArena(), 1526 ValueBound(initial_value, 0), 1527 ValueBound(initial_value, 0)); 1528 } else { 1529 // Monotonically increasing/decreasing. 
1530 bool found; 1531 ValueBound bound = ValueBound::DetectValueBoundFromValue( 1532 initial_value, &found); 1533 if (!found) { 1534 // No constant or array.length+c bound found. 1535 // For i=j, we can still use j's upper bound as i's upper bound. 1536 // Same for lower. 1537 ValueRange* initial_range = LookupValueRange(initial_value, phi->GetBlock()); 1538 if (initial_range != nullptr) { 1539 bound = increment > 0 ? initial_range->GetLower() : 1540 initial_range->GetUpper(); 1541 } else { 1542 bound = increment > 0 ? ValueBound::Min() : ValueBound::Max(); 1543 } 1544 } 1545 range = new (GetGraph()->GetArena()) MonotonicValueRange( 1546 GetGraph()->GetArena(), 1547 phi, 1548 initial_value, 1549 increment, 1550 bound); 1551 } 1552 GetValueRangeMap(phi->GetBlock())->Overwrite(phi->GetId(), range); 1553 } 1554 } 1555 } 1556 } 1557 1558 void VisitIf(HIf* instruction) { 1559 if (instruction->InputAt(0)->IsCondition()) { 1560 HCondition* cond = instruction->InputAt(0)->AsCondition(); 1561 IfCondition cmp = cond->GetCondition(); 1562 if (cmp == kCondGT || cmp == kCondGE || 1563 cmp == kCondLT || cmp == kCondLE) { 1564 HInstruction* left = cond->GetLeft(); 1565 HInstruction* right = cond->GetRight(); 1566 HandleIf(instruction, left, right, cmp); 1567 1568 HBasicBlock* block = instruction->GetBlock(); 1569 ValueRange* left_range = LookupValueRange(left, block); 1570 if (left_range == nullptr) { 1571 return; 1572 } 1573 1574 if (left_range->IsMonotonicValueRange() && 1575 block == left_range->AsMonotonicValueRange()->GetLoopHeader()) { 1576 // The comparison is for an induction variable in the loop header. 1577 DCHECK(left == left_range->AsMonotonicValueRange()->GetInductionVariable()); 1578 HBasicBlock* loop_body_successor = 1579 left_range->AsMonotonicValueRange()->GetLoopHeaderSuccesorInLoop(); 1580 if (loop_body_successor == nullptr) { 1581 // In case it's some strange loop structure. 
1582 return; 1583 } 1584 ValueRange* new_left_range = LookupValueRange(left, loop_body_successor); 1585 if ((new_left_range == left_range) || 1586 // Range narrowed with deoptimization is usually more useful than 1587 // a constant range. 1588 new_left_range->IsConstantValueRange()) { 1589 // We are not successful in narrowing the monotonic value range to 1590 // a regular value range. Try using deoptimization. 1591 new_left_range = left_range->AsMonotonicValueRange()-> 1592 NarrowWithDeoptimization(); 1593 if (new_left_range != left_range) { 1594 GetValueRangeMap(loop_body_successor)->Overwrite(left->GetId(), new_left_range); 1595 } 1596 } 1597 } 1598 } 1599 } 1600 } 1601 1602 void VisitAdd(HAdd* add) { 1603 HInstruction* right = add->GetRight(); 1604 if (right->IsIntConstant()) { 1605 ValueRange* left_range = LookupValueRange(add->GetLeft(), add->GetBlock()); 1606 if (left_range == nullptr) { 1607 return; 1608 } 1609 ValueRange* range = left_range->Add(right->AsIntConstant()->GetValue()); 1610 if (range != nullptr) { 1611 GetValueRangeMap(add->GetBlock())->Overwrite(add->GetId(), range); 1612 } 1613 } 1614 } 1615 1616 void VisitSub(HSub* sub) { 1617 HInstruction* left = sub->GetLeft(); 1618 HInstruction* right = sub->GetRight(); 1619 if (right->IsIntConstant()) { 1620 ValueRange* left_range = LookupValueRange(left, sub->GetBlock()); 1621 if (left_range == nullptr) { 1622 return; 1623 } 1624 ValueRange* range = left_range->Add(-right->AsIntConstant()->GetValue()); 1625 if (range != nullptr) { 1626 GetValueRangeMap(sub->GetBlock())->Overwrite(sub->GetId(), range); 1627 return; 1628 } 1629 } 1630 1631 // Here we are interested in the typical triangular case of nested loops, 1632 // such as the inner loop 'for (int j=0; j<array.length-i; j++)' where i 1633 // is the index for outer loop. In this case, we know j is bounded by array.length-1. 1634 1635 // Try to handle (array.length - i) or (array.length + c - i) format. 
1636 HInstruction* left_of_left; // left input of left. 1637 int32_t right_const = 0; 1638 if (ValueBound::IsAddOrSubAConstant(left, &left_of_left, &right_const)) { 1639 left = left_of_left; 1640 } 1641 // The value of left input of the sub equals (left + right_const). 1642 1643 if (left->IsArrayLength()) { 1644 HInstruction* array_length = left->AsArrayLength(); 1645 ValueRange* right_range = LookupValueRange(right, sub->GetBlock()); 1646 if (right_range != nullptr) { 1647 ValueBound lower = right_range->GetLower(); 1648 ValueBound upper = right_range->GetUpper(); 1649 if (lower.IsConstant() && upper.IsRelatedToArrayLength()) { 1650 HInstruction* upper_inst = upper.GetInstruction(); 1651 // Make sure it's the same array. 1652 if (ValueBound::Equal(array_length, upper_inst)) { 1653 int32_t c0 = right_const; 1654 int32_t c1 = lower.GetConstant(); 1655 int32_t c2 = upper.GetConstant(); 1656 // (array.length + c0 - v) where v is in [c1, array.length + c2] 1657 // gets [c0 - c2, array.length + c0 - c1] as its value range. 1658 if (!ValueBound::WouldAddOverflowOrUnderflow(c0, -c2) && 1659 !ValueBound::WouldAddOverflowOrUnderflow(c0, -c1)) { 1660 if ((c0 - c1) <= 0) { 1661 // array.length + (c0 - c1) won't overflow/underflow. 1662 ValueRange* range = new (GetGraph()->GetArena()) ValueRange( 1663 GetGraph()->GetArena(), 1664 ValueBound(nullptr, right_const - upper.GetConstant()), 1665 ValueBound(array_length, right_const - lower.GetConstant())); 1666 GetValueRangeMap(sub->GetBlock())->Overwrite(sub->GetId(), range); 1667 } 1668 } 1669 } 1670 } 1671 } 1672 } 1673 } 1674 1675 void FindAndHandlePartialArrayLength(HBinaryOperation* instruction) { 1676 DCHECK(instruction->IsDiv() || instruction->IsShr() || instruction->IsUShr()); 1677 HInstruction* right = instruction->GetRight(); 1678 int32_t right_const; 1679 if (right->IsIntConstant()) { 1680 right_const = right->AsIntConstant()->GetValue(); 1681 // Detect division by two or more. 
      // Bail out unless this divides by >= 2 (Div) or shifts by >= 1
      // (Shr/UShr), i.e. the result is at most half of the left operand.
      if ((instruction->IsDiv() && right_const <= 1) ||
          (instruction->IsShr() && right_const < 1) ||
          (instruction->IsUShr() && right_const < 1)) {
        return;
      }
    } else {
      // Non-constant divisor/shift amount: nothing useful can be derived.
      return;
    }

    // Try to handle array.length/2 or (array.length-1)/2 format.
    HInstruction* left = instruction->GetLeft();
    HInstruction* left_of_left;  // left input of left.
    int32_t c = 0;
    if (ValueBound::IsAddOrSubAConstant(left, &left_of_left, &c)) {
      left = left_of_left;
    }
    // The value of left input of instruction equals (left + c).

    // (array_length + 1) or smaller divided by two or more
    // always generate a value in [Min(), array_length].
    // This is true even if array_length is Max().
    if (left->IsArrayLength() && c <= 1) {
      if (instruction->IsUShr() && c < 0) {
        // Make sure for unsigned shift, left side is not negative.
        // e.g. if array_length is 2, ((array_length - 3) >>> 2) is way bigger
        // than array_length.
        return;
      }
      ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
          GetGraph()->GetArena(),
          ValueBound(nullptr, std::numeric_limits<int32_t>::min()),
          ValueBound(left, 0));
      GetValueRangeMap(instruction->GetBlock())->Overwrite(instruction->GetId(), range);
    }
  }

  void VisitDiv(HDiv* div) {
    FindAndHandlePartialArrayLength(div);
  }

  void VisitShr(HShr* shr) {
    FindAndHandlePartialArrayLength(shr);
  }

  void VisitUShr(HUShr* ushr) {
    FindAndHandlePartialArrayLength(ushr);
  }

  // Masking with a positive constant bounds the result to [0, constant].
  void VisitAnd(HAnd* instruction) {
    if (instruction->GetRight()->IsIntConstant()) {
      int32_t constant = instruction->GetRight()->AsIntConstant()->GetValue();
      if (constant > 0) {
        // constant serves as a mask so any number masked with it
        // gets a [0, constant] value range.
        ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
            GetGraph()->GetArena(),
            ValueBound(nullptr, 0),
            ValueBound(nullptr, constant));
        GetValueRangeMap(instruction->GetBlock())->Overwrite(instruction->GetId(), range);
      }
    }
  }

  // When an array is allocated with size (left + right_const), derive a
  // range for `left` itself: the allocation implies
  // -right_const <= left <= new_array.length - right_const.
  void VisitNewArray(HNewArray* new_array) {
    HInstruction* len = new_array->InputAt(0);
    if (!len->IsIntConstant()) {
      HInstruction *left;
      int32_t right_const;
      if (ValueBound::IsAddOrSubAConstant(len, &left, &right_const)) {
        // (left + right_const) is used as size to new the array.
        // We record "-right_const <= left <= new_array - right_const";
        ValueBound lower = ValueBound(nullptr, -right_const);
        // We use new_array for the bound instead of new_array.length,
        // which isn't available as an instruction yet. new_array will
        // be treated the same as new_array.length when it's used in a ValueBound.
        ValueBound upper = ValueBound(new_array, -right_const);
        ValueRange* range = new (GetGraph()->GetArena())
            ValueRange(GetGraph()->GetArena(), lower, upper);
        ValueRange* existing_range = LookupValueRange(left, new_array->GetBlock());
        if (existing_range != nullptr) {
          // Keep whatever was already known about `left` and intersect it
          // with the allocation-implied range.
          range = existing_range->Narrow(range);
        }
        GetValueRangeMap(new_array->GetBlock())->Overwrite(left->GetId(), range);
      }
    }
  }

  void VisitDeoptimize(HDeoptimize* deoptimize) {
    // Right now it's only HLessThanOrEqual.
    DCHECK(deoptimize->InputAt(0)->IsLessThanOrEqual());
    HLessThanOrEqual* less_than_or_equal = deoptimize->InputAt(0)->AsLessThanOrEqual();
    HInstruction* instruction = less_than_or_equal->InputAt(0);
    if (instruction->IsArrayLength()) {
      HInstruction* constant = less_than_or_equal->InputAt(1);
      DCHECK(constant->IsIntConstant());
      DCHECK(constant->AsIntConstant()->GetValue() <= kMaxConstantForAddingDeoptimize);
      // Past this deoptimization point, (array.length <= constant) would have
      // deoptimized, so array.length is known to be in [constant + 1, Max].
      ValueBound lower = ValueBound(nullptr, constant->AsIntConstant()->GetValue() + 1);
      ValueRange* range = new (GetGraph()->GetArena())
          ValueRange(GetGraph()->GetArena(), lower, ValueBound::Max());
      GetValueRangeMap(deoptimize->GetBlock())->Overwrite(instruction->GetId(), range);
    }
  }

  // Insert "if (array.length <= const_instr) deoptimize;" before the first
  // constant-index bounds check on this array length in `block`, so the
  // later constant-index checks up to const_instr become redundant.
  void AddCompareWithDeoptimization(HInstruction* array_length,
                                    HIntConstant* const_instr,
                                    HBasicBlock* block) {
    DCHECK(array_length->IsArrayLength());
    ValueRange* range = LookupValueRange(array_length, block);
    ValueBound lower_bound = range->GetLower();
    DCHECK(lower_bound.IsConstant());
    DCHECK(const_instr->GetValue() <= kMaxConstantForAddingDeoptimize);
    // Note that the lower bound of the array length may have been refined
    // through other instructions (such as `HNewArray(length - 4)`).
    DCHECK_LE(const_instr->GetValue() + 1, lower_bound.GetConstant());

    // If array_length is less than lower_const, deoptimize.
    HBoundsCheck* bounds_check = first_constant_index_bounds_check_map_.Get(
        array_length->GetId())->AsBoundsCheck();
    HCondition* cond = new (GetGraph()->GetArena()) HLessThanOrEqual(array_length, const_instr);
    HDeoptimize* deoptimize = new (GetGraph()->GetArena())
        HDeoptimize(cond, bounds_check->GetDexPc());
    // Both new instructions go in front of the first constant-index check;
    // the deopt reuses that check's environment.
    block->InsertInstructionBefore(cond, bounds_check);
    block->InsertInstructionBefore(deoptimize, bounds_check);
    deoptimize->CopyEnvironmentFrom(bounds_check->GetEnvironment());
  }

  // For each array length that still has several constant-index bounds
  // checks in `block`, add one compare-and-deoptimize guard covering the
  // largest constant index (when profitable per the thresholds).
  void AddComparesWithDeoptimization(HBasicBlock* block) {
    for (ArenaSafeMap<int, HBoundsCheck*>::iterator it =
             first_constant_index_bounds_check_map_.begin();
         it != first_constant_index_bounds_check_map_.end();
         ++it) {
      HBoundsCheck* bounds_check = it->second;
      HInstruction* array_length = bounds_check->InputAt(1);
      if (!array_length->IsArrayLength()) {
        // Prior deoptimizations may have changed the array length to a phi.
        // TODO(mingyao): propagate the range to the phi?
        DCHECK(array_length->IsPhi()) << array_length->DebugName();
        continue;
      }
      // Track the largest constant index seen; the deopt guard will be
      // emitted against that constant.
      HIntConstant* lower_bound_const_instr = nullptr;
      int32_t lower_bound_const = std::numeric_limits<int32_t>::min();
      size_t counter = 0;
      // Count the constant indexing for which bounds checks haven't
      // been removed yet.
      for (HUseIterator<HInstruction*> it2(array_length->GetUses());
           !it2.Done();
           it2.Advance()) {
        HInstruction* user = it2.Current()->GetUser();
        if (user->GetBlock() == block &&
            user->IsBoundsCheck() &&
            user->AsBoundsCheck()->InputAt(0)->IsIntConstant()) {
          DCHECK_EQ(array_length, user->AsBoundsCheck()->InputAt(1));
          HIntConstant* const_instr = user->AsBoundsCheck()->InputAt(0)->AsIntConstant();
          if (const_instr->GetValue() > lower_bound_const) {
            lower_bound_const = const_instr->GetValue();
            lower_bound_const_instr = const_instr;
          }
          counter++;
        }
      }
      // Only add the guard when enough checks would be eliminated and the
      // guarded constant is small enough. NOTE(review): this dereference
      // assumes kThresholdForAddingDeoptimize >= 1 so that counter >= 1
      // implies lower_bound_const_instr was set — confirm against the
      // constant's definition.
      if (counter >= kThresholdForAddingDeoptimize &&
          lower_bound_const_instr->GetValue() <= kMaxConstantForAddingDeoptimize) {
        AddCompareWithDeoptimization(array_length, lower_bound_const_instr, block);
      }
    }
  }

  // Per-block maps from an instruction's id to its known ValueRange
  // (accessed via GetValueRangeMap/LookupValueRange).
  ArenaVector<ArenaSafeMap<int, ValueRange*>> maps_;

  // Map an HArrayLength instruction's id to the first HBoundsCheck instruction in
  // a block that checks a constant index against that HArrayLength.
  ArenaSafeMap<int, HBoundsCheck*> first_constant_index_bounds_check_map_;

  // For the block, there is at least one HArrayLength instruction for which there
  // is more than one bounds check instruction with constant indexing. And it's
  // beneficial to add a compare instruction that has deoptimization fallback and
  // eliminate those bounds checks.
  bool need_to_revisit_block_;

  // Initial number of blocks.
  uint32_t initial_block_size_;

  // Range analysis based on induction variables.
  InductionVarRange induction_range_;

  DISALLOW_COPY_AND_ASSIGN(BCEVisitor);
};

// Entry point of the pass: walks the graph in reverse post order and lets
// BCEVisitor collect ranges and eliminate redundant bounds checks. Bails out
// early when the graph contains no bounds checks at all.
void BoundsCheckElimination::Run() {
  if (!graph_->HasBoundsChecks()) {
    return;
  }

  BCEVisitor visitor(graph_, induction_analysis_);
  // Reverse post order guarantees a node's dominators are visited first.
  // We want to visit in the dominator-based order since if a value is known to
  // be bounded by a range at one instruction, it must be true that all uses of
  // that value dominated by that instruction fits in that range. Range of that
  // value can be narrowed further down in the dominator tree.
  //
  // TODO: only visit blocks that dominate some array accesses.
  HBasicBlock* last_visited_block = nullptr;
  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
    HBasicBlock* current = it.Current();
    if (current == last_visited_block) {
      // We may insert blocks into the reverse post order list when processing
      // a loop header. Don't process it again.
      DCHECK(current->IsLoopHeader());
      continue;
    }
    if (visitor.IsAddedBlock(current)) {
      // Skip added blocks. Their effects are already taken care of.
      continue;
    }
    visitor.VisitBasicBlock(current);
    last_visited_block = current;
  }
}

}  // namespace art