blob: dc75ff1abc1b624b2fa9c86e20451c1ebe0d31af [file] [log] [blame]
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "bounds_check_elimination.h"
#include <limits>
#include "base/arena_containers.h"
#include "induction_var_range.h"
#include "side_effects_analysis.h"
#include "nodes.h"
namespace art {
class MonotonicValueRange;
/**
 * A value bound is represented as a pair of value and constant,
 * e.g. array.length - 1.
 */
class ValueBound : public ValueObject {
 public:
  // Constructs a bound representing (instruction + constant). If `instruction`
  // is itself an int constant, the bound is normalized to a pure constant
  // (instruction_ == nullptr) whenever folding the two cannot overflow.
  ValueBound(HInstruction* instruction, int32_t constant) {
    if (instruction != nullptr && instruction->IsIntConstant()) {
      // Normalize ValueBound with constant instruction.
      int32_t instr_const = instruction->AsIntConstant()->GetValue();
      if (!WouldAddOverflowOrUnderflow(instr_const, constant)) {
        instruction_ = nullptr;
        constant_ = instr_const + constant;
        return;
      }
    }
    instruction_ = instruction;
    constant_ = constant;
  }

  // Return whether (left + right) overflows or underflows an int32_t.
  static bool WouldAddOverflowOrUnderflow(int32_t left, int32_t right) {
    if (right == 0) {
      return false;
    }
    if ((right > 0) && (left <= (std::numeric_limits<int32_t>::max() - right))) {
      // No overflow.
      return false;
    }
    if ((right < 0) && (left >= (std::numeric_limits<int32_t>::min() - right))) {
      // No underflow.
      return false;
    }
    return true;
  }

  // Matches instructions of the form (left + c) or (left - c) for an int
  // constant c, returning the pieces through the out parameters. On a match,
  // *right_constant holds the effective added constant (negated for Sub).
  // Returns false, with the outputs cleared, when the pattern does not apply.
  static bool IsAddOrSubAConstant(HInstruction* instruction,
                                  HInstruction** left_instruction,
                                  int* right_constant) {
    if (instruction->IsAdd() || instruction->IsSub()) {
      HBinaryOperation* bin_op = instruction->AsBinaryOperation();
      HInstruction* left = bin_op->GetLeft();
      HInstruction* right = bin_op->GetRight();
      if (right->IsIntConstant()) {
        int32_t c = right->AsIntConstant()->GetValue();
        // Bug fix: for Sub the constant is negated below, and negating
        // INT32_MIN overflows int32_t (undefined behavior). Rejecting the
        // match is always safe since callers simply detect no bound.
        if (!(instruction->IsSub() && c == std::numeric_limits<int32_t>::min())) {
          *left_instruction = left;
          *right_constant = instruction->IsAdd() ? c : -c;
          return true;
        }
      }
    }
    *left_instruction = nullptr;
    *right_constant = 0;
    return false;
  }

  // Try to detect useful value bound format from an instruction, e.g.
  // a constant or array length related value.
  static ValueBound DetectValueBoundFromValue(HInstruction* instruction, bool* found) {
    DCHECK(instruction != nullptr);
    if (instruction->IsIntConstant()) {
      *found = true;
      return ValueBound(nullptr, instruction->AsIntConstant()->GetValue());
    }
    if (instruction->IsArrayLength()) {
      *found = true;
      return ValueBound(instruction, 0);
    }
    // Try to detect (array.length + c) format.
    HInstruction *left;
    int32_t right;
    if (IsAddOrSubAConstant(instruction, &left, &right)) {
      if (left->IsArrayLength()) {
        *found = true;
        return ValueBound(left, right);
      }
    }
    // No useful bound detected.
    *found = false;
    return ValueBound::Max();
  }

  HInstruction* GetInstruction() const { return instruction_; }
  int32_t GetConstant() const { return constant_; }

  // Whether this bound is anchored on an array length value.
  bool IsRelatedToArrayLength() const {
    // Some bounds are created with HNewArray* as the instruction instead
    // of HArrayLength*. They are treated the same.
    return (instruction_ != nullptr) &&
           (instruction_->IsArrayLength() || instruction_->IsNewArray());
  }

  // A pure constant bound has no instruction part.
  bool IsConstant() const {
    return instruction_ == nullptr;
  }

  static ValueBound Min() { return ValueBound(nullptr, std::numeric_limits<int32_t>::min()); }
  static ValueBound Max() { return ValueBound(nullptr, std::numeric_limits<int32_t>::max()); }

  bool Equals(ValueBound bound) const {
    return instruction_ == bound.instruction_ && constant_ == bound.constant_;
  }

  /*
   * Hunt "under the hood" of array lengths (leading to array references),
   * null checks (also leading to array references), and new arrays
   * (leading to the actual length). This makes it more likely related
   * instructions become actually comparable.
   */
  static HInstruction* HuntForDeclaration(HInstruction* instruction) {
    while (instruction->IsArrayLength() ||
           instruction->IsNullCheck() ||
           instruction->IsNewArray()) {
      instruction = instruction->InputAt(0);
    }
    return instruction;
  }

  // Whether the two instructions denote the same value after stripping
  // array-length/null-check/new-array wrappers.
  static bool Equal(HInstruction* instruction1, HInstruction* instruction2) {
    if (instruction1 == instruction2) {
      return true;
    }
    if (instruction1 == nullptr || instruction2 == nullptr) {
      return false;
    }
    instruction1 = HuntForDeclaration(instruction1);
    instruction2 = HuntForDeclaration(instruction2);
    return instruction1 == instruction2;
  }

  // Returns if it's certain this->bound >= `bound`.
  bool GreaterThanOrEqualTo(ValueBound bound) const {
    if (Equal(instruction_, bound.instruction_)) {
      return constant_ >= bound.constant_;
    }
    // Not comparable. Just return false.
    return false;
  }

  // Returns if it's certain this->bound <= `bound`.
  bool LessThanOrEqualTo(ValueBound bound) const {
    if (Equal(instruction_, bound.instruction_)) {
      return constant_ <= bound.constant_;
    }
    // Not comparable. Just return false.
    return false;
  }

  // Returns if it's certain this->bound > `bound`.
  bool GreaterThan(ValueBound bound) const {
    if (Equal(instruction_, bound.instruction_)) {
      return constant_ > bound.constant_;
    }
    // Not comparable. Just return false.
    return false;
  }

  // Returns if it's certain this->bound < `bound`.
  bool LessThan(ValueBound bound) const {
    if (Equal(instruction_, bound.instruction_)) {
      return constant_ < bound.constant_;
    }
    // Not comparable. Just return false.
    return false;
  }

  // Try to narrow lower bound. Returns the greatest of the two if possible.
  // Pick one if they are not comparable.
  static ValueBound NarrowLowerBound(ValueBound bound1, ValueBound bound2) {
    if (bound1.GreaterThanOrEqualTo(bound2)) {
      return bound1;
    }
    if (bound2.GreaterThanOrEqualTo(bound1)) {
      return bound2;
    }
    // Not comparable. Just pick one. We may lose some info, but that's ok.
    // Favor constant as lower bound.
    return bound1.IsConstant() ? bound1 : bound2;
  }

  // Try to narrow upper bound. Returns the lowest of the two if possible.
  // Pick one if they are not comparable.
  static ValueBound NarrowUpperBound(ValueBound bound1, ValueBound bound2) {
    if (bound1.LessThanOrEqualTo(bound2)) {
      return bound1;
    }
    if (bound2.LessThanOrEqualTo(bound1)) {
      return bound2;
    }
    // Not comparable. Just pick one. We may lose some info, but that's ok.
    // Favor array length as upper bound.
    return bound1.IsRelatedToArrayLength() ? bound1 : bound2;
  }

  // Add a constant to a ValueBound.
  // `overflow` or `underflow` will return whether the resulting bound may
  // overflow or underflow an int.
  ValueBound Add(int32_t c, bool* overflow, bool* underflow) const {
    *overflow = *underflow = false;
    if (c == 0) {
      return *this;
    }
    int32_t new_constant;
    if (c > 0) {
      if (constant_ > (std::numeric_limits<int32_t>::max() - c)) {
        *overflow = true;
        return Max();
      }
      new_constant = constant_ + c;
      // (array.length + non-positive-constant) won't overflow an int.
      if (IsConstant() || (IsRelatedToArrayLength() && new_constant <= 0)) {
        return ValueBound(instruction_, new_constant);
      }
      // Be conservative.
      *overflow = true;
      return Max();
    } else {
      if (constant_ < (std::numeric_limits<int32_t>::min() - c)) {
        *underflow = true;
        return Min();
      }
      new_constant = constant_ + c;
      // Regardless of the value new_constant, (array.length+new_constant) will
      // never underflow since array.length is no less than 0.
      if (IsConstant() || IsRelatedToArrayLength()) {
        return ValueBound(instruction_, new_constant);
      }
      // Be conservative.
      *underflow = true;
      return Min();
    }
  }

 private:
  HInstruction* instruction_;  // nullptr for a pure constant bound.
  int32_t constant_;
};
/**
 * Represent a range of lower bound and upper bound, both being inclusive.
 * Currently a ValueRange may be generated as a result of the following:
 * comparisons related to array bounds, array bounds check, add/sub on top
 * of an existing value range, NewArray or a loop phi corresponding to an
 * incrementing/decrementing array index (MonotonicValueRange).
 */
class ValueRange : public ArenaObject<kArenaAllocBoundsCheckElimination> {
 public:
  ValueRange(ArenaAllocator* allocator, ValueBound lower, ValueBound upper)
      : allocator_(allocator), lower_(lower), upper_(upper) {}

  virtual ~ValueRange() {}

  // Overridden by MonotonicValueRange to return itself.
  virtual MonotonicValueRange* AsMonotonicValueRange() { return nullptr; }

  bool IsMonotonicValueRange() {
    return AsMonotonicValueRange() != nullptr;
  }

  ArenaAllocator* GetAllocator() const { return allocator_; }
  ValueBound GetLower() const { return lower_; }
  ValueBound GetUpper() const { return upper_; }

  bool IsConstantValueRange() { return lower_.IsConstant() && upper_.IsConstant(); }

  // If it's certain that this value range fits in other_range.
  virtual bool FitsIn(ValueRange* other_range) const {
    if (other_range == nullptr) {
      return true;
    }
    DCHECK(!other_range->IsMonotonicValueRange());
    bool lower_inside = lower_.GreaterThanOrEqualTo(other_range->lower_);
    bool upper_inside = upper_.LessThanOrEqualTo(other_range->upper_);
    return lower_inside && upper_inside;
  }

  // Returns the intersection of this and range.
  // If it's not possible to do intersection because some
  // bounds are not comparable, it's ok to pick either bound.
  virtual ValueRange* Narrow(ValueRange* range) {
    if (range == nullptr || range->IsMonotonicValueRange()) {
      return this;
    }
    ValueBound narrowed_lower = ValueBound::NarrowLowerBound(lower_, range->lower_);
    ValueBound narrowed_upper = ValueBound::NarrowUpperBound(upper_, range->upper_);
    return new (allocator_) ValueRange(allocator_, narrowed_lower, narrowed_upper);
  }

  // Shift a range by a constant. Returns nullptr when the shifted range
  // may wrap around, invalidating the other bound.
  ValueRange* Add(int32_t constant) const {
    bool overflow;
    bool underflow;
    ValueBound shifted_lower = lower_.Add(constant, &overflow, &underflow);
    if (underflow) {
      // Lower bound underflow will wrap around to positive values
      // and invalidate the upper bound.
      return nullptr;
    }
    ValueBound shifted_upper = upper_.Add(constant, &overflow, &underflow);
    if (overflow) {
      // Upper bound overflow will wrap around to negative values
      // and invalidate the lower bound.
      return nullptr;
    }
    return new (allocator_) ValueRange(allocator_, shifted_lower, shifted_upper);
  }

 private:
  ArenaAllocator* const allocator_;
  const ValueBound lower_;  // inclusive
  const ValueBound upper_;  // inclusive

  DISALLOW_COPY_AND_ASSIGN(ValueRange);
};
/**
 * A monotonically incrementing/decrementing value range, e.g.
 * the variable i in "for (int i=0; i<array.length; i++)".
 * Special care needs to be taken to account for overflow/underflow
 * of such value ranges.
 */
class MonotonicValueRange : public ValueRange {
 public:
  MonotonicValueRange(ArenaAllocator* allocator,
                      HPhi* induction_variable,
                      HInstruction* initial,
                      int32_t increment,
                      ValueBound bound)
      // To be conservative, give it full range [Min(), Max()] in case it's
      // used as a regular value range, due to possible overflow/underflow.
      : ValueRange(allocator, ValueBound::Min(), ValueBound::Max()),
        induction_variable_(induction_variable),
        initial_(initial),
        increment_(increment),
        bound_(bound) {}

  virtual ~MonotonicValueRange() {}

  int32_t GetIncrement() const { return increment_; }
  ValueBound GetBound() const { return bound_; }

  // The block defining the induction-variable phi; checked to be a loop header.
  HBasicBlock* GetLoopHeader() const {
    DCHECK(induction_variable_->GetBlock()->IsLoopHeader());
    return induction_variable_->GetBlock();
  }

  MonotonicValueRange* AsMonotonicValueRange() OVERRIDE { return this; }

  // If it's certain that this value range fits in other_range.
  // A monotonic range conservatively spans [Min(), Max()], so it can never
  // be proven to fit in a non-null regular range.
  bool FitsIn(ValueRange* other_range) const OVERRIDE {
    if (other_range == nullptr) {
      return true;
    }
    DCHECK(!other_range->IsMonotonicValueRange());
    return false;
  }

  // Try to narrow this MonotonicValueRange given another range.
  // Ideally it will return a normal ValueRange. But due to
  // possible overflow/underflow, that may not be possible.
  ValueRange* Narrow(ValueRange* range) OVERRIDE {
    if (range == nullptr) {
      return this;
    }
    DCHECK(!range->IsMonotonicValueRange());
    if (increment_ > 0) {
      // Monotonically increasing.
      ValueBound lower = ValueBound::NarrowLowerBound(bound_, range->GetLower());
      if (!lower.IsConstant() || lower.GetConstant() == std::numeric_limits<int32_t>::min()) {
        // Lower bound isn't useful. Leave it to deoptimization.
        return this;
      }
      // We currently conservatively assume max array length is Max().
      // If we can make assumptions about the max array length, e.g. due to the max heap size,
      // divided by the element size (such as 4 bytes for each integer array), we can
      // lower this number and rule out some possible overflows.
      int32_t max_array_len = std::numeric_limits<int32_t>::max();
      // max possible integer value of range's upper value.
      int32_t upper = std::numeric_limits<int32_t>::max();
      // Try to lower upper.
      ValueBound upper_bound = range->GetUpper();
      if (upper_bound.IsConstant()) {
        upper = upper_bound.GetConstant();
      } else if (upper_bound.IsRelatedToArrayLength() && upper_bound.GetConstant() <= 0) {
        // Normal case. e.g. <= array.length - 1.
        // Non-positive constant keeps (max_array_len + constant) from overflowing.
        upper = max_array_len + upper_bound.GetConstant();
      }
      // If we can prove for the last number in sequence of initial_,
      // initial_ + increment_, initial_ + 2 x increment_, ...
      // that's <= upper, (last_num_in_sequence + increment_) doesn't trigger overflow,
      // then this MonoticValueRange is narrowed to a normal value range.
      // Be conservative first, assume last number in the sequence hits upper.
      int32_t last_num_in_sequence = upper;
      if (initial_->IsIntConstant()) {
        int32_t initial_constant = initial_->AsIntConstant()->GetValue();
        if (upper <= initial_constant) {
          last_num_in_sequence = upper;
        } else {
          // Cast to int64_t for the substraction part to avoid int32_t overflow.
          last_num_in_sequence = initial_constant +
              ((int64_t)upper - (int64_t)initial_constant) / increment_ * increment_;
        }
      }
      if (last_num_in_sequence <= (std::numeric_limits<int32_t>::max() - increment_)) {
        // No overflow. The sequence will be stopped by the upper bound test as expected.
        return new (GetAllocator()) ValueRange(GetAllocator(), lower, range->GetUpper());
      }
      // There might be overflow. Give up narrowing.
      return this;
    } else {
      DCHECK_NE(increment_, 0);
      // Monotonically decreasing.
      ValueBound upper = ValueBound::NarrowUpperBound(bound_, range->GetUpper());
      if ((!upper.IsConstant() || upper.GetConstant() == std::numeric_limits<int32_t>::max()) &&
          !upper.IsRelatedToArrayLength()) {
        // Upper bound isn't useful. Leave it to deoptimization.
        return this;
      }
      // Need to take care of underflow. Try to prove underflow won't happen
      // for common cases.
      if (range->GetLower().IsConstant()) {
        int32_t constant = range->GetLower().GetConstant();
        // increment_ is negative here, so (min - increment_) cannot underflow.
        if (constant >= (std::numeric_limits<int32_t>::min() - increment_)) {
          return new (GetAllocator()) ValueRange(GetAllocator(), range->GetLower(), upper);
        }
      }
      // For non-constant lower bound, just assume might be underflow. Give up narrowing.
      return this;
    }
  }

 private:
  HPhi* const induction_variable_;  // Induction variable for this monotonic value range.
  HInstruction* const initial_;     // Initial value.
  const int32_t increment_;         // Increment for each loop iteration.
  const ValueBound bound_;          // Additional value bound info for initial_.

  DISALLOW_COPY_AND_ASSIGN(MonotonicValueRange);
};
class BCEVisitor : public HGraphVisitor {
public:
  // The least number of bounds checks that should be eliminated by triggering
  // the deoptimization technique.
  static constexpr size_t kThresholdForAddingDeoptimize = 2;

  // Very large constant index is considered as an anomaly. This is a threshold
  // beyond which we don't bother to apply the deoptimization technique since
  // it's likely some AIOOBE will be thrown.
  static constexpr int32_t kMaxConstantForAddingDeoptimize =
      std::numeric_limits<int32_t>::max() - 1024 * 1024;

  // Added blocks for loop body entry test.
  // Blocks created by this pass get ids >= the block count captured at
  // construction time (initial_block_size_), distinguishing them from
  // blocks that existed in the original graph.
  bool IsAddedBlock(HBasicBlock* block) const {
    return block->GetBlockId() >= initial_block_size_;
  }
  // All bookkeeping containers are arena-allocated alongside the graph.
  BCEVisitor(HGraph* graph,
             const SideEffectsAnalysis& side_effects,
             HInductionVarAnalysis* induction_analysis)
      : HGraphVisitor(graph),
        // One value-range map per original basic block, keyed by instruction id.
        maps_(graph->GetBlocks().size(),
              ArenaSafeMap<int, ValueRange*>(
                  std::less<int>(),
                  graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
              graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
        first_constant_index_bounds_check_map_(
            std::less<int>(),
            graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
        early_exit_loop_(
            std::less<uint32_t>(),
            graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
        taken_test_loop_(
            std::less<uint32_t>(),
            graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
        finite_loop_(graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
        need_to_revisit_block_(false),
        has_deoptimization_on_constant_subscripts_(false),
        // Snapshot the block count before this pass adds any blocks, so
        // IsAddedBlock() can tell pass-created blocks apart.
        initial_block_size_(graph->GetBlocks().size()),
        side_effects_(side_effects),
        induction_range_(induction_analysis) {}
  // Visit one block. If a constant-index bounds check pattern was seen at
  // least twice during the first visit (need_to_revisit_block_ set by
  // VisitBoundsCheck), insert compares with deoptimization and then revisit
  // the block from a clean slate so the new information can be exploited.
  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
    DCHECK(!IsAddedBlock(block));
    first_constant_index_bounds_check_map_.clear();
    HGraphVisitor::VisitBasicBlock(block);
    if (need_to_revisit_block_) {
      AddComparesWithDeoptimization(block);
      need_to_revisit_block_ = false;
      first_constant_index_bounds_check_map_.clear();
      // Discard ranges proven during the first visit; they are re-derived.
      GetValueRangeMap(block)->clear();
      HGraphVisitor::VisitBasicBlock(block);
    }
  }
  // Called once after the traversal to finalize the pass.
  void Finish() {
    // Preserve SSA structure which may have been broken by adding one or more
    // new taken-test structures (see TransformLoopForDeoptimizationIfNeeded()).
    InsertPhiNodes();
    // Clear the loop data structures.
    early_exit_loop_.clear();
    taken_test_loop_.clear();
    finite_loop_.clear();
  }
private:
  // Return the map of proven value ranges at the beginning of a basic block.
  // Returns nullptr for blocks this pass created itself (they carry no range
  // information).
  ArenaSafeMap<int, ValueRange*>* GetValueRangeMap(HBasicBlock* basic_block) {
    if (IsAddedBlock(basic_block)) {
      // Added blocks don't keep value ranges.
      return nullptr;
    }
    uint32_t block_id = basic_block->GetBlockId();
    return &maps_[block_id];
  }
// Traverse up the dominator tree to look for value range info.
ValueRange* LookupValueRange(HInstruction* instruction, HBasicBlock* basic_block) {
while (basic_block != nullptr) {
ArenaSafeMap<int, ValueRange*>* map = GetValueRangeMap(basic_block);
if (map != nullptr) {
if (map->find(instruction->GetId()) != map->end()) {
return map->Get(instruction->GetId());
}
} else {
DCHECK(IsAddedBlock(basic_block));
}
basic_block = basic_block->GetDominator();
}
// Didn't find any.
return nullptr;
}
  // Narrow the value range of `instruction` at the end of `basic_block` with `range`,
  // and push the narrowed value range to `successor`.
  void ApplyRangeFromComparison(HInstruction* instruction, HBasicBlock* basic_block,
                                HBasicBlock* successor, ValueRange* range) {
    ValueRange* existing_range = LookupValueRange(instruction, basic_block);
    if (existing_range == nullptr) {
      // Nothing known yet; the comparison-derived range is the first fact.
      if (range != nullptr) {
        GetValueRangeMap(successor)->Overwrite(instruction->GetId(), range);
      }
      return;
    }
    if (existing_range->IsMonotonicValueRange()) {
      DCHECK(instruction->IsLoopHeaderPhi());
      // Make sure the comparison is in the loop header so each increment is
      // checked with a comparison.
      if (instruction->GetBlock() != basic_block) {
        return;
      }
    }
    // Intersect the old and new information for the successor block.
    ValueRange* narrowed_range = existing_range->Narrow(range);
    GetValueRangeMap(successor)->Overwrite(instruction->GetId(), narrowed_range);
  }
  // Special case that we may simultaneously narrow two MonotonicValueRange's to
  // regular value ranges.
  void HandleIfBetweenTwoMonotonicValueRanges(HIf* instruction,
                                              HInstruction* left,
                                              HInstruction* right,
                                              IfCondition cond,
                                              MonotonicValueRange* left_range,
                                              MonotonicValueRange* right_range) {
    DCHECK(left->IsLoopHeaderPhi());
    DCHECK(right->IsLoopHeaderPhi());
    if (instruction->GetBlock() != left->GetBlock()) {
      // Comparison needs to be in loop header to make sure it's done after each
      // increment/decrement.
      return;
    }
    // Handle common cases which also don't have overflow/underflow concerns:
    // left counts up by 1 from a constant; right counts down by 1 from
    // something below array.length.
    if (left_range->GetIncrement() == 1 &&
        left_range->GetBound().IsConstant() &&
        right_range->GetIncrement() == -1 &&
        right_range->GetBound().IsRelatedToArrayLength() &&
        right_range->GetBound().GetConstant() < 0) {
      HBasicBlock* successor = nullptr;
      int32_t left_compensation = 0;
      int32_t right_compensation = 0;
      // The compensations turn strict comparisons into inclusive bounds:
      // e.g. for i < j, i's upper bound is (j - 1) and j's lower bound (i + 1).
      if (cond == kCondLT) {
        left_compensation = -1;
        right_compensation = 1;
        successor = instruction->IfTrueSuccessor();
      } else if (cond == kCondLE) {
        successor = instruction->IfTrueSuccessor();
      } else if (cond == kCondGT) {
        successor = instruction->IfFalseSuccessor();
      } else if (cond == kCondGE) {
        left_compensation = -1;
        right_compensation = 1;
        successor = instruction->IfFalseSuccessor();
      } else {
        // We don't handle '=='/'!=' test in case left and right can cross and
        // miss each other.
        return;
      }
      if (successor != nullptr) {
        bool overflow;
        bool underflow;
        // left is bounded below by its own start and above by right's bound.
        ValueRange* new_left_range = new (GetGraph()->GetArena()) ValueRange(
            GetGraph()->GetArena(),
            left_range->GetBound(),
            right_range->GetBound().Add(left_compensation, &overflow, &underflow));
        if (!overflow && !underflow) {
          ApplyRangeFromComparison(left, instruction->GetBlock(), successor,
                                   new_left_range);
        }
        // right is bounded below by left's start and above by its own bound.
        ValueRange* new_right_range = new (GetGraph()->GetArena()) ValueRange(
            GetGraph()->GetArena(),
            left_range->GetBound().Add(right_compensation, &overflow, &underflow),
            right_range->GetBound());
        if (!overflow && !underflow) {
          ApplyRangeFromComparison(right, instruction->GetBlock(), successor,
                                   new_right_range);
        }
      }
    }
  }
  // Handle "if (left cmp_cond right)".
  // Derives value ranges for `left` in the true and false successors from
  // the comparison, using `right` either as a detected bound (constant or
  // array.length + c) or via right's own proven range.
  void HandleIf(HIf* instruction, HInstruction* left, HInstruction* right, IfCondition cond) {
    HBasicBlock* block = instruction->GetBlock();
    HBasicBlock* true_successor = instruction->IfTrueSuccessor();
    // There should be no critical edge at this point.
    DCHECK_EQ(true_successor->GetPredecessors().size(), 1u);
    HBasicBlock* false_successor = instruction->IfFalseSuccessor();
    // There should be no critical edge at this point.
    DCHECK_EQ(false_successor->GetPredecessors().size(), 1u);
    ValueRange* left_range = LookupValueRange(left, block);
    MonotonicValueRange* left_monotonic_range = nullptr;
    if (left_range != nullptr) {
      left_monotonic_range = left_range->AsMonotonicValueRange();
      if (left_monotonic_range != nullptr) {
        HBasicBlock* loop_head = left_monotonic_range->GetLoopHeader();
        if (instruction->GetBlock() != loop_head) {
          // For monotonic value range, don't handle `instruction`
          // if it's not defined in the loop header.
          return;
        }
      }
    }
    bool found;
    ValueBound bound = ValueBound::DetectValueBoundFromValue(right, &found);
    // Each comparison can establish a lower bound and an upper bound
    // for the left hand side.
    ValueBound lower = bound;
    ValueBound upper = bound;
    if (!found) {
      // No constant or array.length+c format bound found.
      // For i<j, we can still use j's upper bound as i's upper bound. Same for lower.
      ValueRange* right_range = LookupValueRange(right, block);
      if (right_range != nullptr) {
        if (right_range->IsMonotonicValueRange()) {
          if (left_range != nullptr && left_range->IsMonotonicValueRange()) {
            // Both sides are induction variables; use the specialized handler.
            HandleIfBetweenTwoMonotonicValueRanges(instruction, left, right, cond,
                                                   left_range->AsMonotonicValueRange(),
                                                   right_range->AsMonotonicValueRange());
            return;
          }
        }
        lower = right_range->GetLower();
        upper = right_range->GetUpper();
      } else {
        lower = ValueBound::Min();
        upper = ValueBound::Max();
      }
    }
    bool overflow, underflow;
    if (cond == kCondLT || cond == kCondLE) {
      // True branch: left <(=) right establishes an upper bound on left.
      if (!upper.Equals(ValueBound::Max())) {
        int32_t compensation = (cond == kCondLT) ? -1 : 0;  // upper bound is inclusive
        ValueBound new_upper = upper.Add(compensation, &overflow, &underflow);
        if (overflow || underflow) {
          return;
        }
        ValueRange* new_range = new (GetGraph()->GetArena())
            ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper);
        ApplyRangeFromComparison(left, block, true_successor, new_range);
      }
      // False branch: !(left <(=) right) establishes a lower bound on left.
      // array.length as a lower bound isn't considered useful.
      if (!lower.Equals(ValueBound::Min()) && !lower.IsRelatedToArrayLength()) {
        int32_t compensation = (cond == kCondLE) ? 1 : 0;  // lower bound is inclusive
        ValueBound new_lower = lower.Add(compensation, &overflow, &underflow);
        if (overflow || underflow) {
          return;
        }
        ValueRange* new_range = new (GetGraph()->GetArena())
            ValueRange(GetGraph()->GetArena(), new_lower, ValueBound::Max());
        ApplyRangeFromComparison(left, block, false_successor, new_range);
      }
    } else if (cond == kCondGT || cond == kCondGE) {
      // True branch: left >(=) right establishes a lower bound on left.
      // array.length as a lower bound isn't considered useful.
      if (!lower.Equals(ValueBound::Min()) && !lower.IsRelatedToArrayLength()) {
        int32_t compensation = (cond == kCondGT) ? 1 : 0;  // lower bound is inclusive
        ValueBound new_lower = lower.Add(compensation, &overflow, &underflow);
        if (overflow || underflow) {
          return;
        }
        ValueRange* new_range = new (GetGraph()->GetArena())
            ValueRange(GetGraph()->GetArena(), new_lower, ValueBound::Max());
        ApplyRangeFromComparison(left, block, true_successor, new_range);
      }
      // False branch: !(left >(=) right) establishes an upper bound on left.
      if (!upper.Equals(ValueBound::Max())) {
        int32_t compensation = (cond == kCondGE) ? -1 : 0;  // upper bound is inclusive
        ValueBound new_upper = upper.Add(compensation, &overflow, &underflow);
        if (overflow || underflow) {
          return;
        }
        ValueRange* new_range = new (GetGraph()->GetArena())
            ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper);
        ApplyRangeFromComparison(left, block, false_successor, new_range);
      }
    }
  }
  // Try to eliminate a bounds check, first by static analysis (proven value
  // ranges or induction-variable analysis), then, if that fails and an
  // out-of-bounds access is not certain, by dynamic BCE (deoptimization).
  void VisitBoundsCheck(HBoundsCheck* bounds_check) OVERRIDE {
    HBasicBlock* block = bounds_check->GetBlock();
    HInstruction* index = bounds_check->InputAt(0);
    HInstruction* array_length = bounds_check->InputAt(1);
    DCHECK(array_length->IsIntConstant() ||
           array_length->IsArrayLength() ||
           array_length->IsPhi());
    bool try_dynamic_bce = true;
    if (!index->IsIntConstant()) {
      // Non-constant subscript.
      // The check is redundant iff index is within [0, array_length - 1].
      ValueBound lower = ValueBound(nullptr, 0);        // constant 0
      ValueBound upper = ValueBound(array_length, -1);  // array_length - 1
      ValueRange array_range(GetGraph()->GetArena(), lower, upper);
      // Try range obtained by dominator-based analysis.
      ValueRange* index_range = LookupValueRange(index, block);
      if (index_range != nullptr && index_range->FitsIn(&array_range)) {
        ReplaceInstruction(bounds_check, index);
        return;
      }
      // Try range obtained by induction variable analysis.
      // Disables dynamic bce if OOB is certain.
      if (InductionRangeFitsIn(&array_range, bounds_check, index, &try_dynamic_bce)) {
        ReplaceInstruction(bounds_check, index);
        return;
      }
    } else {
      // Constant subscript.
      int32_t constant = index->AsIntConstant()->GetValue();
      if (constant < 0) {
        // Will always throw exception.
        return;
      }
      if (array_length->IsIntConstant()) {
        if (constant < array_length->AsIntConstant()->GetValue()) {
          ReplaceInstruction(bounds_check, index);
        }
        return;
      }
      DCHECK(array_length->IsArrayLength());
      ValueRange* existing_range = LookupValueRange(array_length, block);
      if (existing_range != nullptr) {
        ValueBound lower = existing_range->GetLower();
        DCHECK(lower.IsConstant());
        if (constant < lower.GetConstant()) {
          // A prior access proved array.length > constant; check is redundant.
          ReplaceInstruction(bounds_check, index);
          return;
        } else {
          // Existing range isn't strong enough to eliminate the bounds check.
          // Fall through to update the array_length range with info from this
          // bounds check.
        }
      }
      if (first_constant_index_bounds_check_map_.find(array_length->GetId()) ==
          first_constant_index_bounds_check_map_.end()) {
        // Remember the first bounds check against array_length of a constant index.
        // That bounds check instruction has an associated HEnvironment where we
        // may add an HDeoptimize to eliminate bounds checks of constant indices
        // against array_length.
        first_constant_index_bounds_check_map_.Put(array_length->GetId(), bounds_check);
      } else {
        // We've seen it at least twice. It's beneficial to introduce a compare with
        // deoptimization fallback to eliminate the bounds checks.
        need_to_revisit_block_ = true;
      }
      // Once we have an array access like 'array[5] = 1', we record array.length >= 6.
      // We currently don't do it for non-constant index since a valid array[i] can't prove
      // a valid array[i-1] yet due to the lower bound side.
      if (constant == std::numeric_limits<int32_t>::max()) {
        // Max() as an index will definitely throw AIOOBE.
        return;
      }
      ValueBound lower = ValueBound(nullptr, constant + 1);
      ValueBound upper = ValueBound::Max();
      ValueRange* range = new (GetGraph()->GetArena())
          ValueRange(GetGraph()->GetArena(), lower, upper);
      GetValueRangeMap(block)->Overwrite(array_length->GetId(), range);
    }
    // If static analysis fails, and OOB is not certain, try dynamic elimination.
    if (try_dynamic_bce) {
      TryDynamicBCE(bounds_check);
    }
  }
static bool HasSameInputAtBackEdges(HPhi* phi) {
DCHECK(phi->IsLoopHeaderPhi());
// Start with input 1. Input 0 is from the incoming block.
HInstruction* input1 = phi->InputAt(1);
DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge(
*phi->GetBlock()->GetPredecessors()[1]));
for (size_t i = 2, e = phi->InputCount(); i < e; ++i) {
DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge(
*phi->GetBlock()->GetPredecessors()[i]));
if (input1 != phi->InputAt(i)) {
return false;
}
}
return true;
}
  // Recognize loop-header phis of the form i = phi(initial, i + increment)
  // and record a value range for them: a fixed range when increment == 0,
  // otherwise a MonotonicValueRange seeded with a bound for the initial value.
  void VisitPhi(HPhi* phi) OVERRIDE {
    if (phi->IsLoopHeaderPhi()
        && (phi->GetType() == Primitive::kPrimInt)
        && HasSameInputAtBackEdges(phi)) {
      HInstruction* instruction = phi->InputAt(1);
      HInstruction *left;
      int32_t increment;
      if (ValueBound::IsAddOrSubAConstant(instruction, &left, &increment)) {
        if (left == phi) {
          // The back-edge value is (phi + increment): an induction variable.
          HInstruction* initial_value = phi->InputAt(0);
          ValueRange* range = nullptr;
          if (increment == 0) {
            // Add constant 0. It's really a fixed value.
            range = new (GetGraph()->GetArena()) ValueRange(
                GetGraph()->GetArena(),
                ValueBound(initial_value, 0),
                ValueBound(initial_value, 0));
          } else {
            // Monotonically increasing/decreasing.
            bool found;
            ValueBound bound = ValueBound::DetectValueBoundFromValue(
                initial_value, &found);
            if (!found) {
              // No constant or array.length+c bound found.
              // For i=j, we can still use j's upper bound as i's upper bound.
              // Same for lower.
              ValueRange* initial_range = LookupValueRange(initial_value, phi->GetBlock());
              if (initial_range != nullptr) {
                bound = increment > 0 ? initial_range->GetLower() :
                                        initial_range->GetUpper();
              } else {
                bound = increment > 0 ? ValueBound::Min() : ValueBound::Max();
              }
            }
            range = new (GetGraph()->GetArena()) MonotonicValueRange(
                GetGraph()->GetArena(),
                phi,
                initial_value,
                increment,
                bound);
          }
          GetValueRangeMap(phi->GetBlock())->Overwrite(phi->GetId(), range);
        }
      }
    }
  }
void VisitIf(HIf* instruction) OVERRIDE {
if (instruction->InputAt(0)->IsCondition()) {
HCondition* cond = instruction->InputAt(0)->AsCondition();
IfCondition cmp = cond->GetCondition();
if (cmp == kCondGT || cmp == kCondGE ||
cmp == kCondLT || cmp == kCondLE) {
HInstruction* left = cond->GetLeft();
HInstruction* right = cond->GetRight();
HandleIf(instruction, left, right, cmp);
}
}
}
void VisitAdd(HAdd* add) OVERRIDE {
HInstruction* right = add->GetRight();
if (right->IsIntConstant()) {
ValueRange* left_range = LookupValueRange(add->GetLeft(), add->GetBlock());
if (left_range == nullptr) {
return;
}
ValueRange* range = left_range->Add(right->AsIntConstant()->GetValue());
if (range != nullptr) {
GetValueRangeMap(add->GetBlock())->Overwrite(add->GetId(), range);
}
}
}
// Infers a value range for a subtraction. Two patterns are handled:
// (1) a constant right operand, treated as a range shift by its negation;
// (2) the triangular nested-loop pattern (array.length [+ c] - v).
void VisitSub(HSub* sub) OVERRIDE {
  HInstruction* left = sub->GetLeft();
  HInstruction* right = sub->GetRight();
  if (right->IsIntConstant()) {
    ValueRange* left_range = LookupValueRange(left, sub->GetBlock());
    if (left_range == nullptr) {
      return;
    }
    // Subtracting a constant is adding its negation. Add() may return
    // nullptr; in that case fall through to the pattern match below.
    ValueRange* range = left_range->Add(-right->AsIntConstant()->GetValue());
    if (range != nullptr) {
      GetValueRangeMap(sub->GetBlock())->Overwrite(sub->GetId(), range);
      return;
    }
  }
  // Here we are interested in the typical triangular case of nested loops,
  // such as the inner loop 'for (int j=0; j<array.length-i; j++)' where i
  // is the index for outer loop. In this case, we know j is bounded by array.length-1.
  // Try to handle (array.length - i) or (array.length + c - i) format.
  HInstruction* left_of_left;  // left input of left.
  int32_t right_const = 0;
  if (ValueBound::IsAddOrSubAConstant(left, &left_of_left, &right_const)) {
    left = left_of_left;
  }
  // The value of left input of the sub equals (left + right_const).
  if (left->IsArrayLength()) {
    HInstruction* array_length = left->AsArrayLength();
    ValueRange* right_range = LookupValueRange(right, sub->GetBlock());
    if (right_range != nullptr) {
      ValueBound lower = right_range->GetLower();
      ValueBound upper = right_range->GetUpper();
      if (lower.IsConstant() && upper.IsRelatedToArrayLength()) {
        HInstruction* upper_inst = upper.GetInstruction();
        // Make sure it's the same array.
        if (ValueBound::Equal(array_length, upper_inst)) {
          int32_t c0 = right_const;
          int32_t c1 = lower.GetConstant();
          int32_t c2 = upper.GetConstant();
          // (array.length + c0 - v) where v is in [c1, array.length + c2]
          // gets [c0 - c2, array.length + c0 - c1] as its value range.
          if (!ValueBound::WouldAddOverflowOrUnderflow(c0, -c2) &&
              !ValueBound::WouldAddOverflowOrUnderflow(c0, -c1)) {
            if ((c0 - c1) <= 0) {
              // array.length + (c0 - c1) won't overflow/underflow.
              ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
                  GetGraph()->GetArena(),
                  ValueBound(nullptr, right_const - upper.GetConstant()),
                  ValueBound(array_length, right_const - lower.GetConstant()));
              GetValueRangeMap(sub->GetBlock())->Overwrite(sub->GetId(), range);
            }
          }
        }
      }
    }
  }
}
// Infers a range for an array length divided (or shifted) down by a positive
// constant, e.g. array.length/2 or (array.length-1)>>1. Such a value never
// exceeds array.length, so it gets the range [int32 min, array.length].
void FindAndHandlePartialArrayLength(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsShr() || instruction->IsUShr());
  HInstruction* right = instruction->GetRight();
  int32_t right_const;
  if (right->IsIntConstant()) {
    right_const = right->AsIntConstant()->GetValue();
    // Detect division by two or more.
    if ((instruction->IsDiv() && right_const <= 1) ||
        (instruction->IsShr() && right_const < 1) ||
        (instruction->IsUShr() && right_const < 1)) {
      return;
    }
  } else {
    // Non-constant divisor/shift amount: nothing can be concluded.
    return;
  }
  // Try to handle array.length/2 or (array.length-1)/2 format.
  HInstruction* left = instruction->GetLeft();
  HInstruction* left_of_left;  // left input of left.
  int32_t c = 0;
  if (ValueBound::IsAddOrSubAConstant(left, &left_of_left, &c)) {
    left = left_of_left;
  }
  // The value of left input of instruction equals (left + c).
  // (array_length + 1) or smaller divided by two or more
  // always generate a value in [Min(), array_length].
  // This is true even if array_length is Max().
  if (left->IsArrayLength() && c <= 1) {
    if (instruction->IsUShr() && c < 0) {
      // Make sure for unsigned shift, left side is not negative.
      // e.g. if array_length is 2, ((array_length - 3) >>> 2) is way bigger
      // than array_length.
      return;
    }
    ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
        GetGraph()->GetArena(),
        ValueBound(nullptr, std::numeric_limits<int32_t>::min()),
        ValueBound(left, 0));
    GetValueRangeMap(instruction->GetBlock())->Overwrite(instruction->GetId(), range);
  }
}
// A division by a constant >= 2 may yield a partial array length; see
// FindAndHandlePartialArrayLength().
void VisitDiv(HDiv* div) OVERRIDE {
  FindAndHandlePartialArrayLength(div);
}
// A right shift by a positive constant may yield a partial array length; see
// FindAndHandlePartialArrayLength().
void VisitShr(HShr* shr) OVERRIDE {
  FindAndHandlePartialArrayLength(shr);
}
// An unsigned right shift by a positive constant may yield a partial array
// length; see FindAndHandlePartialArrayLength().
void VisitUShr(HUShr* ushr) OVERRIDE {
  FindAndHandlePartialArrayLength(ushr);
}
// A bitwise "and" with a positive constant acts as a mask, bounding the
// result to the range [0, constant].
void VisitAnd(HAnd* instruction) OVERRIDE {
  HInstruction* rhs = instruction->GetRight();
  if (!rhs->IsIntConstant()) {
    return;
  }
  int32_t mask = rhs->AsIntConstant()->GetValue();
  if (mask <= 0) {
    // Zero or negative masks yield no useful bound here.
    return;
  }
  ValueRange* masked_range = new (GetGraph()->GetArena()) ValueRange(
      GetGraph()->GetArena(),
      ValueBound(nullptr, 0),
      ValueBound(nullptr, mask));
  GetValueRangeMap(instruction->GetBlock())->Overwrite(instruction->GetId(), masked_range);
}
// When an array is created with size (left + right_const), record the
// implied range of 'left': -right_const <= left <= new_array.length - right_const.
void VisitNewArray(HNewArray* new_array) OVERRIDE {
  HInstruction* len = new_array->InputAt(0);
  if (!len->IsIntConstant()) {
    HInstruction *left;
    int32_t right_const;
    if (ValueBound::IsAddOrSubAConstant(len, &left, &right_const)) {
      // (left + right_const) is used as size to new the array.
      // We record "-right_const <= left <= new_array - right_const";
      ValueBound lower = ValueBound(nullptr, -right_const);
      // We use new_array for the bound instead of new_array.length,
      // which isn't available as an instruction yet. new_array will
      // be treated the same as new_array.length when it's used in a ValueBound.
      ValueBound upper = ValueBound(new_array, -right_const);
      ValueRange* range = new (GetGraph()->GetArena())
          ValueRange(GetGraph()->GetArena(), lower, upper);
      ValueRange* existing_range = LookupValueRange(left, new_array->GetBlock());
      if (existing_range != nullptr) {
        // Intersect with any range already known for 'left'.
        range = existing_range->Narrow(range);
      }
      GetValueRangeMap(new_array->GetBlock())->Overwrite(left->GetId(), range);
    }
  }
}
// Narrows the range of an array length in blocks following a deoptimization
// of the form deoptimize(array_length <= constant), the pattern produced by
// AddCompareWithDeoptimization().
void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE {
  HInstruction* guard = deoptimize->InputAt(0);
  if (!guard->IsLessThanOrEqual()) {
    return;
  }
  HLessThanOrEqual* compare = guard->AsLessThanOrEqual();
  HInstruction* array_length = compare->InputAt(0);
  if (!array_length->IsArrayLength()) {
    return;
  }
  HInstruction* constant = compare->InputAt(1);
  DCHECK(constant->IsIntConstant());
  DCHECK(constant->AsIntConstant()->GetValue() <= kMaxConstantForAddingDeoptimize);
  // Past this point array_length > constant holds, i.e. its lower bound
  // becomes constant + 1.
  ValueBound lower = ValueBound(nullptr, constant->AsIntConstant()->GetValue() + 1);
  ValueRange* range = new (GetGraph()->GetArena())
      ValueRange(GetGraph()->GetArena(), lower, ValueBound::Max());
  GetValueRangeMap(deoptimize->GetBlock())->Overwrite(array_length->GetId(), range);
}
/**
 * After null/bounds checks are eliminated, some invariant array references
 * may be exposed underneath which can be hoisted out of the loop to the
 * preheader or, in combination with dynamic bce, the deoptimization block.
 *
 * for (int i = 0; i < n; i++) {
 *                                <-------+
 *   for (int j = 0; j < n; j++)          |
 *     a[i][j] = 0;               --a[i]--+
 * }
 *
 * Note: this optimization is no longer applied after deoptimization on array references
 * with constant subscripts has occurred (see AddCompareWithDeoptimization()), since in
 * those cases it would be unsafe to hoist array references across their deoptimization
 * instruction inside a loop.
 */
void VisitArrayGet(HArrayGet* array_get) OVERRIDE {
  if (!has_deoptimization_on_constant_subscripts_ && array_get->IsInLoop()) {
    HLoopInformation* loop = array_get->GetBlock()->GetLoopInformation();
    // Both the array and the index must be defined outside the loop.
    if (loop->IsDefinedOutOfTheLoop(array_get->InputAt(0)) &&
        loop->IsDefinedOutOfTheLoop(array_get->InputAt(1))) {
      SideEffects loop_effects = side_effects_.GetLoopEffects(loop->GetHeader());
      // Hoisting is only done when the loaded value cannot depend on any
      // side effect of the loop body.
      if (!array_get->GetSideEffects().MayDependOn(loop_effects)) {
        HoistToPreheaderOrDeoptBlock(loop, array_get);
      }
    }
  }
}
// Inserts "if (array_length <= const_instr) deoptimize" right before the
// first constant-index bounds check recorded for array_length in 'block'.
// After this guard, array_length > const_instr is guaranteed.
void AddCompareWithDeoptimization(HInstruction* array_length,
                                  HIntConstant* const_instr,
                                  HBasicBlock* block) {
  DCHECK(array_length->IsArrayLength());
  ValueRange* range = LookupValueRange(array_length, block);
  ValueBound lower_bound = range->GetLower();
  DCHECK(lower_bound.IsConstant());
  DCHECK(const_instr->GetValue() <= kMaxConstantForAddingDeoptimize);
  // Note that the lower bound of the array length may have been refined
  // through other instructions (such as `HNewArray(length - 4)`).
  DCHECK_LE(const_instr->GetValue() + 1, lower_bound.GetConstant());
  // If array_length is less than lower_const, deoptimize.
  HBoundsCheck* bounds_check = first_constant_index_bounds_check_map_.Get(
      array_length->GetId())->AsBoundsCheck();
  HCondition* cond = new (GetGraph()->GetArena()) HLessThanOrEqual(array_length, const_instr);
  HDeoptimize* deoptimize = new (GetGraph()->GetArena())
      HDeoptimize(cond, bounds_check->GetDexPc());
  // Order matters: the condition must precede the deoptimize that uses it,
  // and both must precede the bounds check being guarded.
  block->InsertInstructionBefore(cond, bounds_check);
  block->InsertInstructionBefore(deoptimize, bounds_check);
  deoptimize->CopyEnvironmentFrom(bounds_check->GetEnvironment());
  // Flag that this kind of deoptimization on array references with constant
  // subscripts has occurred to prevent further hoisting of these references.
  has_deoptimization_on_constant_subscripts_ = true;
}
// For each array length with recorded constant-index bounds checks in
// 'block', counts the checks not yet removed and, when the heuristics deem
// it profitable, guards them all with a single compare-and-deoptimize (see
// AddCompareWithDeoptimization()).
void AddComparesWithDeoptimization(HBasicBlock* block) {
  for (ArenaSafeMap<int, HBoundsCheck*>::iterator it =
           first_constant_index_bounds_check_map_.begin();
       it != first_constant_index_bounds_check_map_.end();
       ++it) {
    HBoundsCheck* bounds_check = it->second;
    HInstruction* array_length = bounds_check->InputAt(1);
    if (!array_length->IsArrayLength()) {
      // Prior deoptimizations may have changed the array length to a phi.
      // TODO(mingyao): propagate the range to the phi?
      DCHECK(array_length->IsPhi()) << array_length->DebugName();
      continue;
    }
    HIntConstant* lower_bound_const_instr = nullptr;
    int32_t lower_bound_const = std::numeric_limits<int32_t>::min();
    size_t counter = 0;
    // Count the constant indexing for which bounds checks haven't
    // been removed yet.
    for (HUseIterator<HInstruction*> it2(array_length->GetUses());
         !it2.Done();
         it2.Advance()) {
      HInstruction* user = it2.Current()->GetUser();
      if (user->GetBlock() == block &&
          user->IsBoundsCheck() &&
          user->AsBoundsCheck()->InputAt(0)->IsIntConstant()) {
        DCHECK_EQ(array_length, user->AsBoundsCheck()->InputAt(1));
        HIntConstant* const_instr = user->AsBoundsCheck()->InputAt(0)->AsIntConstant();
        // Track the largest constant index: a single deoptimization guard
        // against it covers all smaller indices too.
        if (const_instr->GetValue() > lower_bound_const) {
          lower_bound_const = const_instr->GetValue();
          lower_bound_const_instr = const_instr;
        }
        counter++;
      }
    }
    // Only guard when enough bounds checks are eliminated and the constant
    // is small enough for the deoptimization to be worthwhile.
    if (counter >= kThresholdForAddingDeoptimize &&
        lower_bound_const_instr->GetValue() <= kMaxConstantForAddingDeoptimize) {
      AddCompareWithDeoptimization(array_length, lower_bound_const_instr, block);
    }
  }
}
/**
 * Returns true if static range analysis based on induction variables can determine the bounds
 * check on the given array range is always satisfied with the computed index range. The output
 * parameter try_dynamic_bce is set to false if OOB is certain.
 */
bool InductionRangeFitsIn(ValueRange* array_range,
                          HInstruction* context,
                          HInstruction* index,
                          bool* try_dynamic_bce) {
  InductionVarRange::Value v1;
  InductionVarRange::Value v2;
  bool needs_finite_test = false;
  induction_range_.GetInductionRange(context, index, &v1, &v2, &needs_finite_test);
  // Keep refining the induction range (via RefineOuter) until it either
  // proves the check redundant, proves a certain OOB, or cannot be refined.
  do {
    // Only linear forms with a 0/1 coefficient are representable as a
    // ValueBound (instruction + constant, or a plain constant).
    if (v1.is_known && (v1.a_constant == 0 || v1.a_constant == 1) &&
        v2.is_known && (v2.a_constant == 0 || v2.a_constant == 1)) {
      DCHECK(v1.a_constant == 1 || v1.instruction == nullptr);
      DCHECK(v2.a_constant == 1 || v2.instruction == nullptr);
      ValueRange index_range(GetGraph()->GetArena(),
                             ValueBound(v1.instruction, v1.b_constant),
                             ValueBound(v2.instruction, v2.b_constant));
      // If analysis reveals a certain OOB, disable dynamic BCE.
      if (index_range.GetLower().LessThan(array_range->GetLower()) ||
          index_range.GetUpper().GreaterThan(array_range->GetUpper())) {
        *try_dynamic_bce = false;
        return false;
      }
      // Use analysis for static bce only if loop is finite.
      if (!needs_finite_test && index_range.FitsIn(array_range)) {
        return true;
      }
    }
  } while (induction_range_.RefineOuter(&v1, &v2));
  return false;
}
/**
 * When the compiler fails to remove a bounds check statically, we try to remove the bounds
 * check dynamically by adding runtime tests that trigger a deoptimization in case bounds
 * will go out of range (we want to be rather certain of that given the slowdown of
 * deoptimization). If no deoptimization occurs, the loop is executed with all corresponding
 * bounds checks and related null checks removed.
 */
void TryDynamicBCE(HBoundsCheck* instruction) {
  HLoopInformation* loop = instruction->GetBlock()->GetLoopInformation();
  HInstruction* index = instruction->InputAt(0);
  HInstruction* length = instruction->InputAt(1);
  // If dynamic bounds check elimination seems profitable and is possible, then proceed.
  bool needs_finite_test = false;
  bool needs_taken_test = false;
  if (DynamicBCESeemsProfitable(loop, instruction->GetBlock()) &&
      induction_range_.CanGenerateCode(
          instruction, index, &needs_finite_test, &needs_taken_test) &&
      CanHandleInfiniteLoop(loop, index, needs_finite_test) &&
      CanHandleLength(loop, length, needs_taken_test)) {  // do this test last (may code gen)
    HInstruction* lower = nullptr;
    HInstruction* upper = nullptr;
    // Generate the following unsigned comparisons
    //     if (lower > upper)   deoptimize;
    //     if (upper >= length) deoptimize;
    // or, for a non-induction index, just the unsigned comparison on its 'upper' value
    //     if (upper >= length) deoptimize;
    // as runtime test. By restricting dynamic bce to unit strides (with a maximum of 32-bit
    // iterations) and by not combining access (e.g. a[i], a[i-3], a[i+5] etc.), these tests
    // correctly guard against any possible OOB (including arithmetic wrap-around cases).
    HBasicBlock* block = TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
    induction_range_.GenerateRangeCode(instruction, index, GetGraph(), block, &lower, &upper);
    // 'lower' stays null for a non-induction index; only the upper test applies then.
    if (lower != nullptr) {
      InsertDeopt(loop, block, new (GetGraph()->GetArena()) HAbove(lower, upper));
    }
    InsertDeopt(loop, block, new (GetGraph()->GetArena()) HAboveOrEqual(upper, length));
    // The runtime tests now guard the access, so the bounds check itself is removed.
    ReplaceInstruction(instruction, index);
  }
}
/**
 * Returns true if heuristics indicate that dynamic bce may be profitable.
 */
bool DynamicBCESeemsProfitable(HLoopInformation* loop, HBasicBlock* block) {
  if (loop == nullptr) {
    return false;
  }
  // A try boundary preheader is hard to handle.
  // TODO: remove this restriction
  if (loop->GetPreHeader()->GetLastInstruction()->IsTryBoundary()) {
    return false;
  }
  // Early exits mean the full range may not be covered at runtime, so a
  // range-based deoptimization test could fire unnecessarily.
  if (IsEarlyExitLoop(loop)) {
    return false;
  }
  // The block must dominate every back edge; otherwise dynamic bce would be
  // applied to code that is not guaranteed to execute each iteration.
  for (HBasicBlock* back_edge : loop->GetBackEdges()) {
    if (!block->Dominates(back_edge)) {
      return false;
    }
  }
  // All heuristics passed.
  return true;
}
/**
 * Returns true if the loop has early exits, which implies it may not cover
 * the full range computed by range analysis based on induction variables.
 */
bool IsEarlyExitLoop(HLoopInformation* loop) {
  const uint32_t loop_id = loop->GetHeader()->GetBlockId();
  // Reuse the cached verdict if this loop was analyzed before.
  auto cached = early_exit_loop_.find(loop_id);
  if (cached != early_exit_loop_.end()) {
    return cached->second;
  }
  // First-time analysis: scan the full loop body; any successor outside the
  // loop constitutes an early exit. The verdict is memoized since the scan
  // is linear in the size of the loop.
  bool has_early_exit = false;
  HBlocksInLoopReversePostOrderIterator block_it(*loop);
  for (block_it.Advance(); !block_it.Done() && !has_early_exit; block_it.Advance()) {
    for (HBasicBlock* successor : block_it.Current()->GetSuccessors()) {
      if (!loop->Contains(*successor)) {
        has_early_exit = true;
        break;
      }
    }
  }
  early_exit_loop_.Put(loop_id, has_early_exit);
  return has_early_exit;
}
/**
 * Returns true if the array length is already loop invariant, or can be made so
 * by handling the null check under the hood of the array length operation.
 */
bool CanHandleLength(HLoopInformation* loop, HInstruction* length, bool needs_taken_test) {
  // Already invariant: nothing to do.
  if (loop->IsDefinedOutOfTheLoop(length)) {
    return true;
  }
  // An in-loop array length can be hoisted once its null check is handled.
  if (length->IsArrayLength() &&
      length->GetBlock()->GetLoopInformation() == loop &&
      CanHandleNullCheck(loop, length->InputAt(0), needs_taken_test)) {
    HoistToPreheaderOrDeoptBlock(loop, length);
    return true;
  }
  return false;
}
/**
 * Returns true if the null check is already loop invariant, or can be made so
 * by generating a deoptimization test.
 */
bool CanHandleNullCheck(HLoopInformation* loop, HInstruction* check, bool needs_taken_test) {
  // Already invariant: nothing to do.
  if (loop->IsDefinedOutOfTheLoop(check)) {
    return true;
  }
  if (!check->IsNullCheck() || check->GetBlock()->GetLoopInformation() != loop) {
    return false;
  }
  HInstruction* array = check->InputAt(0);
  if (!loop->IsDefinedOutOfTheLoop(array)) {
    return false;
  }
  // Generate: if (array == null) deoptimize;
  HBasicBlock* block = TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
  HInstruction* cond =
      new (GetGraph()->GetArena()) HEqual(array, GetGraph()->GetNullConstant());
  InsertDeopt(loop, block, cond);
  ReplaceInstruction(check, array);
  return true;
}
/**
 * Returns true if compiler can apply dynamic bce to loops that may be infinite
 * (e.g. for (int i = 0; i <= U; i++) with U = MAX_INT), which would invalidate
 * the range analysis evaluation code by "overshooting" the computed range.
 * Since deoptimization would be a bad choice, and there is no other version
 * of the loop to use, dynamic bce in such cases is only allowed if other tests
 * ensure the loop is finite.
 */
bool CanHandleInfiniteLoop(
    HLoopInformation* loop, HInstruction* index, bool needs_infinite_test) {
  if (!needs_infinite_test) {
    return true;
  }
  // If we already forced the loop to be finite, allow directly.
  const uint32_t loop_id = loop->GetHeader()->GetBlockId();
  if (finite_loop_.find(loop_id) != finite_loop_.end()) {
    return true;
  }
  // Otherwise, allow dynamic bce if the index (which is necessarily an induction at
  // this point) is the direct loop index (viz. a[i]), since then the runtime tests
  // ensure upper bound cannot cause an infinite loop.
  HInstruction* control = loop->GetHeader()->GetLastInstruction();
  if (control->IsIf()) {
    HInstruction* if_expr = control->AsIf()->InputAt(0);
    if (if_expr->IsCondition()) {
      HCondition* condition = if_expr->AsCondition();
      if (index == condition->InputAt(0) || index == condition->InputAt(1)) {
        finite_loop_.insert(loop_id);
        return true;
      }
    }
  }
  return false;
}
/** Inserts a deoptimization test. */
void InsertDeopt(HLoopInformation* loop, HBasicBlock* block, HInstruction* condition) {
  HInstruction* suspend = loop->GetSuspendCheck();
  // The condition and the deoptimize are inserted, in that order, right
  // before the block's terminator.
  block->InsertInstructionBefore(condition, block->GetLastInstruction());
  HDeoptimize* deoptimize =
      new (GetGraph()->GetArena()) HDeoptimize(condition, suspend->GetDexPc());
  block->InsertInstructionBefore(deoptimize, block->GetLastInstruction());
  if (suspend->HasEnvironment()) {
    // Base the deoptimization environment on the suspend check's, with loop
    // phi inputs adjusted for use before the loop header.
    deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
        suspend->GetEnvironment(), loop->GetHeader());
  }
}
/** Hoists instruction out of the loop to preheader or deoptimization block. */
void HoistToPreheaderOrDeoptBlock(HLoopInformation* loop, HInstruction* instruction) {
  // Prefer an earlier generated deoptimization block over the preheader,
  // since hoisted expressions may depend on and/or be used by the
  // deoptimization tests.
  auto it = taken_test_loop_.find(loop->GetHeader()->GetBlockId());
  HBasicBlock* target =
      (it != taken_test_loop_.end()) ? it->second : loop->GetPreHeader();
  // Hoist the instruction (it must not carry an environment).
  DCHECK(!instruction->HasEnvironment());
  instruction->MoveBefore(target->GetLastInstruction());
}
/**
 * Adds a new taken-test structure to a loop if needed (and not already done).
 * The taken-test protects range analysis evaluation code to avoid any
 * deoptimization caused by incorrect trip-count evaluation in non-taken loops.
 *
 * Returns block in which deoptimizations/invariants can be put.
 *
 *             old_preheader
 *                 |
 *              if_block          <- taken-test protects deoptimization block
 *              /      \
 *     true_block  false_block    <- deoptimizations/invariants are placed in true_block
 *              \      /
 *           new_preheader        <- may require phi nodes to preserve SSA structure
 *                 |
 *               header
 *
 * For example, this loop:
 *
 *   for (int i = lower; i < upper; i++) {
 *     array[i] = 0;
 *   }
 *
 * will be transformed to:
 *
 *   if (lower < upper) {
 *     if (array == null) deoptimize;
 *     array_length = array.length;
 *     if (lower > upper)         deoptimize;  // unsigned
 *     if (upper >= array_length) deoptimize;  // unsigned
 *   } else {
 *     array_length = 0;
 *   }
 *   for (int i = lower; i < upper; i++) {
 *     // Loop without null check and bounds check, and any array.length replaced with array_length.
 *     array[i] = 0;
 *   }
 */
HBasicBlock* TransformLoopForDeoptimizationIfNeeded(HLoopInformation* loop, bool needs_taken_test) {
  // Not needed (can use preheader), or already done (can reuse)?
  const uint32_t loop_id = loop->GetHeader()->GetBlockId();
  if (!needs_taken_test) {
    return loop->GetPreHeader();
  } else {
    auto it = taken_test_loop_.find(loop_id);
    if (it != taken_test_loop_.end()) {
      return it->second;
    }
  }
  // Generate top test structure.
  HBasicBlock* header = loop->GetHeader();
  GetGraph()->TransformLoopHeaderForBCE(header);
  // Recover the blocks of the newly created diamond (see diagram above)
  // from the dominator/successor structure.
  HBasicBlock* new_preheader = loop->GetPreHeader();
  HBasicBlock* if_block = new_preheader->GetDominator();
  HBasicBlock* true_block = if_block->GetSuccessors()[0];  // True successor.
  HBasicBlock* false_block = if_block->GetSuccessors()[1];  // False successor.
  // Goto instructions.
  true_block->AddInstruction(new (GetGraph()->GetArena()) HGoto());
  false_block->AddInstruction(new (GetGraph()->GetArena()) HGoto());
  new_preheader->AddInstruction(new (GetGraph()->GetArena()) HGoto());
  // Insert the taken-test to see if the loop body is entered. If the
  // loop isn't entered at all, it jumps around the deoptimization block.
  if_block->AddInstruction(new (GetGraph()->GetArena()) HGoto());  // placeholder
  HInstruction* condition = nullptr;
  induction_range_.GenerateTakenTest(header->GetLastInstruction(),
                                     GetGraph(),
                                     if_block,
                                     &condition);
  DCHECK(condition != nullptr);
  // Replace the placeholder goto with the real conditional branch.
  if_block->RemoveInstruction(if_block->GetLastInstruction());
  if_block->AddInstruction(new (GetGraph()->GetArena()) HIf(condition));
  // Remember the deoptimization block for this loop so it can be reused.
  taken_test_loop_.Put(loop_id, true_block);
  return true_block;
}
/**
 * Inserts phi nodes that preserve SSA structure in generated top test structures.
 * All uses of instructions in the deoptimization block that reach the loop need
 * a phi node in the new loop preheader to fix the dominance relation.
 *
 * Example:
 *           if_block
 *            /   \
 *       x_0 = ..  false_block
 *            \   /
 *           x_1 = phi(x_0, null)   <- synthetic phi
 *              |
 *            header
 */
void InsertPhiNodes() {
  // Scan all new deoptimization blocks.
  for (auto it1 = taken_test_loop_.begin(); it1 != taken_test_loop_.end(); ++it1) {
    HBasicBlock* true_block = it1->second;
    HBasicBlock* new_preheader = true_block->GetSingleSuccessor();
    // Scan all instructions in a new deoptimization block.
    for (HInstructionIterator it(true_block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* instruction = it.Current();
      Primitive::Type type = instruction->GetType();
      HPhi* phi = nullptr;  // Created lazily on the first use outside true_block.
      // Scan all uses of an instruction and replace each later use with a phi node.
      for (HUseIterator<HInstruction*> it2(instruction->GetUses());
           !it2.Done();
           it2.Advance()) {
        HInstruction* user = it2.Current()->GetUser();
        if (user->GetBlock() != true_block) {
          if (phi == nullptr) {
            phi = NewPhi(new_preheader, instruction, type);
          }
          user->ReplaceInput(phi, it2.Current()->GetIndex());
        }
      }
      // Scan all environment uses of an instruction and replace each later use with a phi node.
      for (HUseIterator<HEnvironment*> it2(instruction->GetEnvUses());
           !it2.Done();
           it2.Advance()) {
        HEnvironment* user = it2.Current()->GetUser();
        if (user->GetHolder()->GetBlock() != true_block) {
          if (phi == nullptr) {
            phi = NewPhi(new_preheader, instruction, type);
          }
          // Rewire the environment slot to the phi, updating use lists.
          user->RemoveAsUserOfInput(it2.Current()->GetIndex());
          user->SetRawEnvAt(it2.Current()->GetIndex(), phi);
          phi->AddEnvUseAt(user, it2.Current()->GetIndex());
        }
      }
    }
  }
}
/**
 * Construct a phi(instruction, 0) in the new preheader to fix the dominance relation.
 * These are synthetic phi nodes without a virtual register.
 */
HPhi* NewPhi(HBasicBlock* new_preheader,
             HInstruction* instruction,
             Primitive::Type type) {
  HGraph* graph = GetGraph();
  // Pick the typed "zero" constant used as the not-taken input of the phi.
  HInstruction* zero = nullptr;
  if (type == Primitive::kPrimNot) {
    zero = graph->GetNullConstant();
  } else if (type == Primitive::kPrimFloat) {
    zero = graph->GetFloatConstant(0);
  } else if (type == Primitive::kPrimDouble) {
    zero = graph->GetDoubleConstant(0);
  } else {
    zero = graph->GetConstant(type, 0);
  }
  HPhi* phi = new (graph->GetArena())
      HPhi(graph->GetArena(), kNoRegNumber, /*number_of_inputs*/ 2, HPhi::ToPhiType(type));
  phi->SetRawInputAt(0, instruction);
  phi->SetRawInputAt(1, zero);
  if (type == Primitive::kPrimNot) {
    // Reference phis carry over the reference type information.
    phi->SetReferenceTypeInfo(instruction->GetReferenceTypeInfo());
  }
  new_preheader->AddPhi(phi);
  return phi;
}
/** Helper method to replace an instruction with another instruction. */
static void ReplaceInstruction(HInstruction* instruction, HInstruction* replacement) {
  // Redirect all uses first, then remove the now-unused instruction.
  instruction->ReplaceWith(replacement);
  instruction->GetBlock()->RemoveInstruction(instruction);
}
// A set of maps, one per basic block, from instruction to range.
ArenaVector<ArenaSafeMap<int, ValueRange*>> maps_;
// Map an HArrayLength instruction's id to the first HBoundsCheck instruction in
// a block that checks a constant index against that HArrayLength.
ArenaSafeMap<int, HBoundsCheck*> first_constant_index_bounds_check_map_;
// Early-exit loop bookkeeping: maps a loop header's block id to whether the
// loop was found to have an early exit (see IsEarlyExitLoop()).
ArenaSafeMap<uint32_t, bool> early_exit_loop_;
// Taken-test loop bookkeeping: maps a loop header's block id to the block in
// which deoptimizations/invariants are placed (see
// TransformLoopForDeoptimizationIfNeeded()).
ArenaSafeMap<uint32_t, HBasicBlock*> taken_test_loop_;
// Finite loop bookkeeping: block ids of loop headers already handled by
// CanHandleInfiniteLoop().
ArenaSet<uint32_t> finite_loop_;
// For the block, there is at least one HArrayLength instruction for which there
// is more than one bounds check instruction with constant indexing. And it's
// beneficial to add a compare instruction that has deoptimization fallback and
// eliminate those bounds checks.
bool need_to_revisit_block_;
// Flag that denotes whether deoptimization has occurred on array references
// with constant subscripts (see AddCompareWithDeoptimization()).
bool has_deoptimization_on_constant_subscripts_;
// Initial number of blocks.
uint32_t initial_block_size_;
// Side effects.
const SideEffectsAnalysis& side_effects_;
// Range analysis based on induction variables.
InductionVarRange induction_range_;
DISALLOW_COPY_AND_ASSIGN(BCEVisitor);
};
void BoundsCheckElimination::Run() {
if (!graph_->HasBoundsChecks()) {
return;
}
// Reverse post order guarantees a node's dominators are visited first.
// We want to visit in the dominator-based order since if a value is known to
// be bounded by a range at one instruction, it must be true that all uses of
// that value dominated by that instruction fits in that range. Range of that
// value can be narrowed further down in the dominator tree.
BCEVisitor visitor(graph_, side_effects_, induction_analysis_);
HBasicBlock* last_visited_block = nullptr;
for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* current = it.Current();
if (current == last_visited_block) {
// We may insert blocks into the reverse post order list when processing
// a loop header. Don't process it again.
DCHECK(current->IsLoopHeader());
continue;
}
if (visitor.IsAddedBlock(current)) {
// Skip added blocks. Their effects are already taken care of.
continue;
}
visitor.VisitBasicBlock(current);
last_visited_block = current;
}
// Perform cleanup.
visitor.Finish();
}
} // namespace art