/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ART_COMPILER_OPTIMIZING_SSA_LIVENESS_ANALYSIS_H_
#define ART_COMPILER_OPTIMIZING_SSA_LIVENESS_ANALYSIS_H_
#include <iostream>
#include "base/array_ref.h"
#include "base/bit_vector.h"
#include "base/intrusive_forward_list.h"
#include "base/iteration_range.h"
#include "base/macros.h"
#include "base/scoped_arena_allocator.h"
#include "base/scoped_arena_containers.h"
#include "nodes.h"
namespace art HIDDEN {
class CodeGenerator;
class SsaLivenessAnalysis;
static constexpr int kNoRegister = -1;
// Constants describing positions assigned to various data for an instruction.
static constexpr size_t kLivenessPositionsPerInstruction = 4u;
static constexpr size_t kLivenessPositionsForTemp = kLivenessPositionsPerInstruction - 1u;
static constexpr size_t kLivenessPositionsToBlock = kLivenessPositionsPerInstruction - 1u;
static constexpr size_t kLivenessPositionOfNormalUse = 1u; // Inside instruction.
static constexpr size_t kLivenessPositionOfFixedOutput = kLivenessPositionsPerInstruction - 1u;
static constexpr size_t kLivenessPositionForMoveAfter = kLivenessPositionsPerInstruction - 1u;
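// Illustrative example of the layout above: with four positions per
// instruction, an instruction at lifetime position 8 has its normal input
// uses at position 8 + kLivenessPositionOfNormalUse == 9, a fixed output or
// a move inserted after it at position 8 + 3 == 11, and a temp it requests
// blocks positions [8, 11).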
class BlockInfo : public ArenaObject<kArenaAllocSsaLiveness> {
public:
BlockInfo(ScopedArenaAllocator* allocator, size_t number_of_ssa_values);
private:
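// Per-block bit vectors over SSA indexes for standard backward liveness:
// `kill_` holds the values defined in the block, `live_in_`/`live_out_` the
// values live on entry/exit. Informally, the fixed point computed by
// SsaLivenessAnalysis satisfies live_out(B) = union of live_in(S) over the
// successors S of B, while live_in(B) contains the values used in B plus
// live_out(B) minus kill(B).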
BitVectorView<size_t> live_in_;
BitVectorView<size_t> live_out_;
BitVectorView<size_t> kill_;
friend class SsaLivenessAnalysis;
DISALLOW_COPY_AND_ASSIGN(BlockInfo);
};
/**
* A live range contains the start and end of a range where an instruction or a temporary
* is live.
*/
class LiveRange final : public ArenaObject<kArenaAllocSsaLiveness> {
public:
LiveRange(size_t start, size_t end, LiveRange* next) : start_(start), end_(end), next_(next) {
DCHECK_LT(start, end);
DCHECK(next_ == nullptr || next_->GetStart() > GetEnd());
}
size_t GetStart() const { return start_; }
size_t GetEnd() const { return end_; }
LiveRange* GetNext() const { return next_; }
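// Ranges are half-open: [a, b) and [c, d) intersect iff a < d && c < b.
// The two clauses below check whether either range starts inside the other.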
bool IntersectsWith(const LiveRange& other) const {
return (start_ >= other.start_ && start_ < other.end_)
|| (other.start_ >= start_ && other.start_ < end_);
}
bool IsBefore(const LiveRange& other) const {
return end_ <= other.start_;
}
void Dump(std::ostream& stream) const {
stream << "[" << start_ << "," << end_ << ")";
}
LiveRange* Dup(ScopedArenaAllocator* allocator) const {
return new (allocator) LiveRange(
start_, end_, next_ == nullptr ? nullptr : next_->Dup(allocator));
}
LiveRange* GetLastRange() {
return next_ == nullptr ? this : next_->GetLastRange();
}
private:
size_t start_;
size_t end_;
LiveRange* next_;
friend class LiveInterval;
DISALLOW_COPY_AND_ASSIGN(LiveRange);
};
/**
* A use position represents a live interval use at a given position.
*/
class UsePosition : public ArenaObject<kArenaAllocSsaLiveness>,
public IntrusiveForwardListNode<UsePosition> {
public:
UsePosition(HInstruction* user, size_t input_index, size_t position)
: user_(user),
input_index_(input_index),
position_(position) {
}
explicit UsePosition(size_t position)
: user_(nullptr),
input_index_(kNoInput),
position_(dchecked_integral_cast<uint32_t>(position)) {
}
size_t GetPosition() const { return position_; }
HInstruction* GetUser() const { return user_; }
bool IsSynthesized() const { return user_ == nullptr; }
size_t GetInputIndex() const { return input_index_; }
void Dump(std::ostream& stream) const {
stream << position_;
}
HLoopInformation* GetLoopInformation() const {
return user_->GetBlock()->GetLoopInformation();
}
UsePosition* Clone(ScopedArenaAllocator* allocator) const {
return new (allocator) UsePosition(user_, input_index_, position_);
}
bool RequiresRegister() const {
if (IsSynthesized()) return false;
Location location = GetUser()->GetLocations()->InAt(GetInputIndex());
return location.IsUnallocated() && location.RequiresRegisterKind();
}
private:
static constexpr uint32_t kNoInput = static_cast<uint32_t>(-1);
HInstruction* const user_;
const size_t input_index_;
const size_t position_;
DISALLOW_COPY_AND_ASSIGN(UsePosition);
};
using UsePositionList = IntrusiveForwardList<UsePosition>;
/**
* An environment use position represents a live interval for environment use at a given position.
*/
class EnvUsePosition : public ArenaObject<kArenaAllocSsaLiveness>,
public IntrusiveForwardListNode<EnvUsePosition> {
public:
EnvUsePosition(HEnvironment* environment,
size_t input_index,
size_t position)
: environment_(environment),
input_index_(input_index),
position_(position) {
DCHECK(environment != nullptr);
}
size_t GetPosition() const { return position_; }
HEnvironment* GetEnvironment() const { return environment_; }
size_t GetInputIndex() const { return input_index_; }
void Dump(std::ostream& stream) const {
stream << position_;
}
EnvUsePosition* Clone(ScopedArenaAllocator* allocator) const {
return new (allocator) EnvUsePosition(environment_, input_index_, position_);
}
private:
HEnvironment* const environment_;
const size_t input_index_;
const size_t position_;
DISALLOW_COPY_AND_ASSIGN(EnvUsePosition);
};
using EnvUsePositionList = IntrusiveForwardList<EnvUsePosition>;
template <typename Iterator>
inline Iterator FindUseAtOrAfterPosition(Iterator first, Iterator last, size_t position) {
using value_type = const typename Iterator::value_type;
static_assert(std::is_same<value_type, const UsePosition>::value ||
std::is_same<value_type, const EnvUsePosition>::value,
"Expecting value type UsePosition or EnvUsePosition.");
Iterator ret = std::find_if(
first, last, [position](const value_type& use) { return use.GetPosition() >= position; });
// Check that the processed range is sorted. Do not check the rest of the range to avoid
// increasing the complexity of callers from O(n) to O(n^2).
DCHECK(std::is_sorted(
first,
ret,
[](const value_type& lhs, const value_type& rhs) {
return lhs.GetPosition() < rhs.GetPosition();
}));
return ret;
}
template <typename Iterator>
inline IterationRange<Iterator> FindMatchingUseRange(Iterator first,
Iterator last,
size_t position_begin,
size_t position_end) {
Iterator begin = FindUseAtOrAfterPosition(first, last, position_begin);
Iterator end = FindUseAtOrAfterPosition(begin, last, position_end);
return MakeIterationRange(begin, end);
}
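// Usage sketch (illustrative): to visit the uses falling inside one basic
// block, pass the block's lifetime bounds as the positions, e.g.
//   for (const UsePosition& use : FindMatchingUseRange(
//            uses.begin(), uses.end(), block_start, block_end)) { ... }
// where [block_start, block_end) is the block's lifetime position range.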
class SafepointPosition : public ArenaObject<kArenaAllocSsaLiveness>,
public IntrusiveForwardListNode<SafepointPosition> {
public:
explicit SafepointPosition(HInstruction* instruction)
: instruction_(instruction) {}
static size_t ComputePosition(HInstruction* instruction) {
// We special case instructions emitted at use site, as their
// safepoint position needs to be at their use.
if (instruction->IsEmittedAtUseSite()) {
// Currently only applies to implicit null checks, which are emitted
// at the next instruction.
DCHECK(instruction->IsNullCheck()) << instruction->DebugName();
return instruction->GetLifetimePosition() + kLivenessPositionsPerInstruction;
} else {
return instruction->GetLifetimePosition();
}
}
size_t GetPosition() const {
return ComputePosition(instruction_);
}
LocationSummary* GetLocations() const {
return instruction_->GetLocations();
}
HInstruction* GetInstruction() const {
return instruction_;
}
private:
HInstruction* const instruction_;
DISALLOW_COPY_AND_ASSIGN(SafepointPosition);
};
using SafepointPositionList = IntrusiveForwardList<SafepointPosition>;
/**
* An interval is a list of disjoint live ranges where an instruction is live.
* Each instruction that has uses gets an interval.
*/
class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
public:
static LiveInterval* MakeInterval(ScopedArenaAllocator* allocator,
DataType::Type type,
HInstruction* instruction = nullptr) {
return new (allocator) LiveInterval(allocator, type, instruction);
}
static LiveInterval* MakeFixedInterval(ScopedArenaAllocator* allocator,
int reg,
DataType::Type type) {
return new (allocator) LiveInterval(allocator, type, nullptr, true, reg);
}
static LiveInterval* MakeTempInterval(ScopedArenaAllocator* allocator,
DataType::Type type,
size_t temp_index,
size_t position) {
int8_t checked_index = dchecked_integral_cast<int8_t>(temp_index);
LiveInterval* temp = new (allocator) LiveInterval(allocator,
type,
/*defined_by=*/ nullptr,
/*is_fixed=*/ false,
/*reg=*/ kNoRegister,
checked_index);
temp->AddRange(position, position + kLivenessPositionsForTemp);
return temp;
}
bool IsTemp() const {
static_assert(kNoTempIndex < 0);
return temp_index_ >= 0;
}
size_t GetTempIndex() const {
DCHECK(IsTemp());
return dchecked_integral_cast<size_t>(temp_index_);
}
bool IsFixed() const { return is_fixed_; }
// This interval is the result of a split.
bool IsSplit() const { return parent_ != this; }
// Record use of an input. The use will be recorded as an environment use if
// `environment` is not null and as a register use otherwise. If `actual_user`
// is specified, the use will be recorded at `actual_user`'s lifetime position.
void AddUse(HInstruction* instruction,
HEnvironment* environment,
size_t input_index,
HInstruction* actual_user = nullptr);
void AddPhiUse(HInstruction* instruction, size_t input_index, HBasicBlock* block) {
DCHECK(instruction->IsPhi());
if (block->IsInLoop()) {
AddBackEdgeUses(*block);
}
UsePosition* new_use =
new (allocator_) UsePosition(instruction, input_index, block->GetLifetimeEnd());
uses_.push_front(*new_use);
}
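// Liveness construction walks the blocks backwards, so each call sees a range
// that starts no later than the current first range; new ranges are therefore
// prepended to (or merged into the front of) the list.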
ALWAYS_INLINE void AddRange(size_t start, size_t end) {
if (first_range_ == nullptr) {
first_range_ = last_range_ = range_search_start_ =
new (allocator_) LiveRange(start, end, first_range_);
} else if (first_range_->GetStart() == end) {
// There is a use in the following block.
first_range_->start_ = start;
} else if (first_range_->GetStart() == start && first_range_->GetEnd() == end) {
DCHECK(is_fixed_);
} else {
DCHECK_GT(first_range_->GetStart(), end);
// There is a hole in the interval. Create a new range.
first_range_ = range_search_start_ = new (allocator_) LiveRange(start, end, first_range_);
}
}
void AddLoopRange(size_t start, size_t end) {
DCHECK(first_range_ != nullptr);
DCHECK_LE(start, first_range_->GetStart());
// Find the range that covers the positions after the loop.
LiveRange* after_loop = first_range_;
LiveRange* last_in_loop = nullptr;
while (after_loop != nullptr && after_loop->GetEnd() < end) {
DCHECK_LE(start, after_loop->GetStart());
last_in_loop = after_loop;
after_loop = after_loop->GetNext();
}
if (after_loop == nullptr) {
// Uses are only in the loop.
first_range_ = last_range_ = range_search_start_ =
new (allocator_) LiveRange(start, end, nullptr);
} else if (after_loop->GetStart() <= end) {
first_range_ = range_search_start_ = after_loop;
// There are uses after the loop.
first_range_->start_ = start;
} else {
// The use after the loop is after a lifetime hole.
DCHECK(last_in_loop != nullptr);
first_range_ = range_search_start_ = last_in_loop;
first_range_->start_ = start;
first_range_->end_ = end;
}
}
bool HasSpillSlot() const {
static_assert(kNoSpillSlot == -1);
return spill_slot_or_hint_ >= 0;
}
void SetSpillSlot(int slot) {
DCHECK(!IsFixed());
DCHECK(!IsTemp());
spill_slot_or_hint_ = slot;
DCHECK(HasSpillSlot());
}
int GetSpillSlot() const { return spill_slot_or_hint_; }
bool HasSpillSlotOrHint() const {
return spill_slot_or_hint_ != kNoSpillSlot;
}
bool HasSpillSlotHint() const {
return spill_slot_or_hint_ < kNoSpillSlot;
}
void SetSpillSlotHint(int hint) {
static_assert(kNoSpillSlot == -1);
DCHECK(!HasSpillSlotOrHint());
DCHECK_GE(hint, 0);
spill_slot_or_hint_ = -2 - hint;
}
int GetSpillSlotHint() const {
DCHECK(HasSpillSlotOrHint());
return HasSpillSlot() ? GetSpillSlot() : -(spill_slot_or_hint_ + 2);
}
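// Worked example of the encoding above: a hint for slot 3 is stored as
// -2 - 3 == -5 and decoded as -(-5 + 2) == 3, leaving -1 for `kNoSpillSlot`
// and values >= 0 for actual spill slots.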
void SetFrom(size_t from) {
if (first_range_ != nullptr) {
first_range_->start_ = from;
} else {
// Instruction without uses.
DCHECK(uses_.empty());
DCHECK(from == defined_by_->GetLifetimePosition());
// TODO: The `kLivenessPositionsPerInstruction` below looks like a bug for calls coming
// from `RegisterAllocatorLinearScan::CheckForFixedOutput()` as the new range reaches
// into the next instruction. However, those calls always take the `first_range_ != nullptr`
// path above. We should use another, simpler function for that.
first_range_ = last_range_ = range_search_start_ =
new (allocator_) LiveRange(from, from + kLivenessPositionsPerInstruction, nullptr);
}
}
LiveInterval* GetParent() const { return parent_; }
// Returns whether this interval is the parent interval, that is, the interval
// that starts where the HInstruction is defined.
bool IsParent() const { return parent_ == this; }
LiveRange* GetFirstRange() const { return first_range_; }
LiveRange* GetLastRange() const { return last_range_; }
int GetRegister() const { return register_; }
void SetRegister(int reg) { register_ = reg; }
void ClearRegister() { register_ = kNoRegister; }
bool HasRegister() const { return register_ != kNoRegister; }
bool IsDeadAt(size_t position) const {
return GetEnd() <= position;
}
bool IsDefinedAt(size_t position) const {
return GetStart() <= position && !IsDeadAt(position);
}
// Returns true if the interval contains a LiveRange covering `position`.
// The range at or immediately after the current position of linear scan
// is cached for better performance. If `position` can be smaller than
// that, CoversSlow should be used instead.
bool Covers(size_t position) {
LiveRange* candidate = FindRangeAtOrAfter(position, range_search_start_);
range_search_start_ = candidate;
return (candidate != nullptr && candidate->GetStart() <= position);
}
// Same as Covers but always tests all ranges.
bool CoversSlow(size_t position) const {
LiveRange* candidate = FindRangeAtOrAfter(position, first_range_);
return candidate != nullptr && candidate->GetStart() <= position;
}
// Returns the first intersection of this interval with `current`, which
// must be the interval currently being allocated by linear scan.
size_t FirstIntersectionWith(LiveInterval* current) const {
// Find the first range after the start of `current`. We use the search
// cache to improve performance.
DCHECK(GetStart() <= current->GetStart() || IsFixed());
LiveRange* other_range = current->first_range_;
LiveRange* my_range = FindRangeAtOrAfter(other_range->GetStart(), range_search_start_);
if (my_range == nullptr) {
return kNoLifetime;
}
// Advance both intervals and find the first matching range start in
// this interval.
do {
if (my_range->IsBefore(*other_range)) {
my_range = my_range->GetNext();
if (my_range == nullptr) {
return kNoLifetime;
}
} else if (other_range->IsBefore(*my_range)) {
other_range = other_range->GetNext();
if (other_range == nullptr) {
return kNoLifetime;
}
} else {
DCHECK(my_range->IntersectsWith(*other_range));
return std::max(my_range->GetStart(), other_range->GetStart());
}
} while (true);
}
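// For example, if this interval covers [2, 10) and `current` starts with the
// range [6, 8), neither range is before the other, so the loop above reports
// an intersection at max(2, 6) == 6.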
size_t GetStart() const {
return first_range_->GetStart();
}
size_t GetEnd() const {
return last_range_->GetEnd();
}
size_t GetLength() const {
return GetEnd() - GetStart();
}
size_t FirstRegisterUseAfter(size_t position) const {
DCHECK(!IsTemp());
if (IsDefiningPosition(position) && DefinitionRequiresRegister()) {
return position;
}
size_t end = GetEnd();
for (const UsePosition& use : GetUses()) {
size_t use_position = use.GetPosition();
if (use_position > end) {
break;
}
if (use_position > position) {
if (use.RequiresRegister()) {
return use_position;
}
}
}
return kNoLifetime;
}
// Returns the location of the first register use for this live interval,
// including a register definition if applicable.
size_t FirstRegisterUse() const {
size_t start = GetStart();
return IsTemp() ? start : FirstRegisterUseAfter(start);
}
// Whether the interval requires a register rather than a stack location.
// If needed for performance, this could be cached.
bool RequiresRegister() const {
return !HasRegister() && FirstRegisterUse() != kNoLifetime;
}
size_t FirstUseAfter(size_t position) const {
DCHECK(!IsTemp());
if (IsDefiningPosition(position)) {
DCHECK(defined_by_->GetLocations()->Out().IsValid());
return position;
}
size_t end = GetEnd();
for (const UsePosition& use : GetUses()) {
size_t use_position = use.GetPosition();
if (use_position > end) {
break;
}
if (use_position > position) {
return use_position;
}
}
return kNoLifetime;
}
const UsePositionList& GetUses() const {
return parent_->uses_;
}
const EnvUsePositionList& GetEnvironmentUses() const {
return parent_->env_uses_;
}
DataType::Type GetType() const {
return type_;
}
HInstruction* GetDefinedBy() const {
return defined_by_;
}
bool HasWillCallSafepoint() const {
return std::any_of(
safepoints_.begin(),
safepoints_.end(),
[](const SafepointPosition& safepoint) { return safepoint.GetLocations()->WillCall(); });
}
/**
* Split this interval at `position`. This interval is changed to:
* [start ... position).
*
* The new interval covers:
* [position ... end)
*/
LiveInterval* SplitAt(size_t position);
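// For example, splitting an interval covering [8, 24) at position 16 shrinks
// this interval to [8, 16) and returns a new sibling covering [16, 24).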
bool StartsBeforeOrAt(LiveInterval* other) const {
return GetStart() <= other->GetStart();
}
bool StartsAfter(LiveInterval* other) const {
return GetStart() > other->GetStart();
}
void Dump(std::ostream& stream) const;
// Same as Dump, but adds context such as the instruction defining this interval, and
// the register currently assigned to this interval.
void DumpWithContext(std::ostream& stream, const CodeGenerator& codegen) const;
LiveInterval* GetNextSibling() const { return next_sibling_; }
LiveInterval* GetLastSibling() {
LiveInterval* result = this;
while (result->next_sibling_ != nullptr) {
result = result->next_sibling_;
}
return result;
}
// Returns the first register hint that is free at least until the value
// contained in `free_until`. If none is found, returns `kNoRegister`.
int FindFirstRegisterHint(ArrayRef<size_t> free_until,
ArrayRef<HInstruction* const> instructions_from_positions) const;
// If there is enough information at the definition site to find a register
// (for example, the output uses the same register as the first input),
// returns that register as a hint. Returns kNoRegister otherwise.
int FindHintAtDefinition() const;
// Returns the number of required spilling slots (measured as a multiple of the
// Dex virtual register size `kVRegSize`).
size_t NumberOfSpillSlotsNeeded() const;
bool IsFloatingPoint() const {
return type_ == DataType::Type::kFloat32 || type_ == DataType::Type::kFloat64;
}
// Converts the location of the interval to a `Location` object.
Location ToLocation() const;
// Returns the location at `position` of this interval or of the sibling covering it.
Location GetLocationAt(size_t position);
// Finds the sibling that is defined at `position`.
LiveInterval* GetSiblingAt(size_t position);
// Returns whether `other` and `this` share the same kind of register.
bool SameRegisterKind(Location other) const;
bool SameRegisterKind(const LiveInterval& other) const {
return IsFloatingPoint() == other.IsFloatingPoint();
}
bool HasHighInterval() const {
return IsLowInterval();
}
bool HasLowInterval() const {
return IsHighInterval();
}
LiveInterval* GetLowInterval() const {
DCHECK(HasLowInterval());
return high_or_low_interval_;
}
LiveInterval* GetHighInterval() const {
DCHECK(HasHighInterval());
return high_or_low_interval_;
}
bool IsHighInterval() const {
return GetParent()->is_high_interval_;
}
bool IsLowInterval() const {
return !IsHighInterval() && (GetParent()->high_or_low_interval_ != nullptr);
}
void SetLowInterval(LiveInterval* low) {
DCHECK(IsHighInterval());
high_or_low_interval_ = low;
}
void SetHighInterval(LiveInterval* high) {
DCHECK(IsLowInterval());
high_or_low_interval_ = high;
}
void AddHighTempInterval() {
DCHECK(IsParent());
DCHECK(!HasHighInterval());
DCHECK(!HasLowInterval());
DCHECK(IsTemp());
LiveInterval* high = new (allocator_) LiveInterval(allocator_,
type_,
/*defined_by=*/ nullptr,
/*is_fixed=*/ false,
/*reg=*/ kNoRegister,
temp_index_,
/*is_high_interval=*/ true);
DCHECK(first_range_ != nullptr);
DCHECK(first_range_->GetNext() == nullptr);
high->AddRange(GetStart(), GetStart() + kLivenessPositionsForTemp);
DCHECK(uses_.empty());
DCHECK(env_uses_.empty());
high_or_low_interval_ = high;
high->high_or_low_interval_ = this;
}
void AddHighInterval() {
DCHECK(IsParent());
DCHECK(!HasHighInterval());
DCHECK(!HasLowInterval());
DCHECK(!IsTemp());
LiveInterval* high = new (allocator_) LiveInterval(allocator_,
type_,
defined_by_,
/*is_fixed=*/ false,
/*reg=*/ kNoRegister,
kNoTempIndex,
/*is_high_interval=*/ true);
if (first_range_ != nullptr) {
high->first_range_ = first_range_->Dup(allocator_);
high->last_range_ = high->first_range_->GetLastRange();
high->range_search_start_ = high->first_range_;
}
auto pos = high->uses_.before_begin();
for (const UsePosition& use : uses_) {
UsePosition* new_use = use.Clone(allocator_);
pos = high->uses_.insert_after(pos, *new_use);
}
auto env_pos = high->env_uses_.before_begin();
for (const EnvUsePosition& env_use : env_uses_) {
EnvUsePosition* new_env_use = env_use.Clone(allocator_);
env_pos = high->env_uses_.insert_after(env_pos, *new_env_use);
}
high_or_low_interval_ = high;
high->high_or_low_interval_ = this;
}
// Returns whether a non-split interval is using the same register as one
// of its inputs. This function should be used only for DCHECKs.
bool IsUsingInputRegister() const {
if (defined_by_ != nullptr && !IsSplit()) {
for (const HInstruction* input : defined_by_->GetInputs()) {
LiveInterval* interval = input->GetLiveInterval();
// Find the interval that covers `defined_by_`. Calls to this function
// are made outside the linear scan, hence we need to use CoversSlow.
while (interval != nullptr && !interval->CoversSlow(defined_by_->GetLifetimePosition())) {
interval = interval->GetNextSibling();
}
// Check if both intervals have the same register of the same kind.
if (interval != nullptr
&& interval->SameRegisterKind(*this)
&& interval->GetRegister() == GetRegister()) {
return true;
}
}
}
return false;
}
// Returns whether a non-split interval can safely use the same register as
// one of its inputs. Note that this method requires IsUsingInputRegister()
// to be true. This function should be used only for DCHECKs.
bool CanUseInputRegister() const {
DCHECK(IsUsingInputRegister());
if (defined_by_ != nullptr && !IsSplit()) {
LocationSummary* locations = defined_by_->GetLocations();
if (locations->OutputCanOverlapWithInputs()) {
return false;
}
for (const HInstruction* input : defined_by_->GetInputs()) {
LiveInterval* interval = input->GetLiveInterval();
// Find the interval that covers `defined_by_`. Calls to this function
// are made outside the linear scan, hence we need to use CoversSlow.
while (interval != nullptr && !interval->CoversSlow(defined_by_->GetLifetimePosition())) {
interval = interval->GetNextSibling();
}
if (interval != nullptr
&& interval->SameRegisterKind(*this)
&& interval->GetRegister() == GetRegister()) {
// We found the input that has the same register. Check if it is live after
// `defined_by_`.
return !interval->CoversSlow(
defined_by_->GetLifetimePosition() + kLivenessPositionOfNormalUse);
}
}
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
}
SafepointPosition* CreateSafepointPosition(HInstruction* instruction) {
return new (allocator_) SafepointPosition(instruction);
}
void SetSafepointPositions(SafepointPositionList&& list) {
DCHECK(safepoints_.empty());
safepoints_.swap(list);
}
const SafepointPositionList& GetSafepoints() const {
return safepoints_;
}
// Resets the starting point for range-searching queries to the first range.
// Intervals must be reset prior to starting a new linear scan over them.
void ResetSearchCache() {
range_search_start_ = first_range_;
}
bool DefinitionRequiresRegister() const {
DCHECK(IsParent());
LocationSummary* locations = defined_by_->GetLocations();
Location location = locations->Out();
// This interval is the first interval of the instruction. If the output
// of the instruction requires a register, its defining position counts
// as the first register use.
if (location.IsUnallocated()) {
if ((location.GetPolicy() == Location::kRequiresRegister)
|| (location.GetPolicy() == Location::kSameAsFirstInput
&& (locations->InAt(0).IsRegister()
|| locations->InAt(0).IsRegisterPair()
|| locations->InAt(0).GetPolicy() == Location::kRequiresRegister))) {
return true;
} else if ((location.GetPolicy() == Location::kRequiresFpuRegister)
|| (location.GetPolicy() == Location::kSameAsFirstInput
&& (locations->InAt(0).IsFpuRegister()
|| locations->InAt(0).IsFpuRegisterPair()
|| locations->InAt(0).GetPolicy() == Location::kRequiresFpuRegister))) {
return true;
}
} else if (location.IsRegister() || location.IsRegisterPair()) {
return true;
}
return false;
}
void SetHintPhiInterval(LiveInterval* hint_phi_interval) {
DCHECK(hint_phi_interval->GetDefinedBy() != nullptr);
DCHECK(hint_phi_interval->GetDefinedBy()->IsPhi());
hint_phi_interval_ = hint_phi_interval;
}
LiveInterval* GetHintPhiInterval() {
return hint_phi_interval_;
}
private:
LiveInterval(ScopedArenaAllocator* allocator,
DataType::Type type,
HInstruction* defined_by = nullptr,
bool is_fixed = false,
int reg = kNoRegister,
int8_t temp_index = kNoTempIndex,
bool is_high_interval = false)
: allocator_(allocator),
first_range_(nullptr),
last_range_(nullptr),
range_search_start_(nullptr),
safepoints_(),
uses_(),
env_uses_(),
next_sibling_(nullptr),
parent_(this),
defined_by_(defined_by),
high_or_low_interval_(nullptr),
hint_phi_interval_(nullptr),
register_(reg),
spill_slot_or_hint_(kNoSpillSlot),
type_(type),
temp_index_(temp_index),
is_fixed_(is_fixed),
is_high_interval_(is_high_interval) {}
// Searches for a LiveRange that either covers the given position or is the
// first LiveRange starting after it. Returns null if no such LiveRange
// exists. Ranges known to end before `position` can be skipped with
// `search_start`.
LiveRange* FindRangeAtOrAfter(size_t position, LiveRange* search_start) const {
if (kIsDebugBuild) {
if (search_start != first_range_) {
// If we are not searching the entire list of ranges, make sure we do
// not skip the range we are searching for.
if (search_start == nullptr) {
DCHECK(IsDeadAt(position));
} else if (search_start->GetStart() > position) {
DCHECK_EQ(search_start, FindRangeAtOrAfter(position, first_range_));
}
}
}
LiveRange* range;
for (range = search_start;
range != nullptr && range->GetEnd() <= position;
range = range->GetNext()) {
continue;
}
return range;
}
bool IsDefiningPosition(size_t position) const {
return IsParent() && (position == GetStart());
}
bool HasSynthesizeUseAt(size_t position) const {
for (const UsePosition& use : GetUses()) {
size_t use_position = use.GetPosition();
if ((use_position == position) && use.IsSynthesized()) {
return true;
}
if (use_position > position) break;
}
return false;
}
void AddBackEdgeUses(const HBasicBlock& block_at_use);
ScopedArenaAllocator* const allocator_;
// Ranges of this interval. We need quick access to the last range to test
// for liveness (see `IsDeadAt`).
LiveRange* first_range_;
LiveRange* last_range_;
// The first range at or after the current position of a linear scan. It is
// used to optimize range-searching queries.
LiveRange* range_search_start_;
// Safepoints where this interval is live.
SafepointPositionList safepoints_;
// Uses of this interval. Only the parent interval keeps these lists.
UsePositionList uses_;
EnvUsePositionList env_uses_;
// Live interval that is the result of a split.
LiveInterval* next_sibling_;
// The first interval, from which split intervals come.
LiveInterval* parent_;
// The instruction represented by this interval.
HInstruction* const defined_by_;
// If this interval needs a register pair, the high or low equivalent.
// `is_high_interval_` tells whether this holds the low or the high.
LiveInterval* high_or_low_interval_;
// If the last use of the instruction is a Phi, keep a record of that Phi's interval
// for hints, except if the Phi is a loop Phi in an irreducible loop.
LiveInterval* hint_phi_interval_;
// The register allocated to this interval.
int register_;
// The spill slot allocated to this interval, or a spill slot hint; `kNoSpillSlot` if neither.
//
// Values >= 0 represent an actual spill slot, -1 is reserved for `kNoSpillSlot`
// and values <= -2 encode a non-negative spill slot hint as `-2 - hint`.
int spill_slot_or_hint_;
// The instruction type this interval corresponds to.
const DataType::Type type_;
// The index of the temporary, `kNoTempIndex` if not a temporary.
// Currently, we support only 32 core and 32 FP registers. We should never request more
// temps than that, so `int8_t` is enough. (Even if we added another register type.)
const int8_t temp_index_;
// Whether the interval is for a fixed register.
const bool is_fixed_;
// Whether this interval is a synthesized interval for a register pair.
const bool is_high_interval_;
static constexpr int kNoRegister = -1;
static constexpr int kNoSpillSlot = -1;
static constexpr int8_t kNoTempIndex = -1;
friend class RegisterAllocatorTest;
DISALLOW_COPY_AND_ASSIGN(LiveInterval);
};
/**
* Analysis that computes the liveness of instructions:
*
* (a) Non-environment uses of an instruction always make
* the instruction live.
* (b) Environment uses of an instruction whose type is object (that is, non-primitive) make the
* instruction live, unless the class has an @DeadReferenceSafe annotation.
* This avoids unexpected premature reference enqueuing or finalization, which could
* result in premature deletion of native objects. In the presence of @DeadReferenceSafe,
* object references are treated like primitive types.
* (c) When the graph has the debuggable property, environment uses
* of an instruction that has a primitive type make the instruction live.
* If the graph does not have the debuggable property, the environment
* use has no effect, and may get a 'none' value after register allocation.
* (d) When compiling in OSR mode, all loops in the compiled method may be entered
* from the interpreter via SuspendCheck; such use in SuspendCheck makes the instruction
* live.
*
* (b), (c) and (d) are implemented through SsaLivenessAnalysis::ShouldBeLiveForEnvironment.
*/
class SsaLivenessAnalysis : public ValueObject {
public:
SsaLivenessAnalysis(HGraph* graph, CodeGenerator* codegen, ScopedArenaAllocator* allocator)
: graph_(graph),
codegen_(codegen),
allocator_(allocator),
block_infos_(graph->GetBlocks().size(),
nullptr,
allocator_->Adapter(kArenaAllocSsaLiveness)),
instructions_from_ssa_index_(allocator_->Adapter(kArenaAllocSsaLiveness)),
instructions_from_lifetime_position_(allocator_->Adapter(kArenaAllocSsaLiveness)) {
}
void Analyze();
BitVectorView<size_t> GetLiveInSet(const HBasicBlock& block) const {
return block_infos_[block.GetBlockId()]->live_in_;
}
BitVectorView<size_t> GetLiveOutSet(const HBasicBlock& block) const {
return block_infos_[block.GetBlockId()]->live_out_;
}
BitVectorView<size_t> GetKillSet(const HBasicBlock& block) const {
return block_infos_[block.GetBlockId()]->kill_;
}
HInstruction* GetInstructionFromSsaIndex(size_t index) const {
return instructions_from_ssa_index_[index];
}
ArrayRef<HInstruction* const> GetInstructionsFromSsaIndexes() const {
return ArrayRef<HInstruction* const>(instructions_from_ssa_index_);
}
HInstruction* GetInstructionFromPosition(size_t index) const {
return instructions_from_lifetime_position_[index];
}
ArrayRef<HInstruction* const> GetInstructionsFromPositions() const {
return ArrayRef<HInstruction* const>(instructions_from_lifetime_position_);
}
static HBasicBlock* GetBlockFromPosition(
size_t index, ArrayRef<HInstruction* const> instructions_from_positions) {
HInstruction* instruction = instructions_from_positions[index];
if (instruction == nullptr) {
// If we are at a block boundary, get the block following.
instruction = instructions_from_positions[index + 1];
}
return instruction->GetBlock();
}
static bool IsAtBlockBoundary(
size_t index, ArrayRef<HInstruction* const> instructions_from_positions) {
return instructions_from_positions[index] == nullptr;
}
HInstruction* GetTempUser(LiveInterval* temp) const {
// A temporary shares the same lifetime start as the instruction that requires it.
DCHECK(temp->IsTemp());
HInstruction* user =
GetInstructionFromPosition(temp->GetStart() / kLivenessPositionsPerInstruction);
DCHECK(user != nullptr);
DCHECK_EQ(temp->GetStart(), user->GetLifetimePosition());
return user;
}
size_t GetNumberOfSsaValues() const {
return instructions_from_ssa_index_.size();
}
static constexpr const char* kLivenessPassName = "liveness";
private:
// Give an SSA number to each instruction that defines a value used by another instruction,
// and set up the lifetime information of each instruction and block.
void NumberInstructions();
// Compute live ranges of instructions, as well as live_in, live_out and kill sets.
void ComputeLiveness();
// Compute the live ranges of instructions, as well as the initial live_in, live_out and
// kill sets, that do not take into account backward branches.
void ComputeLiveRanges();
// After computing the initial sets, this method does a fixed point
// calculation over the live_in and live_out set to take into account
// backwards branches.
void ComputeLiveInAndLiveOutSets();
// Updates the live_in set of the block and returns whether it has changed.
bool UpdateLiveIn(const HBasicBlock& block);
// Updates the live_out set of the block and returns whether it has changed.
bool UpdateLiveOut(const HBasicBlock& block);
static void ProcessEnvironment(HInstruction* instruction,
HInstruction* actual_user,
BitVectorView<size_t> live_in);
static void RecursivelyProcessInputs(HInstruction* instruction,
HInstruction* actual_user,
BitVectorView<size_t> live_in);
// Returns whether `instruction` in an HEnvironment held by `env_holder`
// should be kept live by the HEnvironment.
static bool ShouldBeLiveForEnvironment(HInstruction* env_holder, HInstruction* instruction) {
DCHECK(instruction != nullptr);
// A value that's not live in compiled code may still be needed in interpreter,
// due to code motion, etc.
if (env_holder->IsDeoptimize()) return true;
// A value live at a throwing instruction in a try block may be copied by
// the exception handler to its location at the top of the catch block.
if (env_holder->CanThrowIntoCatchBlock()) return true;
HGraph* graph = instruction->GetBlock()->GetGraph();
if (graph->IsDebuggable()) return true;
// When compiling in OSR mode, all loops in the compiled method may be entered
// from the interpreter via SuspendCheck; thus we need to preserve the environment.
if (env_holder->IsSuspendCheck() && graph->IsCompilingOsr()) return true;
if (graph->IsDeadReferenceSafe()) return false;
return instruction->GetType() == DataType::Type::kReference;
}
void CheckNoLiveInIrreducibleLoop(const HBasicBlock& block) const {
if (!block.IsLoopHeader() || !block.GetLoopInformation()->IsIrreducible()) {
return;
}
DoCheckNoLiveInIrreducibleLoop(block);
}
void DoCheckNoLiveInIrreducibleLoop(const HBasicBlock& block) const;
HGraph* const graph_;
CodeGenerator* const codegen_;
// Use a local ScopedArenaAllocator for allocating memory.
// This allocator must remain alive while doing register allocation.
ScopedArenaAllocator* const allocator_;
ScopedArenaVector<BlockInfo*> block_infos_;
// Temporary array used when computing live_in, live_out, and kill sets.
ScopedArenaVector<HInstruction*> instructions_from_ssa_index_;
// Compressed map from lifetime position to instruction (nullptr for block start).
// Indexed by the lifetime position divided by `kLivenessPositionsPerInstruction`.
ScopedArenaVector<HInstruction*> instructions_from_lifetime_position_;
friend class RegisterAllocatorTest;
DISALLOW_COPY_AND_ASSIGN(SsaLivenessAnalysis);
};
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_SSA_LIVENESS_ANALYSIS_H_