Move `HGraph` to its own file.
Ideally we would not need to `#include "graph.h"` in
`nodes.h` but that would require extreme refactoring. A more
realistic aim is to include `graph.h` instead of the whole
`nodes.h` in some compilation units.
Note that `HGraph::AllocateInstructionId()` is no longer
`inline`, but this should not cause a significant regression
even if link-time optimization fails to inline it.
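
For illustration, a compilation unit that only needs the
graph-level API could then get away with something like
(hypothetical example; the includes a unit really needs
depend on what else it uses):

    // Pulls in HGraph and GraphAnalysisResult only:
    #include "optimizing/graph.h"
    // ...instead of every HInstruction subclass via:
    // #include "optimizing/nodes.h"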
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Flag: EXEMPT refactor
Change-Id: I2b2f2a06298984e5df1e725362383871fd3ba552
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 9391554..53460b4 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -158,6 +158,7 @@
"optimizing/data_type.cc",
"optimizing/dead_code_elimination.cc",
"optimizing/escape.cc",
+ "optimizing/graph.cc",
"optimizing/graph_checker.cc",
"optimizing/graph_visualizer.cc",
"optimizing/gvn.cc",
@@ -302,6 +303,7 @@
tools: ["generate_operator_out"],
srcs: [
"linker/linker_patch.h",
+ "optimizing/graph.h",
"optimizing/locations.h",
"optimizing/nodes.h",
"optimizing/optimizing_compiler_stats.h",
diff --git a/compiler/optimizing/block_namer.h b/compiler/optimizing/block_namer.h
index 39c5973..c56a96f 100644
--- a/compiler/optimizing/block_namer.h
+++ b/compiler/optimizing/block_namer.h
@@ -24,7 +24,7 @@
namespace art HIDDEN {
class HBasicBlock;
-struct BlockNamer {
+class BlockNamer {
public:
struct NameWrapper {
HBasicBlock* blk_;
diff --git a/compiler/optimizing/graph.cc b/compiler/optimizing/graph.cc
new file mode 100644
index 0000000..3ae9246
--- /dev/null
+++ b/compiler/optimizing/graph.cc
@@ -0,0 +1,1221 @@
+/*
+ * Copyright (C) 2025 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "graph.h"
+
+#include "base/arena_bit_vector.h"
+#include "base/logging.h"
+#include "base/scoped_arena_containers.h"
+#include "common_dominator.h"
+#include "graph_visualizer.h"
+#include "loop_information-inl.h"
+#include "nodes.h"
+#include "scoped_thread_state_change-inl.h"
+
+namespace art HIDDEN {
+
+void HGraph::AddBlock(HBasicBlock* block) {
+ block->SetBlockId(blocks_.size());
+ blocks_.push_back(block);
+}
+
+int32_t HGraph::AllocateInstructionId() {
+ CHECK_NE(current_instruction_id_, INT32_MAX);
+ return current_instruction_id_++;
+}
+
+// Register a back edge; if the block was not a loop header before the call,
+// associate a newly created loop info with it.
+void AddBackEdge(HBasicBlock* block, HBasicBlock* back_edge) {
+ if (block->GetLoopInformation() == nullptr) {
+ HGraph* graph = block->GetGraph();
+ block->SetLoopInformation(new (graph->GetAllocator()) HLoopInformation(block, graph));
+ }
+ DCHECK_EQ(block->GetLoopInformation()->GetHeader(), block);
+ block->GetLoopInformation()->AddBackEdge(back_edge);
+}
+
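+// For example, in a CFG `entry -> A -> B -> A, B -> exit`, the DFS below finds
+// `B -> A` as a back edge: when inspecting `B`'s successor `A`, the bit for `A`
+// is still set in `visiting`, i.e. `A` is still on the DFS stack.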
+void HGraph::FindBackEdges(/*out*/ BitVectorView<size_t> visited) {
+ // "visited" must be empty on entry, it's an output argument for all visited (i.e. live) blocks.
+ DCHECK(!visited.IsAnyBitSet());
+
+ // Allocate memory from local ScopedArenaAllocator.
+ ScopedArenaAllocator allocator(GetArenaStack());
+ // Nodes that we're currently visiting, indexed by block id.
+ BitVectorView<size_t> visiting =
+ ArenaBitVector::CreateFixedSize(&allocator, blocks_.size(), kArenaAllocGraphBuilder);
+ // Number of successors visited from a given node, indexed by block id.
+ ScopedArenaVector<size_t> successors_visited(blocks_.size(),
+ 0u,
+ allocator.Adapter(kArenaAllocGraphBuilder));
+ // Stack of nodes that we're currently visiting (same as marked in "visiting" above).
+ ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocGraphBuilder));
+ constexpr size_t kDefaultWorklistSize = 8;
+ worklist.reserve(kDefaultWorklistSize);
+ visited.SetBit(entry_block_->GetBlockId());
+ visiting.SetBit(entry_block_->GetBlockId());
+ worklist.push_back(entry_block_);
+
+ while (!worklist.empty()) {
+ HBasicBlock* current = worklist.back();
+ uint32_t current_id = current->GetBlockId();
+ if (successors_visited[current_id] == current->GetSuccessors().size()) {
+ visiting.ClearBit(current_id);
+ worklist.pop_back();
+ } else {
+ HBasicBlock* successor = current->GetSuccessors()[successors_visited[current_id]++];
+ uint32_t successor_id = successor->GetBlockId();
+ if (visiting.IsBitSet(successor_id)) {
+ DCHECK(ContainsElement(worklist, successor));
+ AddBackEdge(successor, current);
+ } else if (!visited.IsBitSet(successor_id)) {
+ visited.SetBit(successor_id);
+ visiting.SetBit(successor_id);
+ worklist.push_back(successor);
+ }
+ }
+ }
+}
+
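+// For each block not marked in `visited` (i.e. dead), remove its instructions and
+// phis as users of their inputs, and disconnect the block from its successors,
+// updating the successors' phis as needed.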
+void HGraph::RemoveDeadBlocksInstructionsAsUsersAndDisconnect(
+ BitVectorView<const size_t> visited) const {
+ for (size_t i = 0; i < blocks_.size(); ++i) {
+ if (!visited.IsBitSet(i)) {
+ HBasicBlock* block = blocks_[i];
+ if (block == nullptr) continue;
+
+ // Remove as user.
+ for (HInstructionIteratorPrefetchNext it(block->GetPhis()); !it.Done(); it.Advance()) {
+ it.Current()->RemoveAsUser();
+ }
+ for (HInstructionIteratorPrefetchNext it(block->GetInstructions()); !it.Done();
+ it.Advance()) {
+ it.Current()->RemoveAsUser();
+ }
+
+ // Remove non-catch phi uses, and disconnect the block.
+ block->DisconnectFromSuccessors(visited);
+ }
+ }
+}
+
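+// Remove all blocks not marked in `visited` from the graph: delete their remaining
+// (catch phi) uses and instructions and detach them from the block list.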
+void HGraph::RemoveDeadBlocks(BitVectorView<const size_t> visited) {
+ DCHECK(reverse_post_order_.empty()) << "We shouldn't have dominance information.";
+ for (size_t i = 0; i < blocks_.size(); ++i) {
+ if (!visited.IsBitSet(i)) {
+ HBasicBlock* block = blocks_[i];
+ if (block == nullptr) continue;
+
+ // Remove all remaining uses (which should be only catch phi uses), and the instructions.
+ block->RemoveCatchPhiUsesAndInstruction(/* building_dominator_tree = */ true);
+
+ // Remove the block from the list of blocks, so that further analyses
+ // never see it.
+ blocks_[i] = nullptr;
+ if (block->IsExitBlock()) {
+ SetExitBlock(nullptr);
+ }
+ // Mark the block as removed. This is used by the HGraphBuilder to discard
+ // the block as a branch target.
+ block->SetGraph(nullptr);
+ }
+ }
+}
+
+GraphAnalysisResult HGraph::BuildDominatorTree() {
+ // Allocate memory from local ScopedArenaAllocator.
+ ScopedArenaAllocator allocator(GetArenaStack());
+
+ BitVectorView<size_t> visited =
+ ArenaBitVector::CreateFixedSize(&allocator, blocks_.size(), kArenaAllocGraphBuilder);
+
+ // (1) Find the back edges in the graph doing a DFS traversal.
+ FindBackEdges(visited);
+
+ // (2) Remove instructions and phis from blocks not visited during
+ // the initial DFS as users from other instructions, so that
+ // users can be safely removed before uses later.
+ // Also disconnect the block from its successors, updating the successor's phis if needed.
+ RemoveDeadBlocksInstructionsAsUsersAndDisconnect(visited);
+
+ // (3) Remove blocks not visited during the initial DFS.
+ // Step (5) requires dead blocks to be removed from the
+ // predecessors list of live blocks.
+ RemoveDeadBlocks(visited);
+
+ // (4) Simplify the CFG now, so that we don't need to recompute
+ // dominators and the reverse post order.
+ SimplifyCFG();
+
+ // (5) Compute the dominance information and the reverse post order.
+ ComputeDominanceInformation();
+
+ // (6) Analyze loops discovered through back edge analysis, and
+ // set the loop information on each block.
+ GraphAnalysisResult result = AnalyzeLoops();
+ if (result != kAnalysisSuccess) {
+ return result;
+ }
+
+ // (7) Precompute per-block try membership before entering the SSA builder,
+ // which needs the information to build catch block phis from values of
+ // locals at throwing instructions inside try blocks.
+ ComputeTryBlockInformation();
+
+ return kAnalysisSuccess;
+}
+
+GraphAnalysisResult HGraph::RecomputeDominatorTree() {
+ DCHECK(!HasIrreducibleLoops()) << "Recomputing loop information in graphs with irreducible loops "
+ << "is unsupported, as it could lead to loop header changes";
+ ClearLoopInformation();
+ ClearDominanceInformation();
+ return BuildDominatorTree();
+}
+
+void HGraph::ClearDominanceInformation() {
+ for (HBasicBlock* block : GetActiveBlocks()) {
+ block->ClearDominanceInformation();
+ }
+ reverse_post_order_.clear();
+}
+
+void HGraph::ClearLoopInformation() {
+ SetHasLoops(false);
+ SetHasIrreducibleLoops(false);
+ for (HBasicBlock* block : GetActiveBlocks()) {
+ block->SetLoopInformation(nullptr);
+ }
+}
+
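+// Update the recorded dominator of `successor`: the new dominator is the common
+// dominator of `block` and the previously recorded one, or `block` itself if no
+// dominator was recorded yet. Returns whether the recorded dominator changed.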
+static bool UpdateDominatorOfSuccessor(HBasicBlock* block, HBasicBlock* successor) {
+ DCHECK(ContainsElement(block->GetSuccessors(), successor));
+
+ HBasicBlock* old_dominator = successor->GetDominator();
+ HBasicBlock* new_dominator =
+ (old_dominator == nullptr) ? block
+ : CommonDominator::ForPair(old_dominator, block);
+
+ if (old_dominator == new_dominator) {
+ return false;
+ } else {
+ successor->SetDominator(new_dominator);
+ return true;
+ }
+}
+
+void HGraph::ComputeDominanceInformation() {
+ DCHECK(reverse_post_order_.empty());
+ const size_t size = blocks_.size();
+ reverse_post_order_.reserve(size);
+ reverse_post_order_.push_back(entry_block_);
+
+ {
+ // Allocate memory from local ScopedArenaAllocator.
+ ScopedArenaAllocator allocator(GetArenaStack());
+ // Number of visits of a given node, indexed by block id.
+ ScopedArenaVector<size_t> visits(size, 0u, allocator.Adapter(kArenaAllocGraphBuilder));
+ // Number of successors visited from a given node, indexed by block id.
+ ScopedArenaVector<size_t> successors_visited(
+ size, 0u, allocator.Adapter(kArenaAllocGraphBuilder));
+ // Nodes for which we need to visit successors.
+ ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocGraphBuilder));
+ worklist.reserve(size);
+ worklist.push_back(entry_block_);
+
+ // Cached for the check below.
+ HBasicBlock* exit = GetExitBlock();
+
+ while (!worklist.empty()) {
+ HBasicBlock* current = worklist.back();
+ uint32_t current_id = current->GetBlockId();
+ DCHECK_LT(successors_visited[current_id], current->GetSuccessors().size());
+ HBasicBlock* successor = current->GetSuccessors()[successors_visited[current_id]++];
+ if (successors_visited[current_id] == current->GetSuccessors().size()) {
+ worklist.pop_back();
+ }
+ UpdateDominatorOfSuccessor(current, successor);
+
+ // Once all the forward edges have been visited, we know the immediate
+ // dominator of the block. We can then start visiting its successors.
+ size_t successor_visits_needed =
+ successor->GetPredecessors().size() -
+ (successor->IsLoopHeader() ? successor->GetLoopInformation()->NumberOfBackEdges() : 0u);
+ if (++visits[successor->GetBlockId()] == successor_visits_needed) {
+ reverse_post_order_.push_back(successor);
+        // The exit block is the only one with no successors, so it is encountered
+        // only once per graph, at the end of the traversal.
+ if (LIKELY(successor != exit)) {
+ worklist.push_back(successor);
+ }
+ }
+ }
+ }
+
+ // Check if the graph has back edges not dominated by their respective headers.
+ // If so, we need to update the dominators of those headers and recursively of
+ // their successors. We do that with a fix-point iteration over all blocks.
+ // The algorithm is guaranteed to terminate because it loops only if the sum
+ // of all dominator chains has decreased in the current iteration.
+ bool must_run_fix_point = false;
+ for (HBasicBlock* block : blocks_) {
+ if (block != nullptr &&
+ block->IsLoopHeader() &&
+ block->GetLoopInformation()->HasBackEdgeNotDominatedByHeader()) {
+ must_run_fix_point = true;
+ break;
+ }
+ }
+ if (must_run_fix_point) {
+ bool update_occurred = true;
+ while (update_occurred) {
+ update_occurred = false;
+ for (HBasicBlock* block : GetReversePostOrder()) {
+ for (HBasicBlock* successor : block->GetSuccessors()) {
+ update_occurred |= UpdateDominatorOfSuccessor(block, successor);
+ }
+ }
+ }
+ }
+
+ // Make sure that there are no remaining blocks whose dominator information
+ // needs to be updated.
+ if (kIsDebugBuild) {
+ for (HBasicBlock* block : GetReversePostOrder()) {
+ for (HBasicBlock* successor : block->GetSuccessors()) {
+ DCHECK(!UpdateDominatorOfSuccessor(block, successor));
+ }
+ }
+ }
+
+ // Populate `dominated_blocks_` information after computing all dominators.
+  // The potential presence of irreducible loops requires doing it afterwards.
+ for (HBasicBlock* block : GetReversePostOrder()) {
+ if (!block->IsEntryBlock()) {
+ block->GetDominator()->AddDominatedBlock(block);
+ }
+ }
+}
+
+HBasicBlock* HGraph::SplitEdge(HBasicBlock* block, HBasicBlock* successor) {
+ HBasicBlock* new_block = HBasicBlock::Create(allocator_, this, successor->GetDexPc());
+ AddBlock(new_block);
+ // Use `InsertBetween` to ensure the predecessor index and successor index of
+ // `block` and `successor` are preserved.
+ new_block->InsertBetween(block, successor);
+ return new_block;
+}
+
+void HGraph::SplitCriticalEdge(HBasicBlock* block, HBasicBlock* successor) {
+ // Insert a new node between `block` and `successor` to split the
+ // critical edge.
+ HBasicBlock* new_block = SplitEdge(block, successor);
+ new_block->AddInstruction(new (allocator_) HGoto(successor->GetDexPc()));
+ if (successor->IsLoopHeader()) {
+ // If we split at a back edge boundary, make the new block the back edge.
+ HLoopInformation* info = successor->GetLoopInformation();
+ if (info->IsBackEdge(*block)) {
+ info->RemoveBackEdge(block);
+ info->AddBackEdge(new_block);
+ }
+ }
+}
+
+// Reorder phi inputs to match reordering of the block's predecessors.
+static void FixPhisAfterPredecessorsReordering(HBasicBlock* block, size_t first, size_t second) {
+ for (HInstructionIteratorPrefetchNext it(block->GetPhis()); !it.Done(); it.Advance()) {
+ HPhi* phi = it.Current()->AsPhi();
+ HInstruction* first_instr = phi->InputAt(first);
+ HInstruction* second_instr = phi->InputAt(second);
+ phi->ReplaceInput(first_instr, second);
+ phi->ReplaceInput(second_instr, first);
+ }
+}
+
+// Make sure that the first predecessor of a loop header is the incoming block.
+void HGraph::OrderLoopHeaderPredecessors(HBasicBlock* header) {
+ DCHECK(header->IsLoopHeader());
+ HLoopInformation* info = header->GetLoopInformation();
+ if (info->IsBackEdge(*header->GetPredecessors()[0])) {
+ HBasicBlock* to_swap = header->GetPredecessors()[0];
+ for (size_t pred = 1, e = header->GetPredecessors().size(); pred < e; ++pred) {
+ HBasicBlock* predecessor = header->GetPredecessors()[pred];
+ if (!info->IsBackEdge(*predecessor)) {
+ header->predecessors_[pred] = to_swap;
+ header->predecessors_[0] = predecessor;
+        FixPhisAfterPredecessorsReordering(header, 0, pred);
+ break;
+ }
+ }
+ }
+}
+
+// Transform the control flow of the loop to the single-preheader format (the data flow is
+// left untouched). `new_preheader` may already be among the header's predecessors; this
+// situation is handled correctly.
+static void FixControlForNewSinglePreheader(HBasicBlock* header, HBasicBlock* new_preheader) {
+ HLoopInformation* loop_info = header->GetLoopInformation();
+ for (size_t pred = 0; pred < header->GetPredecessors().size(); ++pred) {
+ HBasicBlock* predecessor = header->GetPredecessors()[pred];
+ if (!loop_info->IsBackEdge(*predecessor) && predecessor != new_preheader) {
+ predecessor->ReplaceSuccessor(header, new_preheader);
+ pred--;
+ }
+ }
+}
+
+// == Before == == After ==
+// _________ _________ _________ _________
+// | B0 | | B1 | (old preheaders) | B0 | | B1 |
+// |=========| |=========| |=========| |=========|
+// | i0 = .. | | i1 = .. | | i0 = .. | | i1 = .. |
+// |_________| |_________| |_________| |_________|
+// \ / \ /
+// \ / ___v____________v___
+// \ / (new preheader) | B20 <- B0, B1 |
+// | | |====================|
+// | | | i20 = phi(i0, i1) |
+// | | |____________________|
+// | | |
+// /\ | | /\ /\ | /\
+// / v_______v_________v_______v \ / v___________v_____________v \
+// | | B10 <- B0, B1, B2, B3 | | | | B10 <- B20, B2, B3 | |
+// | |===========================| | (header) | |===========================| |
+// | | i10 = phi(i0, i1, i2, i3) | | | | i10 = phi(i20, i2, i3) | |
+// | |___________________________| | | |___________________________| |
+// | / \ | | / \ |
+// | ... ... | | ... ... |
+// | _________ _________ | | _________ _________ |
+// | | B2 | | B3 | | | | B2 | | B3 | |
+// | |=========| |=========| | (back edges) | |=========| |=========| |
+// | | i2 = .. | | i3 = .. | | | | i2 = .. | | i3 = .. | |
+// | |_________| |_________| | | |_________| |_________| |
+// \ / \ / \ / \ /
+// \___/ \___/ \___/ \___/
+//
+void HGraph::TransformLoopToSinglePreheaderFormat(HBasicBlock* header) {
+ HLoopInformation* loop_info = header->GetLoopInformation();
+
+ HBasicBlock* preheader = HBasicBlock::Create(allocator_, this, header->GetDexPc());
+ AddBlock(preheader);
+ preheader->AddInstruction(new (allocator_) HGoto(header->GetDexPc()));
+
+ // If the old header has no Phis then we only need to fix the control flow.
+ if (header->GetPhis().IsEmpty()) {
+ FixControlForNewSinglePreheader(header, preheader);
+ preheader->AddSuccessor(header);
+ return;
+ }
+
+ // Find the first non-back edge block in the header's predecessors list.
+ size_t first_nonbackedge_pred_pos = 0;
+ bool found = false;
+ for (size_t pred = 0; pred < header->GetPredecessors().size(); ++pred) {
+ HBasicBlock* predecessor = header->GetPredecessors()[pred];
+ if (!loop_info->IsBackEdge(*predecessor)) {
+ first_nonbackedge_pred_pos = pred;
+ found = true;
+ break;
+ }
+ }
+
+ DCHECK(found);
+
+ // Fix the data-flow.
+ for (HInstructionIteratorPrefetchNext it(header->GetPhis()); !it.Done(); it.Advance()) {
+ HPhi* header_phi = it.Current()->AsPhi();
+
+ HPhi* preheader_phi = new (GetAllocator()) HPhi(GetAllocator(),
+ header_phi->GetRegNumber(),
+ 0,
+ header_phi->GetType());
+ if (header_phi->GetType() == DataType::Type::kReference) {
+ preheader_phi->SetReferenceTypeInfoIfValid(header_phi->GetReferenceTypeInfo());
+ }
+ preheader->AddPhi(preheader_phi);
+
+ HInstruction* orig_input = header_phi->InputAt(first_nonbackedge_pred_pos);
+ header_phi->ReplaceInput(preheader_phi, first_nonbackedge_pred_pos);
+ preheader_phi->AddInput(orig_input);
+
+ for (size_t input_pos = first_nonbackedge_pred_pos + 1;
+ input_pos < header_phi->InputCount();
+ input_pos++) {
+ HInstruction* input = header_phi->InputAt(input_pos);
+ HBasicBlock* pred_block = header->GetPredecessors()[input_pos];
+
+ if (loop_info->Contains(*pred_block)) {
+ DCHECK(loop_info->IsBackEdge(*pred_block));
+ } else {
+ preheader_phi->AddInput(input);
+ header_phi->RemoveInputAt(input_pos);
+ input_pos--;
+ }
+ }
+ }
+
+ // Fix the control-flow.
+ HBasicBlock* first_pred = header->GetPredecessors()[first_nonbackedge_pred_pos];
+ preheader->InsertBetween(first_pred, header);
+
+ FixControlForNewSinglePreheader(header, preheader);
+}
+
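+// Bring the loop with the given header into a canonical form: a single preheader,
+// a first header predecessor that is not a back edge, and an up-to-date suspend
+// check pointer in the loop information.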
+void HGraph::SimplifyLoop(HBasicBlock* header) {
+ HLoopInformation* info = header->GetLoopInformation();
+
+  // Make sure the loop has only one preheader. This simplifies SSA building: only the
+  // preheader needs to be inspected to know which locals are initialized at entry of
+  // the loop. Also, don't allow the entry block to be a preheader: this simplifies
+  // inlining this graph.
+ size_t number_of_incomings = header->GetPredecessors().size() - info->NumberOfBackEdges();
+ if (number_of_incomings != 1 || (GetEntryBlock()->GetSingleSuccessor() == header)) {
+ TransformLoopToSinglePreheaderFormat(header);
+ }
+
+ OrderLoopHeaderPredecessors(header);
+
+ HInstruction* first_instruction = header->GetFirstInstruction();
+ if (first_instruction != nullptr && first_instruction->IsSuspendCheck()) {
+ // Called from DeadBlockElimination. Update SuspendCheck pointer.
+ info->SetSuspendCheck(first_instruction->AsSuspendCheck());
+ }
+}
+
+void HGraph::ComputeTryBlockInformation() {
+ // Iterate in reverse post order to propagate try membership information from
+ // predecessors to their successors.
+ bool graph_has_try_catch = false;
+
+ for (HBasicBlock* block : GetReversePostOrder()) {
+ if (block->IsEntryBlock() || block->IsCatchBlock()) {
+ // Catch blocks after simplification have only exceptional predecessors
+ // and hence are never in tries.
+ continue;
+ }
+
+ // Infer try membership from the first predecessor. Having simplified loops,
+ // the first predecessor can never be a back edge and therefore it must have
+ // been visited already and had its try membership set.
+ HBasicBlock* first_predecessor = block->GetPredecessors()[0];
+ DCHECK_IMPLIES(block->IsLoopHeader(),
+ !block->GetLoopInformation()->IsBackEdge(*first_predecessor));
+ const HTryBoundary* try_entry = first_predecessor->ComputeTryEntryOfSuccessors();
+ graph_has_try_catch |= try_entry != nullptr;
+ if (try_entry != nullptr &&
+ (block->GetTryCatchInformation() == nullptr ||
+ try_entry != &block->GetTryCatchInformation()->GetTryEntry())) {
+ // We are either setting try block membership for the first time or it
+ // has changed.
+ block->SetTryCatchInformation(new (allocator_) TryCatchInformation(*try_entry));
+ }
+ }
+
+ SetHasTryCatch(graph_has_try_catch);
+}
+
+void HGraph::SimplifyCFG() {
+  // Simplify the CFG for future analysis and code generation:
+ // (1): Split critical edges.
+ // (2): Simplify loops by having only one preheader.
+ // NOTE: We're appending new blocks inside the loop, so we need to use index because iterators
+ // can be invalidated. We remember the initial size to avoid iterating over the new blocks.
+ for (size_t block_id = 0u, end = blocks_.size(); block_id != end; ++block_id) {
+ HBasicBlock* block = blocks_[block_id];
+ if (block == nullptr) continue;
+ if (block->GetSuccessors().size() > 1) {
+ // Only split normal-flow edges. We cannot split exceptional edges as they
+      // are synthesized (they only approximate real control flow), and we do not need to
+ // anyway. Moves that would be inserted there are performed by the runtime.
+ ArrayRef<HBasicBlock* const> normal_successors = block->GetNormalSuccessors();
+ for (size_t j = 0, e = normal_successors.size(); j < e; ++j) {
+ HBasicBlock* successor = normal_successors[j];
+ DCHECK(!successor->IsCatchBlock());
+ if (successor == exit_block_) {
+ // (Throw/Return/ReturnVoid)->TryBoundary->Exit. Special case which we
+ // do not want to split because Goto->Exit is not allowed.
+ DCHECK(block->IsSingleTryBoundary());
+ } else if (successor->GetPredecessors().size() > 1) {
+ SplitCriticalEdge(block, successor);
+ // SplitCriticalEdge could have invalidated the `normal_successors`
+ // ArrayRef. We must re-acquire it.
+ normal_successors = block->GetNormalSuccessors();
+ DCHECK_EQ(normal_successors[j]->GetSingleSuccessor(), successor);
+ DCHECK_EQ(e, normal_successors.size());
+ }
+ }
+ }
+ if (block->IsLoopHeader()) {
+ SimplifyLoop(block);
+ } else if (!block->IsEntryBlock() &&
+ block->GetFirstInstruction() != nullptr &&
+ block->GetFirstInstruction()->IsSuspendCheck()) {
+      // We are being called by the dead code elimination pass, and what used to be
+      // a loop got dismantled. Just remove the suspend check.
+ block->RemoveInstruction(block->GetFirstInstruction());
+ }
+ }
+}
+
+GraphAnalysisResult HGraph::AnalyzeLoops() const {
+ // We iterate post order to ensure we visit inner loops before outer loops.
+ // `PopulateRecursive` needs this guarantee to know whether a natural loop
+ // contains an irreducible loop.
+ for (HBasicBlock* block : GetPostOrder()) {
+ if (block->IsLoopHeader()) {
+ if (block->IsCatchBlock()) {
+ // TODO: Dealing with exceptional back edges could be tricky because
+ // they only approximate the real control flow. Bail out for now.
+ VLOG(compiler) << "Not compiled: Exceptional back edges";
+ return kAnalysisFailThrowCatchLoop;
+ }
+ block->GetLoopInformation()->Populate();
+ }
+ }
+ return kAnalysisSuccess;
+}
+
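+// Look up a constant with the given value in `cache`; if none is found, or the cached
+// instruction has been removed from the graph, create, cache and insert a new one.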
+template <class InstructionType, typename ValueType>
+InstructionType* HGraph::CreateConstant(ValueType value,
+ ArenaSafeMap<ValueType, InstructionType*>* cache) {
+ // Try to find an existing constant of the given value.
+ InstructionType* constant = nullptr;
+ auto cached_constant = cache->find(value);
+ if (cached_constant != cache->end()) {
+ constant = cached_constant->second;
+ }
+
+ // If not found or previously deleted, create and cache a new instruction.
+ // Don't bother reviving a previously deleted instruction, for simplicity.
+ if (constant == nullptr || constant->GetBlock() == nullptr) {
+ constant = new (allocator_) InstructionType(value);
+ cache->Overwrite(value, constant);
+ InsertConstant(constant);
+ }
+ return constant;
+}
+
+void HGraph::InsertConstant(HConstant* constant) {
+ // New constants are inserted before the SuspendCheck at the bottom of the
+ // entry block. Note that this method can be called from the graph builder and
+ // the entry block therefore may not end with SuspendCheck->Goto yet.
+ HInstruction* insert_before = nullptr;
+
+ HInstruction* gota = entry_block_->GetLastInstruction();
+ if (gota != nullptr && gota->IsGoto()) {
+ HInstruction* suspend_check = gota->GetPrevious();
+ if (suspend_check != nullptr && suspend_check->IsSuspendCheck()) {
+ insert_before = suspend_check;
+ } else {
+ insert_before = gota;
+ }
+ }
+
+ if (insert_before == nullptr) {
+ entry_block_->AddInstruction(constant);
+ } else {
+ entry_block_->InsertInstructionBefore(constant, insert_before);
+ }
+}
+
+HNullConstant* HGraph::GetNullConstant() {
+ // For simplicity, don't bother reviving the cached null constant if it is
+ // not null and not in a block. Otherwise, we need to clear the instruction
+ // id and/or any invariants the graph is assuming when adding new instructions.
+ if ((cached_null_constant_ == nullptr) || (cached_null_constant_->GetBlock() == nullptr)) {
+ cached_null_constant_ = new (allocator_) HNullConstant();
+ cached_null_constant_->SetReferenceTypeInfo(GetInexactObjectRti());
+ InsertConstant(cached_null_constant_);
+ }
+ if (kIsDebugBuild) {
+ ScopedObjectAccess soa(Thread::Current());
+ DCHECK(cached_null_constant_->GetReferenceTypeInfo().IsValid());
+ }
+ return cached_null_constant_;
+}
+
+HIntConstant* HGraph::GetIntConstant(int32_t value) {
+ return CreateConstant(value, &cached_int_constants_);
+}
+
+HLongConstant* HGraph::GetLongConstant(int64_t value) {
+ return CreateConstant(value, &cached_long_constants_);
+}
+
+HFloatConstant* HGraph::GetFloatConstant(float value) {
+ return CreateConstant(bit_cast<int32_t, float>(value), &cached_float_constants_);
+}
+
+HDoubleConstant* HGraph::GetDoubleConstant(double value) {
+ return CreateConstant(bit_cast<int64_t, double>(value), &cached_double_constants_);
+}
+
+HCurrentMethod* HGraph::GetCurrentMethod() {
+ // For simplicity, don't bother reviving the cached current method if it is
+ // not null and not in a block. Otherwise, we need to clear the instruction
+ // id and/or any invariants the graph is assuming when adding new instructions.
+ if ((cached_current_method_ == nullptr) || (cached_current_method_->GetBlock() == nullptr)) {
+ cached_current_method_ = new (allocator_) HCurrentMethod(
+ Is64BitInstructionSet(instruction_set_) ? DataType::Type::kInt64 : DataType::Type::kInt32,
+ entry_block_->GetDexPc());
+ if (entry_block_->GetFirstInstruction() == nullptr) {
+ entry_block_->AddInstruction(cached_current_method_);
+ } else {
+ entry_block_->InsertInstructionBefore(
+ cached_current_method_, entry_block_->GetFirstInstruction());
+ }
+ }
+ return cached_current_method_;
+}
+
+const char* HGraph::GetMethodName() const {
+ const dex::MethodId& method_id = dex_file_.GetMethodId(method_idx_);
+ return dex_file_.GetMethodName(method_id);
+}
+
+std::string HGraph::PrettyMethod(bool with_signature) const {
+ return dex_file_.PrettyMethod(method_idx_, with_signature);
+}
+
+HConstant* HGraph::GetConstant(DataType::Type type, int64_t value) {
+ switch (type) {
+ case DataType::Type::kBool:
+ DCHECK(IsUint<1>(value));
+ FALLTHROUGH_INTENDED;
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ DCHECK(IsInt(DataType::Size(type) * kBitsPerByte, value));
+ return GetIntConstant(static_cast<int32_t>(value));
+
+ case DataType::Type::kInt64:
+ return GetLongConstant(value);
+
+ default:
+ LOG(FATAL) << "Unsupported constant type";
+ UNREACHABLE();
+ }
+}
+
+void HGraph::CacheFloatConstant(HFloatConstant* constant) {
+ int32_t value = bit_cast<int32_t, float>(constant->GetValue());
+ DCHECK(cached_float_constants_.find(value) == cached_float_constants_.end());
+ cached_float_constants_.Overwrite(value, constant);
+}
+
+void HGraph::CacheDoubleConstant(HDoubleConstant* constant) {
+ int64_t value = bit_cast<int64_t, double>(constant->GetValue());
+ DCHECK(cached_double_constants_.find(value) == cached_double_constants_.end());
+ cached_double_constants_.Overwrite(value, constant);
+}
+
+std::ostream& HGraph::Dump(std::ostream& os,
+ CodeGenerator* codegen,
+ std::optional<std::reference_wrapper<const BlockNamer>> namer) {
+ HGraphVisualizer vis(&os, this, codegen, namer);
+ vis.DumpGraphDebug();
+ return os;
+}
+
+void HGraph::DeleteDeadEmptyBlock(HBasicBlock* block) {
+ DCHECK_EQ(block->GetGraph(), this);
+ DCHECK(block->GetSuccessors().empty());
+ DCHECK(block->GetPredecessors().empty());
+ DCHECK(block->GetDominatedBlocks().empty());
+ DCHECK(block->GetDominator() == nullptr);
+ DCHECK(block->GetInstructions().IsEmpty());
+ DCHECK(block->GetPhis().IsEmpty());
+
+ if (block->IsExitBlock()) {
+ SetExitBlock(nullptr);
+ }
+
+ RemoveElement(reverse_post_order_, block);
+ blocks_[block->GetBlockId()] = nullptr;
+ block->SetGraph(nullptr);
+}
+
+void HGraph::UpdateLoopAndTryInformationOfNewBlock(HBasicBlock* block,
+ HBasicBlock* reference,
+ bool replace_if_back_edge,
+ bool has_more_specific_try_catch_info) {
+ if (block->IsLoopHeader()) {
+ // Clear the information of which blocks are contained in that loop. Since the
+ // information is stored as a bit vector based on block ids, we have to update
+ // it, as those block ids were specific to the callee graph and we are now adding
+ // these blocks to the caller graph.
+ block->GetLoopInformation()->ClearAllBlocks();
+ }
+
+ // If not already in a loop, update the loop information.
+ if (!block->IsInLoop()) {
+ block->SetLoopInformation(reference->GetLoopInformation());
+ }
+
+ // If the block is in a loop, update all its outward loops.
+ HLoopInformation* loop_info = block->GetLoopInformation();
+ if (loop_info != nullptr) {
+ for (HLoopInformationOutwardIterator loop_it(*block);
+ !loop_it.Done();
+ loop_it.Advance()) {
+ loop_it.Current()->Add(block);
+ }
+ if (replace_if_back_edge && loop_info->IsBackEdge(*reference)) {
+ loop_info->ReplaceBackEdge(reference, block);
+ }
+ }
+
+ DCHECK_IMPLIES(has_more_specific_try_catch_info, !reference->IsTryBlock())
+ << "We don't allow to inline try catches inside of other try blocks.";
+
+ // Update the TryCatchInformation, if we are not inlining a try catch.
+ if (!has_more_specific_try_catch_info) {
+ // Copy TryCatchInformation if `reference` is a try block, not if it is a catch block.
+ TryCatchInformation* try_catch_info =
+ reference->IsTryBlock() ? reference->GetTryCatchInformation() : nullptr;
+ block->SetTryCatchInformation(try_catch_info);
+ }
+}
+
+HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
+ DCHECK(HasExitBlock()) << "Unimplemented scenario";
+ // Update the environments in this graph to have the invoke's environment
+ // as parent.
+ {
+ // Skip the entry block, we do not need to update the entry's suspend check.
+ for (HBasicBlock* block : GetReversePostOrderSkipEntryBlock()) {
+ for (HInstructionIteratorPrefetchNext instr_it(block->GetInstructions());
+ !instr_it.Done();
+ instr_it.Advance()) {
+ HInstruction* current = instr_it.Current();
+ if (current->NeedsEnvironment()) {
+ DCHECK(current->HasEnvironment());
+ current->GetEnvironment()->SetAndCopyParentChain(
+ outer_graph->GetAllocator(), invoke->GetEnvironment());
+ }
+ }
+ }
+ }
+
+ if (HasBoundsChecks()) {
+ outer_graph->SetHasBoundsChecks(true);
+ }
+ if (HasLoops()) {
+ outer_graph->SetHasLoops(true);
+ }
+ if (HasIrreducibleLoops()) {
+ outer_graph->SetHasIrreducibleLoops(true);
+ }
+ if (HasDirectCriticalNativeCall()) {
+ outer_graph->SetHasDirectCriticalNativeCall(true);
+ }
+ if (HasTryCatch()) {
+ outer_graph->SetHasTryCatch(true);
+ }
+ if (HasMonitorOperations()) {
+ outer_graph->SetHasMonitorOperations(true);
+ }
+ if (HasTraditionalSIMD()) {
+ outer_graph->SetHasTraditionalSIMD(true);
+ }
+ if (HasPredicatedSIMD()) {
+ outer_graph->SetHasPredicatedSIMD(true);
+ }
+ if (HasAlwaysThrowingInvokes()) {
+ outer_graph->SetHasAlwaysThrowingInvokes(true);
+ }
+
+ HInstruction* return_value = nullptr;
+ if (GetBlocks().size() == 3) {
+ // Inliner already made sure we don't inline methods that always throw.
+ DCHECK(!GetBlocks()[1]->GetLastInstruction()->IsThrow());
+ // Simple case of an entry block, a body block, and an exit block.
+ // Put the body block's instruction into `invoke`'s block.
+ HBasicBlock* body = GetBlocks()[1];
+ DCHECK(GetBlocks()[0]->IsEntryBlock());
+ DCHECK(GetBlocks()[2]->IsExitBlock());
+ DCHECK(!body->IsExitBlock());
+ DCHECK(!body->IsInLoop());
+ HInstruction* last = body->GetLastInstruction();
+
+ // Note that we add instructions before the invoke only to simplify polymorphic inlining.
+ invoke->GetBlock()->instructions_.AddBefore(invoke, body->GetInstructions());
+ body->GetInstructions().SetBlockOfInstructions(invoke->GetBlock());
+
+ // Replace the invoke with the return value of the inlined graph.
+ if (last->IsReturn()) {
+ return_value = last->InputAt(0);
+ } else {
+ DCHECK(last->IsReturnVoid());
+ }
+
+ invoke->GetBlock()->RemoveInstruction(last);
+ } else {
+ // Need to inline multiple blocks. We split `invoke`'s block
+ // into two blocks, merge the first block of the inlined graph into
+ // the first half, and replace the exit block of the inlined graph
+ // with the second half.
+ ArenaAllocator* allocator = outer_graph->GetAllocator();
+ HBasicBlock* at = invoke->GetBlock();
+ // Note that we split before the invoke only to simplify polymorphic inlining.
+ HBasicBlock* to = at->SplitBeforeForInlining(invoke);
+
+ HBasicBlock* first = entry_block_->GetSuccessors()[0];
+ DCHECK(!first->IsInLoop());
+ DCHECK(first->GetTryCatchInformation() == nullptr);
+ at->MergeWithInlined(first);
+ exit_block_->ReplaceWith(to);
+
+ // Update the meta information surrounding blocks:
+ // (1) the graph they are now in,
+ // (2) the reverse post order of that graph,
+ // (3) their potential loop information, inner and outer,
+ // (4) try block membership.
+ // Note that we do not need to update catch phi inputs because they
+ // correspond to the register file of the outer method which the inlinee
+ // cannot modify.
+
+ // We don't add the entry block, the exit block, and the first block, which
+ // has been merged with `at`.
+ static constexpr int kNumberOfSkippedBlocksInCallee = 3;
+
+ // We add the `to` block.
+ static constexpr int kNumberOfNewBlocksInCaller = 1;
+ size_t blocks_added = (reverse_post_order_.size() - kNumberOfSkippedBlocksInCallee)
+ + kNumberOfNewBlocksInCaller;
+
+ // Find the location of `at` in the outer graph's reverse post order. The new
+ // blocks will be added after it.
+ size_t index_of_at = IndexOfElement(outer_graph->reverse_post_order_, at);
+ MakeRoomFor(&outer_graph->reverse_post_order_, blocks_added, index_of_at);
+
+ // Do a reverse post order of the blocks in the callee and do (1), (2), (3)
+ // and (4) to the blocks that apply.
+ for (HBasicBlock* current : GetReversePostOrder()) {
+ if (current != exit_block_ && current != entry_block_ && current != first) {
+ DCHECK(current->GetGraph() == this);
+ current->SetGraph(outer_graph);
+ outer_graph->AddBlock(current);
+ outer_graph->reverse_post_order_[++index_of_at] = current;
+ UpdateLoopAndTryInformationOfNewBlock(current,
+ at,
+ /* replace_if_back_edge= */ false,
+ current->GetTryCatchInformation() != nullptr);
+ }
+ }
+
+ // Do (1), (2), (3) and (4) to `to`.
+ to->SetGraph(outer_graph);
+ outer_graph->AddBlock(to);
+ outer_graph->reverse_post_order_[++index_of_at] = to;
+ // Only `to` can become a back edge, as the inlined blocks
+ // are predecessors of `to`.
+ UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge= */ true);
+
+ // Update all predecessors of the exit block (now the `to` block)
+ // to not `HReturn` but `HGoto` instead. Special case throwing blocks
+ // to now get the outer graph exit block as successor.
+ HPhi* return_value_phi = nullptr;
+ bool rerun_dominance = false;
+ bool rerun_loop_analysis = false;
+ for (size_t pred = 0; pred < to->GetPredecessors().size(); ++pred) {
+ HBasicBlock* predecessor = to->GetPredecessors()[pred];
+ HInstruction* last = predecessor->GetLastInstruction();
+
+ // At this point we might either have:
+ // A) Return/ReturnVoid/Throw as the last instruction, or
+ // B) `Return/ReturnVoid/Throw->TryBoundary` as the last instruction chain
+
+ const bool saw_try_boundary = last->IsTryBoundary();
+ if (saw_try_boundary) {
+ DCHECK(predecessor->IsSingleTryBoundary());
+ DCHECK(!last->AsTryBoundary()->IsEntry());
+ predecessor = predecessor->GetSinglePredecessor();
+ last = predecessor->GetLastInstruction();
+ }
+
+ if (last->IsThrow()) {
+ if (at->IsTryBlock()) {
+ DCHECK(!saw_try_boundary) << "We don't support inlining of try blocks into try blocks.";
+ // Create a TryBoundary of kind:exit and point it to the Exit block.
+ HBasicBlock* new_block = outer_graph->SplitEdge(predecessor, to);
+ new_block->AddInstruction(
+ new (allocator) HTryBoundary(HTryBoundary::BoundaryKind::kExit, last->GetDexPc()));
+ new_block->ReplaceSuccessor(to, outer_graph->GetExitBlock());
+
+ // Copy information from the predecessor.
+ new_block->SetLoopInformation(predecessor->GetLoopInformation());
+ TryCatchInformation* try_catch_info = predecessor->GetTryCatchInformation();
+ new_block->SetTryCatchInformation(try_catch_info);
+ for (HBasicBlock* xhandler :
+ try_catch_info->GetTryEntry().GetBlock()->GetExceptionalSuccessors()) {
+ new_block->AddSuccessor(xhandler);
+ }
+ DCHECK(try_catch_info->GetTryEntry().HasSameExceptionHandlersAs(
+ *new_block->GetLastInstruction()->AsTryBoundary()));
+ } else {
+ // We either have `Throw->TryBoundary` or `Throw`. We want to point the whole chain to the
+        // exit, so we recompute `predecessor`.
+ predecessor = to->GetPredecessors()[pred];
+ predecessor->ReplaceSuccessor(to, outer_graph->GetExitBlock());
+ }
+
+ --pred;
+ // We need to re-run dominance information, as the exit block now has
+ // a new predecessor and potential new dominator.
+ // TODO(solanes): See if it's worth it to hand-modify the domination chain instead of
+ // rerunning the dominance for the whole graph.
+ rerun_dominance = true;
+ if (predecessor->GetLoopInformation() != nullptr) {
+ // The loop information might have changed e.g. `predecessor` might not be in a loop
+ // anymore. We only do this if `predecessor` has loop information as it is impossible for
+ // predecessor to end up in a loop if it wasn't in one before.
+ rerun_loop_analysis = true;
+ }
+ } else {
+ if (last->IsReturnVoid()) {
+ DCHECK(return_value == nullptr);
+ DCHECK(return_value_phi == nullptr);
+ } else {
+ DCHECK(last->IsReturn());
+ if (return_value_phi != nullptr) {
+ return_value_phi->AddInput(last->InputAt(0));
+ } else if (return_value == nullptr) {
+ return_value = last->InputAt(0);
+ } else {
+ // There will be multiple returns.
+ return_value_phi = new (allocator) HPhi(
+ allocator, kNoRegNumber, 0, HPhi::ToPhiType(invoke->GetType()), to->GetDexPc());
+ to->AddPhi(return_value_phi);
+ return_value_phi->AddInput(return_value);
+ return_value_phi->AddInput(last->InputAt(0));
+ return_value = return_value_phi;
+ }
+ }
+ predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
+ predecessor->RemoveInstruction(last);
+
+ if (saw_try_boundary) {
+ predecessor = to->GetPredecessors()[pred];
+ DCHECK(predecessor->EndsWithTryBoundary());
+ DCHECK_EQ(predecessor->GetNormalSuccessors().size(), 1u);
+ if (predecessor->GetSuccessors()[0]->GetPredecessors().size() > 1) {
+ outer_graph->SplitCriticalEdge(predecessor, to);
+ rerun_dominance = true;
+ if (predecessor->GetLoopInformation() != nullptr) {
+ rerun_loop_analysis = true;
+ }
+ }
+ }
+ }
+ }
+ if (rerun_loop_analysis) {
+ outer_graph->RecomputeDominatorTree();
+ } else if (rerun_dominance) {
+ outer_graph->ClearDominanceInformation();
+ outer_graph->ComputeDominanceInformation();
+ }
+ }
+
+ // Walk over the entry block and:
+ // - Move constants from the entry block to the outer_graph's entry block,
+ // - Replace HParameterValue instructions with their real value.
+ // - Remove suspend checks, that hold an environment.
+  // We must do this after the other blocks have been inlined; otherwise ids of
+  // constants could overlap with those of the inner graph.
+ size_t parameter_index = 0;
+ for (HInstructionIteratorPrefetchNext it(entry_block_->GetInstructions()); !it.Done();
+ it.Advance()) {
+ HInstruction* current = it.Current();
+ HInstruction* replacement = nullptr;
+ if (current->IsNullConstant()) {
+ replacement = outer_graph->GetNullConstant();
+ } else if (current->IsIntConstant()) {
+ replacement = outer_graph->GetIntConstant(current->AsIntConstant()->GetValue());
+ } else if (current->IsLongConstant()) {
+ replacement = outer_graph->GetLongConstant(current->AsLongConstant()->GetValue());
+ } else if (current->IsFloatConstant()) {
+ replacement = outer_graph->GetFloatConstant(current->AsFloatConstant()->GetValue());
+ } else if (current->IsDoubleConstant()) {
+ replacement = outer_graph->GetDoubleConstant(current->AsDoubleConstant()->GetValue());
+ } else if (current->IsParameterValue()) {
+ if (kIsDebugBuild &&
+ invoke->IsInvokeStaticOrDirect() &&
+ invoke->AsInvokeStaticOrDirect()->IsStaticWithExplicitClinitCheck()) {
+ // Ensure we do not use the last input of `invoke`, as it
+ // contains a clinit check which is not an actual argument.
+ size_t last_input_index = invoke->InputCount() - 1;
+ DCHECK(parameter_index != last_input_index);
+ }
+ replacement = invoke->InputAt(parameter_index++);
+ } else if (current->IsCurrentMethod()) {
+ replacement = outer_graph->GetCurrentMethod();
+ } else {
+ // It is OK to ignore MethodEntryHook for inlined functions.
+      // In debug mode we don't inline, and in release mode method
+      // tracing is best-effort, so it is OK to ignore them.
+ DCHECK(current->IsGoto() || current->IsSuspendCheck() || current->IsMethodEntryHook());
+ entry_block_->RemoveInstruction(current);
+ }
+ if (replacement != nullptr) {
+ current->ReplaceWith(replacement);
+      // If `current` is the return value, we need to update `return_value` as well.
+ if (current == return_value) {
+ DCHECK_EQ(entry_block_, return_value->GetBlock());
+ return_value = replacement;
+ }
+ }
+ }
+
+ return return_value;
+}
+
+/*
+ * Loop will be transformed to:
+ * old_pre_header
+ * |
+ * if_block
+ * / \
+ * true_block false_block
+ * \ /
+ * new_pre_header
+ * |
+ * header
+ */
+void HGraph::TransformLoopHeaderForBCE(HBasicBlock* header) {
+ DCHECK(header->IsLoopHeader());
+ HBasicBlock* old_pre_header = header->GetDominator();
+
+ // Need extra block to avoid critical edge.
+ HBasicBlock* if_block = HBasicBlock::Create(allocator_, this, header->GetDexPc());
+ HBasicBlock* true_block = HBasicBlock::Create(allocator_, this, header->GetDexPc());
+ HBasicBlock* false_block = HBasicBlock::Create(allocator_, this, header->GetDexPc());
+ HBasicBlock* new_pre_header = HBasicBlock::Create(allocator_, this, header->GetDexPc());
+ AddBlock(if_block);
+ AddBlock(true_block);
+ AddBlock(false_block);
+ AddBlock(new_pre_header);
+
+ header->ReplacePredecessor(old_pre_header, new_pre_header);
+ old_pre_header->successors_.clear();
+ old_pre_header->dominated_blocks_.clear();
+
+ old_pre_header->AddSuccessor(if_block);
+ if_block->AddSuccessor(true_block); // True successor
+ if_block->AddSuccessor(false_block); // False successor
+ true_block->AddSuccessor(new_pre_header);
+ false_block->AddSuccessor(new_pre_header);
+
+ old_pre_header->dominated_blocks_.push_back(if_block);
+ if_block->SetDominator(old_pre_header);
+ if_block->dominated_blocks_.push_back(true_block);
+ true_block->SetDominator(if_block);
+ if_block->dominated_blocks_.push_back(false_block);
+ false_block->SetDominator(if_block);
+ if_block->dominated_blocks_.push_back(new_pre_header);
+ new_pre_header->SetDominator(if_block);
+ new_pre_header->dominated_blocks_.push_back(header);
+ header->SetDominator(new_pre_header);
+
+ // Fix reverse post order.
+ size_t index_of_header = IndexOfElement(reverse_post_order_, header);
+ MakeRoomFor(&reverse_post_order_, 4, index_of_header - 1);
+ reverse_post_order_[index_of_header++] = if_block;
+ reverse_post_order_[index_of_header++] = true_block;
+ reverse_post_order_[index_of_header++] = false_block;
+ reverse_post_order_[index_of_header++] = new_pre_header;
+
+ // The pre_header can never be a back edge of a loop.
+ DCHECK((old_pre_header->GetLoopInformation() == nullptr) ||
+ !old_pre_header->GetLoopInformation()->IsBackEdge(*old_pre_header));
+ UpdateLoopAndTryInformationOfNewBlock(
+ if_block, old_pre_header, /* replace_if_back_edge= */ false);
+ UpdateLoopAndTryInformationOfNewBlock(
+ true_block, old_pre_header, /* replace_if_back_edge= */ false);
+ UpdateLoopAndTryInformationOfNewBlock(
+ false_block, old_pre_header, /* replace_if_back_edge= */ false);
+ UpdateLoopAndTryInformationOfNewBlock(
+ new_pre_header, old_pre_header, /* replace_if_back_edge= */ false);
+}
+
+// Creates a new two-basic-block loop and inserts it between the original loop header
+// and the original loop exit; also adjusts dominators, the reverse post order, and the
+// new loop's LoopInformation.
+HBasicBlock* HGraph::TransformLoopForVectorization(HBasicBlock* header,
+ HBasicBlock* body,
+ HBasicBlock* exit) {
+ DCHECK(header->IsLoopHeader());
+ HLoopInformation* loop = header->GetLoopInformation();
+
+ // Add new loop blocks.
+ HBasicBlock* new_pre_header = HBasicBlock::Create(allocator_, this, header->GetDexPc());
+ HBasicBlock* new_header = HBasicBlock::Create(allocator_, this, header->GetDexPc());
+ HBasicBlock* new_body = HBasicBlock::Create(allocator_, this, header->GetDexPc());
+ AddBlock(new_pre_header);
+ AddBlock(new_header);
+ AddBlock(new_body);
+
+ // Set up control flow.
+ header->ReplaceSuccessor(exit, new_pre_header);
+ new_pre_header->AddSuccessor(new_header);
+ new_header->AddSuccessor(exit);
+ new_header->AddSuccessor(new_body);
+ new_body->AddSuccessor(new_header);
+
+ // Set up dominators.
+ header->ReplaceDominatedBlock(exit, new_pre_header);
+ new_pre_header->SetDominator(header);
+ new_pre_header->dominated_blocks_.push_back(new_header);
+ new_header->SetDominator(new_pre_header);
+ new_header->dominated_blocks_.push_back(new_body);
+ new_body->SetDominator(new_header);
+ new_header->dominated_blocks_.push_back(exit);
+ exit->SetDominator(new_header);
+
+ // Fix reverse post order.
+ size_t index_of_header = IndexOfElement(reverse_post_order_, header);
+ MakeRoomFor(&reverse_post_order_, 2, index_of_header);
+ reverse_post_order_[++index_of_header] = new_pre_header;
+ reverse_post_order_[++index_of_header] = new_header;
+ size_t index_of_body = IndexOfElement(reverse_post_order_, body);
+ MakeRoomFor(&reverse_post_order_, 1, index_of_body - 1);
+ reverse_post_order_[index_of_body] = new_body;
+
+ // Add gotos and suspend check (client must add conditional in header).
+ new_pre_header->AddInstruction(new (allocator_) HGoto());
+ HSuspendCheck* suspend_check = new (allocator_) HSuspendCheck(header->GetDexPc());
+ new_header->AddInstruction(suspend_check);
+ new_body->AddInstruction(new (allocator_) HGoto());
+ DCHECK(loop->GetSuspendCheck() != nullptr);
+ suspend_check->CopyEnvironmentFromWithLoopPhiAdjustment(
+ loop->GetSuspendCheck()->GetEnvironment(), header);
+
+ // Update loop information.
+ AddBackEdge(new_header, new_body);
+ new_header->GetLoopInformation()->SetSuspendCheck(suspend_check);
+ new_header->GetLoopInformation()->Populate();
+ new_pre_header->SetLoopInformation(loop->GetPreHeader()->GetLoopInformation()); // outward
+ HLoopInformationOutwardIterator it(*new_header);
+ for (it.Advance(); !it.Done(); it.Advance()) {
+ it.Current()->Add(new_pre_header);
+ it.Current()->Add(new_header);
+ it.Current()->Add(new_body);
+ }
+ return new_pre_header;
+}
+
+} // namespace art
diff --git a/compiler/optimizing/graph.h b/compiler/optimizing/graph.h
new file mode 100644
index 0000000..ad47f2e
--- /dev/null
+++ b/compiler/optimizing/graph.h
@@ -0,0 +1,559 @@
+/*
+ * Copyright (C) 2025 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_GRAPH_H_
+#define ART_COMPILER_OPTIMIZING_GRAPH_H_
+
+#include <algorithm>
+#include <iosfwd>
+#include <functional>
+#include <optional>
+#include <string>
+
+#include "base/arena_allocator.h"
+#include "base/arena_containers.h"
+#include "base/arena_object.h"
+#include "base/macros.h"
+#include "base/stl_util.h"
+#include "compilation_kind.h"
+#include "data_type.h"
+#include "dex/invoke_type.h"
+#include "handle_cache.h"
+#include "reference_type_info.h"
+
+namespace art HIDDEN {
+
+class BlockNamer;
+class CodeGenerator;
+class HBasicBlock;
+class HConstant;
+class HCurrentMethod;
+class HDoubleConstant;
+class HFloatConstant;
+class HInstruction;
+class HIntConstant;
+class HInvoke;
+class HLongConstant;
+class HNullConstant;
+class VariableSizedHandleScope;
+
+enum GraphAnalysisResult {
+ kAnalysisSkipped,
+ kAnalysisInvalidBytecode,
+ kAnalysisFailThrowCatchLoop,
+ kAnalysisFailAmbiguousArrayOp,
+ kAnalysisFailIrreducibleLoopAndStringInit,
+ kAnalysisFailPhiEquivalentInOsr,
+ kAnalysisSuccess,
+};
+
+std::ostream& operator<<(std::ostream& os, GraphAnalysisResult ga);
+
+// Control-flow graph of a method. Contains a list of basic blocks.
+class HGraph : public ArenaObject<kArenaAllocGraph> {
+ public:
+ HGraph(ArenaAllocator* allocator,
+ ArenaStack* arena_stack,
+ VariableSizedHandleScope* handles,
+ const DexFile& dex_file,
+ uint32_t method_idx,
+ InstructionSet instruction_set,
+ InvokeType invoke_type,
+ bool dead_reference_safe = false,
+ bool debuggable = false,
+ CompilationKind compilation_kind = CompilationKind::kOptimized,
+ int start_instruction_id = 0)
+ : allocator_(allocator),
+ arena_stack_(arena_stack),
+ handle_cache_(handles),
+ blocks_(allocator->Adapter(kArenaAllocBlockList)),
+ reverse_post_order_(allocator->Adapter(kArenaAllocReversePostOrder)),
+ linear_order_(allocator->Adapter(kArenaAllocLinearOrder)),
+ entry_block_(nullptr),
+ exit_block_(nullptr),
+ number_of_vregs_(0),
+ number_of_in_vregs_(0),
+ temporaries_vreg_slots_(0),
+ has_bounds_checks_(false),
+ has_try_catch_(false),
+ has_monitor_operations_(false),
+ has_traditional_simd_(false),
+ has_predicated_simd_(false),
+ has_loops_(false),
+ has_irreducible_loops_(false),
+ has_direct_critical_native_call_(false),
+ has_always_throwing_invokes_(false),
+ dead_reference_safe_(dead_reference_safe),
+ debuggable_(debuggable),
+ current_instruction_id_(start_instruction_id),
+ dex_file_(dex_file),
+ method_idx_(method_idx),
+ invoke_type_(invoke_type),
+ in_ssa_form_(false),
+ number_of_cha_guards_(0),
+ instruction_set_(instruction_set),
+ cached_null_constant_(nullptr),
+ cached_int_constants_(std::less<int32_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
+ cached_float_constants_(std::less<int32_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
+ cached_long_constants_(std::less<int64_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
+ cached_double_constants_(std::less<int64_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
+ cached_current_method_(nullptr),
+ art_method_(nullptr),
+ compilation_kind_(compilation_kind),
+ useful_optimizing_(false),
+ cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) {
+ blocks_.reserve(kDefaultNumberOfBlocks);
+ }
+
+ std::ostream& Dump(std::ostream& os,
+ CodeGenerator* codegen,
+ std::optional<std::reference_wrapper<const BlockNamer>> namer = std::nullopt);
+
+ ArenaAllocator* GetAllocator() const { return allocator_; }
+ ArenaStack* GetArenaStack() const { return arena_stack_; }
+
+ HandleCache* GetHandleCache() { return &handle_cache_; }
+
+ const ArenaVector<HBasicBlock*>& GetBlocks() const { return blocks_; }
+
+  // An iteration range over only the blocks that are still actually in the graph
+  // (when blocks are removed, they are replaced with 'nullptr' in GetBlocks() to
+  // simplify block-id assignment and avoid memmoves in the block list).
+ IterationRange<FilterNull<ArenaVector<HBasicBlock*>::const_iterator>> GetActiveBlocks() const {
+ return FilterOutNull(MakeIterationRange(GetBlocks()));
+ }
+
+ bool IsInSsaForm() const { return in_ssa_form_; }
+ void SetInSsaForm() { in_ssa_form_ = true; }
+
+ HBasicBlock* GetEntryBlock() const { return entry_block_; }
+ HBasicBlock* GetExitBlock() const { return exit_block_; }
+ bool HasExitBlock() const { return exit_block_ != nullptr; }
+
+ void SetEntryBlock(HBasicBlock* block) { entry_block_ = block; }
+ void SetExitBlock(HBasicBlock* block) { exit_block_ = block; }
+
+ void AddBlock(HBasicBlock* block);
+
+ void ComputeDominanceInformation();
+ void ClearDominanceInformation();
+ void ClearLoopInformation();
+ void FindBackEdges(/*out*/ BitVectorView<size_t> visited);
+ GraphAnalysisResult BuildDominatorTree();
+ GraphAnalysisResult RecomputeDominatorTree();
+ void SimplifyCFG();
+ void SimplifyCatchBlocks();
+
+ // Analyze all natural loops in this graph. Returns a code specifying that it
+ // was successful or the reason for failure. The method will fail if a loop
+ // is a throw-catch loop, i.e. the header is a catch block.
+ GraphAnalysisResult AnalyzeLoops() const;
+
+ // Iterate over blocks to compute try block membership. Needs reverse post
+ // order and loop information.
+ void ComputeTryBlockInformation();
+
+ // Inline this graph in `outer_graph`, replacing the given `invoke` instruction.
+ // Returns the instruction to replace the invoke expression or null if the
+ // invoke is for a void method. Note that the caller is responsible for replacing
+ // and removing the invoke instruction.
+ HInstruction* InlineInto(HGraph* outer_graph, HInvoke* invoke);
+
+ // Update the loop and try membership of `block`, which was spawned from `reference`.
+  // In case `reference` is a back edge, `replace_if_back_edge` indicates whether `block`
+ // should be the new back edge.
+ // `has_more_specific_try_catch_info` will be set to true when inlining a try catch.
+ void UpdateLoopAndTryInformationOfNewBlock(HBasicBlock* block,
+ HBasicBlock* reference,
+ bool replace_if_back_edge,
+ bool has_more_specific_try_catch_info = false);
+
+  // Adds a couple of blocks before the loop header, used to test whether the loop
+  // body is entered, to hold deoptimization instructions, etc.
+ void TransformLoopHeaderForBCE(HBasicBlock* header);
+
+ // Adds a new loop directly after the loop with the given header and exit.
+ // Returns the new preheader.
+ HBasicBlock* TransformLoopForVectorization(HBasicBlock* header,
+ HBasicBlock* body,
+ HBasicBlock* exit);
+
+ // Removes `block` from the graph. Assumes `block` has been disconnected from
+ // other blocks and has no instructions or phis.
+ void DeleteDeadEmptyBlock(HBasicBlock* block);
+
+ // Splits the edge between `block` and `successor` while preserving the
+ // indices in the predecessor/successor lists. If there are multiple edges
+ // between the blocks, the lowest indices are used.
+ // Returns the new block which is empty and has the same dex pc as `successor`.
+ HBasicBlock* SplitEdge(HBasicBlock* block, HBasicBlock* successor);
+
+ void SplitCriticalEdge(HBasicBlock* block, HBasicBlock* successor);
+
+ void OrderLoopHeaderPredecessors(HBasicBlock* header);
+
+ // Transform a loop into a format with a single preheader.
+ //
+  // Each phi in the header should be split: the original one in the header should only
+  // hold inputs reachable from the back edges and a single input from the preheader. The
+  // newly created phi in the preheader should collate the inputs from the original
+  // multiple incoming blocks.
+ //
+ // Loops in the graph typically have a single preheader, so this method is used to "repair" loops
+ // that no longer have this property.
+ void TransformLoopToSinglePreheaderFormat(HBasicBlock* header);
+
+ void SimplifyLoop(HBasicBlock* header);
+
+ int32_t AllocateInstructionId();
+
+ int32_t GetCurrentInstructionId() const {
+ return current_instruction_id_;
+ }
+
+ void SetCurrentInstructionId(int32_t id) {
+ CHECK_GE(id, current_instruction_id_);
+ current_instruction_id_ = id;
+ }
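// A minimal sketch of the id-allocation contract implied above, assuming only
// the declared interface; `CheckIdAllocation` is a hypothetical helper.
void CheckIdAllocation(HGraph* graph) {
  int32_t before = graph->GetCurrentInstructionId();
  int32_t id = graph->AllocateInstructionId();  // CHECKs against INT32_MAX overflow.
  DCHECK_EQ(id, before);  // Ids are handed out sequentially...
  DCHECK_EQ(graph->GetCurrentInstructionId(), before + 1);  // ...and the counter advances.
}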
+
+ void UpdateTemporariesVRegSlots(size_t slots) {
+ temporaries_vreg_slots_ = std::max(slots, temporaries_vreg_slots_);
+ }
+
+ size_t GetTemporariesVRegSlots() const {
+ DCHECK(!in_ssa_form_);
+ return temporaries_vreg_slots_;
+ }
+
+ void SetNumberOfVRegs(uint16_t number_of_vregs) {
+ number_of_vregs_ = number_of_vregs;
+ }
+
+ uint16_t GetNumberOfVRegs() const {
+ return number_of_vregs_;
+ }
+
+ void SetNumberOfInVRegs(uint16_t value) {
+ number_of_in_vregs_ = value;
+ }
+
+ uint16_t GetNumberOfInVRegs() const {
+ return number_of_in_vregs_;
+ }
+
+ uint16_t GetNumberOfLocalVRegs() const {
+ DCHECK(!in_ssa_form_);
+ return number_of_vregs_ - number_of_in_vregs_;
+ }
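// Worked example (values are illustrative only): a virtual method
// `int add(int a, int b)` declared with a five-register frame has
// number_of_vregs_ = 5 and number_of_in_vregs_ = 3 (this, a, b), so
// GetNumberOfLocalVRegs() returns 5 - 3 = 2.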
+
+ const ArenaVector<HBasicBlock*>& GetReversePostOrder() const {
+ return reverse_post_order_;
+ }
+
+ ArrayRef<HBasicBlock* const> GetReversePostOrderSkipEntryBlock() const {
+ DCHECK(GetReversePostOrder()[0] == entry_block_);
+ return ArrayRef<HBasicBlock* const>(GetReversePostOrder()).SubArray(1);
+ }
+
+ IterationRange<ArenaVector<HBasicBlock*>::const_reverse_iterator> GetPostOrder() const {
+ return ReverseRange(GetReversePostOrder());
+ }
+
+ const ArenaVector<HBasicBlock*>& GetLinearOrder() const {
+ return linear_order_;
+ }
+
+ IterationRange<ArenaVector<HBasicBlock*>::const_reverse_iterator> GetLinearPostOrder() const {
+ return ReverseRange(GetLinearOrder());
+ }
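// A minimal traversal sketch, assuming only the accessors above;
// `VisitGraph` is a hypothetical helper, not ART code.
void VisitGraph(const HGraph* graph) {
  // Reverse post order sees a block before its non-back-edge successors;
  // the entry block comes first.
  for (HBasicBlock* block : graph->GetReversePostOrder()) {
    DCHECK(block != nullptr);
  }
  // Post order is the reverse view, so inner-loop blocks are seen before the
  // headers of enclosing loops; AnalyzeLoops() relies on this.
  for (HBasicBlock* block : graph->GetPostOrder()) {
    DCHECK(block != nullptr);
  }
}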
+
+ bool HasBoundsChecks() const {
+ return has_bounds_checks_;
+ }
+
+ void SetHasBoundsChecks(bool value) {
+ has_bounds_checks_ = value;
+ }
+
+ // Is the code known to be robust against eliminating dead references
+ // and the effects of early finalization?
+ bool IsDeadReferenceSafe() const { return dead_reference_safe_; }
+
+ void MarkDeadReferenceUnsafe() { dead_reference_safe_ = false; }
+
+ bool IsDebuggable() const { return debuggable_; }
+
+ // Returns a constant of the given type and value. If it does not exist
+ // already, it is created and inserted into the graph. This method is only for
+ // integral types.
+ HConstant* GetConstant(DataType::Type type, int64_t value);
+
+ // TODO: This is problematic for the consistency of reference type propagation
+ // because it can be created anytime after the pass and thus it will be left
+ // with an invalid type.
+ HNullConstant* GetNullConstant();
+
+ HIntConstant* GetIntConstant(int32_t value);
+ HLongConstant* GetLongConstant(int64_t value);
+ HFloatConstant* GetFloatConstant(float value);
+ HDoubleConstant* GetDoubleConstant(double value);
+
+ HCurrentMethod* GetCurrentMethod();
+
+ const DexFile& GetDexFile() const {
+ return dex_file_;
+ }
+
+ uint32_t GetMethodIdx() const {
+ return method_idx_;
+ }
+
+ // Get the method name (without the signature), e.g. "<init>"
+ const char* GetMethodName() const;
+
+ // Get the pretty method name (class + name + optionally signature).
+ std::string PrettyMethod(bool with_signature = true) const;
+
+ InvokeType GetInvokeType() const {
+ return invoke_type_;
+ }
+
+ InstructionSet GetInstructionSet() const {
+ return instruction_set_;
+ }
+
+ bool IsCompilingOsr() const { return compilation_kind_ == CompilationKind::kOsr; }
+
+ bool IsCompilingBaseline() const { return compilation_kind_ == CompilationKind::kBaseline; }
+
+ CompilationKind GetCompilationKind() const { return compilation_kind_; }
+
+ ArenaSet<ArtMethod*>& GetCHASingleImplementationList() {
+ return cha_single_implementation_list_;
+ }
+
+ // In case of OSR we intend to use SuspendChecks as an entry point to the
+ // function; for debuggable graphs we might deoptimize to interpreter from
+ // SuspendChecks. In these cases we should always generate code for them.
+ bool SuspendChecksAreAllowedToNoOp() const {
+ return !IsDebuggable() && !IsCompilingOsr();
+ }
+
+ void AddCHASingleImplementationDependency(ArtMethod* method) {
+ cha_single_implementation_list_.insert(method);
+ }
+
+ bool HasShouldDeoptimizeFlag() const {
+ return number_of_cha_guards_ != 0 || debuggable_;
+ }
+
+ bool HasTryCatch() const { return has_try_catch_; }
+ void SetHasTryCatch(bool value) { has_try_catch_ = value; }
+
+ bool HasMonitorOperations() const { return has_monitor_operations_; }
+ void SetHasMonitorOperations(bool value) { has_monitor_operations_ = value; }
+
+ bool HasTraditionalSIMD() const { return has_traditional_simd_; }
+ void SetHasTraditionalSIMD(bool value) { has_traditional_simd_ = value; }
+
+ bool HasPredicatedSIMD() const { return has_predicated_simd_; }
+ void SetHasPredicatedSIMD(bool value) { has_predicated_simd_ = value; }
+
+ bool HasSIMD() const { return has_traditional_simd_ || has_predicated_simd_; }
+
+ bool HasLoops() const { return has_loops_; }
+ void SetHasLoops(bool value) { has_loops_ = value; }
+
+ bool HasIrreducibleLoops() const { return has_irreducible_loops_; }
+ void SetHasIrreducibleLoops(bool value) { has_irreducible_loops_ = value; }
+
+ bool HasDirectCriticalNativeCall() const { return has_direct_critical_native_call_; }
+ void SetHasDirectCriticalNativeCall(bool value) { has_direct_critical_native_call_ = value; }
+
+ bool HasAlwaysThrowingInvokes() const { return has_always_throwing_invokes_; }
+ void SetHasAlwaysThrowingInvokes(bool value) { has_always_throwing_invokes_ = value; }
+
+ ArtMethod* GetArtMethod() const { return art_method_; }
+ void SetArtMethod(ArtMethod* method) { art_method_ = method; }
+
+ void SetProfilingInfo(ProfilingInfo* info) { profiling_info_ = info; }
+ ProfilingInfo* GetProfilingInfo() const { return profiling_info_; }
+
+ ReferenceTypeInfo GetInexactObjectRti() {
+ return ReferenceTypeInfo::Create(handle_cache_.GetObjectClassHandle(), /* is_exact= */ false);
+ }
+
+ uint32_t GetNumberOfCHAGuards() const { return number_of_cha_guards_; }
+ void SetNumberOfCHAGuards(uint32_t num) { number_of_cha_guards_ = num; }
+ void IncrementNumberOfCHAGuards() { number_of_cha_guards_++; }
+
+ void SetUsefulOptimizing() { useful_optimizing_ = true; }
+ bool IsUsefulOptimizing() const { return useful_optimizing_; }
+
+ private:
+ static const size_t kDefaultNumberOfBlocks = 8u;
+
+ void RemoveDeadBlocksInstructionsAsUsersAndDisconnect(BitVectorView<const size_t> visited) const;
+ void RemoveDeadBlocks(BitVectorView<const size_t> visited);
+
+ template <class InstructionType, typename ValueType>
+ InstructionType* CreateConstant(ValueType value,
+ ArenaSafeMap<ValueType, InstructionType*>* cache);
+
+ void InsertConstant(HConstant* instruction);
+
+ // Cache a float constant into the graph. This method should only be
+ // called by the SsaBuilder when creating "equivalent" instructions.
+ void CacheFloatConstant(HFloatConstant* constant);
+
+ // See CacheFloatConstant comment.
+ void CacheDoubleConstant(HDoubleConstant* constant);
+
+ ArenaAllocator* const allocator_;
+ ArenaStack* const arena_stack_;
+
+ HandleCache handle_cache_;
+
+ // List of blocks in insertion order.
+ ArenaVector<HBasicBlock*> blocks_;
+
+ // List of blocks to perform a reverse post order tree traversal.
+ ArenaVector<HBasicBlock*> reverse_post_order_;
+
+ // List of blocks to perform a linear order tree traversal. Unlike the reverse
+ // post order, this order is not incrementally kept up-to-date.
+ ArenaVector<HBasicBlock*> linear_order_;
+
+ HBasicBlock* entry_block_;
+ HBasicBlock* exit_block_;
+
+ // The number of virtual registers in this method. Includes the parameters.
+ uint16_t number_of_vregs_;
+
+ // The number of virtual registers used by parameters of this method.
+ uint16_t number_of_in_vregs_;
+
+ // Number of vreg-sized slots that the temporaries use (used by the baseline compiler).
+ size_t temporaries_vreg_slots_;
+
+ // Flag whether there are bounds checks in the graph. We can skip
+ // BCE if it's false.
+ bool has_bounds_checks_;
+
+ // Flag whether there are try/catch blocks in the graph. We will skip
+ // try/catch-related passes if it's false.
+ bool has_try_catch_;
+
+ // Flag whether there are any HMonitorOperations in the graph. If so, a
+ // DexRegisterMap must be present to allow deadlock analysis of non-debuggable code.
+ bool has_monitor_operations_;
+
+ // Flags whether SIMD (traditional or predicated) instructions appear in the graph.
+ // If either is true, the code generators may have to be more careful when
+ // spilling the wider contents of SIMD registers.
+ bool has_traditional_simd_;
+ bool has_predicated_simd_;
+
+ // Flag whether there are any loops in the graph. We can skip loop
+ // optimization if it's false.
+ bool has_loops_;
+
+ // Flag whether there are any irreducible loops in the graph.
+ bool has_irreducible_loops_;
+
+ // Flag whether there are any direct calls to native code registered
+ // for @CriticalNative methods.
+ bool has_direct_critical_native_call_;
+
+ // Flag whether the graph contains invokes that always throw.
+ bool has_always_throwing_invokes_;
+
+ // Is the code known to be robust against eliminating dead references
+ // and the effects of early finalization? If false, dead reference variables
+ // are kept if they might be visible to the garbage collector.
+ // Currently this means that the class was declared to be dead-reference-safe,
+ // the method accesses no reachability-sensitive fields or data, and the same
+ // is true for any methods that were inlined into the current one.
+ bool dead_reference_safe_;
+
+ // Indicates whether the graph should be compiled in a way that
+ // ensures full debuggability. If false, we can apply more
+ // aggressive optimizations that may limit the level of debugging.
+ const bool debuggable_;
+
+ // The current id to assign to a newly added instruction. See HInstruction.id_.
+ int32_t current_instruction_id_;
+
+ // The dex file the method comes from.
+ const DexFile& dex_file_;
+
+ // The method index in the dex file.
+ const uint32_t method_idx_;
+
+ // If inlined, this encodes how the callee is being invoked.
+ const InvokeType invoke_type_;
+
+ // Whether the graph has been transformed to SSA form. Only used
+ // in debug mode to ensure we are not using properties only valid
+ // for non-SSA form (like the number of temporaries).
+ bool in_ssa_form_;
+
+ // Number of CHA guards in the graph. Used to short-circuit the
+ // CHA guard optimization pass when there is no CHA guard left.
+ uint32_t number_of_cha_guards_;
+
+ const InstructionSet instruction_set_;
+
+ // Cached constants.
+ HNullConstant* cached_null_constant_;
+ ArenaSafeMap<int32_t, HIntConstant*> cached_int_constants_;
+ ArenaSafeMap<int32_t, HFloatConstant*> cached_float_constants_;
+ ArenaSafeMap<int64_t, HLongConstant*> cached_long_constants_;
+ ArenaSafeMap<int64_t, HDoubleConstant*> cached_double_constants_;
+
+ HCurrentMethod* cached_current_method_;
+
+ // The ArtMethod this graph is for. Note that for AOT, it may be null,
+ // for example for methods whose declaring class could not be resolved
+ // (such as when the superclass could not be found).
+ ArtMethod* art_method_;
+
+ // The `ProfilingInfo` associated with the method being compiled.
+ ProfilingInfo* profiling_info_;
+
+ // How we are compiling the graph: either optimized, osr, or baseline.
+ // For osr, we treat all loops as irreducible and emit special stack maps
+ // to mark compiled code entries which the interpreter can directly jump to.
+ const CompilationKind compilation_kind_;
+
+ // Whether it is still useful to re-optimize this method after compiling
+ // it with the baseline compiler.
+ bool useful_optimizing_;
+
+ // List of methods that are assumed to have single implementation.
+ ArenaSet<ArtMethod*> cha_single_implementation_list_;
+
+ friend class SsaBuilder; // For caching constants.
+ friend class SsaLivenessAnalysis; // For the linear order.
+ friend class HInliner; // For the reverse post order.
+ ART_FRIEND_TEST(GraphTest, IfSuccessorSimpleJoinBlock1);
+ DISALLOW_COPY_AND_ASSIGN(HGraph);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_GRAPH_H_
+
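// A sketch of the intended payoff of the split, as a hypothetical compilation
// unit: code needing only the HGraph interface can include "graph.h" instead
// of pulling in all of "nodes.h". The helper name is invented for
// illustration.
#include "graph.h"

namespace art HIDDEN {

bool NeedsLoopOptimizations(const HGraph* graph) {
  return graph->HasLoops() && !graph->HasIrreducibleLoops();
}

}  // namespace art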
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index e887f3b..54491da 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -52,69 +52,6 @@
// double).
static constexpr bool kEnableFloatingPointStaticEvaluation = (FLT_EVAL_METHOD == 0);
-void HGraph::AddBlock(HBasicBlock* block) {
- block->SetBlockId(blocks_.size());
- blocks_.push_back(block);
-}
-
-inline int32_t HGraph::AllocateInstructionId() {
- CHECK_NE(current_instruction_id_, INT32_MAX);
- return current_instruction_id_++;
-}
-
-// Register a back edge; if the block was not a loop header before the call,
-// associate a newly created loop info with it.
-void AddBackEdge(HBasicBlock* block, HBasicBlock* back_edge) {
- if (block->GetLoopInformation() == nullptr) {
- HGraph* graph = block->GetGraph();
- block->SetLoopInformation(new (graph->GetAllocator()) HLoopInformation(block, graph));
- }
- DCHECK_EQ(block->GetLoopInformation()->GetHeader(), block);
- block->GetLoopInformation()->AddBackEdge(back_edge);
-}
-
-void HGraph::FindBackEdges(/*out*/ BitVectorView<size_t> visited) {
- // "visited" must be empty on entry, it's an output argument for all visited (i.e. live) blocks.
- DCHECK(!visited.IsAnyBitSet());
-
- // Allocate memory from local ScopedArenaAllocator.
- ScopedArenaAllocator allocator(GetArenaStack());
- // Nodes that we're currently visiting, indexed by block id.
- BitVectorView<size_t> visiting =
- ArenaBitVector::CreateFixedSize(&allocator, blocks_.size(), kArenaAllocGraphBuilder);
- // Number of successors visited from a given node, indexed by block id.
- ScopedArenaVector<size_t> successors_visited(blocks_.size(),
- 0u,
- allocator.Adapter(kArenaAllocGraphBuilder));
- // Stack of nodes that we're currently visiting (same as marked in "visiting" above).
- ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocGraphBuilder));
- constexpr size_t kDefaultWorklistSize = 8;
- worklist.reserve(kDefaultWorklistSize);
- visited.SetBit(entry_block_->GetBlockId());
- visiting.SetBit(entry_block_->GetBlockId());
- worklist.push_back(entry_block_);
-
- while (!worklist.empty()) {
- HBasicBlock* current = worklist.back();
- uint32_t current_id = current->GetBlockId();
- if (successors_visited[current_id] == current->GetSuccessors().size()) {
- visiting.ClearBit(current_id);
- worklist.pop_back();
- } else {
- HBasicBlock* successor = current->GetSuccessors()[successors_visited[current_id]++];
- uint32_t successor_id = successor->GetBlockId();
- if (visiting.IsBitSet(successor_id)) {
- DCHECK(ContainsElement(worklist, successor));
- AddBackEdge(successor, current);
- } else if (!visited.IsBitSet(successor_id)) {
- visited.SetBit(successor_id);
- visiting.SetBit(successor_id);
- worklist.push_back(successor);
- }
- }
- }
-}
-
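// A self-contained sketch of the back-edge detection idea in the removed
// FindBackEdges() above, using plain STL containers instead of the
// arena-backed ones; illustrative only, not ART code.
#include <cstddef>
#include <utility>
#include <vector>

std::vector<std::pair<size_t, size_t>> FindBackEdgesSketch(
    const std::vector<std::vector<size_t>>& successors, size_t entry) {
  std::vector<bool> visited(successors.size(), false);
  std::vector<bool> visiting(successors.size(), false);  // On the DFS stack.
  std::vector<size_t> successors_visited(successors.size(), 0u);
  std::vector<size_t> worklist;
  std::vector<std::pair<size_t, size_t>> back_edges;
  visited[entry] = true;
  visiting[entry] = true;
  worklist.push_back(entry);
  while (!worklist.empty()) {
    size_t current = worklist.back();
    if (successors_visited[current] == successors[current].size()) {
      visiting[current] = false;  // All successors done; pop off the stack.
      worklist.pop_back();
    } else {
      size_t next = successors[current][successors_visited[current]++];
      if (visiting[next]) {
        back_edges.emplace_back(current, next);  // Edge back into the DFS stack.
      } else if (!visited[next]) {
        visited[next] = true;
        visiting[next] = true;
        worklist.push_back(next);
      }
    }
  }
  return back_edges;
}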
// Remove the environment use records of the instruction for users.
void HInstruction::RemoveEnvironmentUses() {
for (HEnvironment* environment = GetEnvironment();
@@ -157,28 +94,6 @@
}
}
-void HGraph::RemoveDeadBlocksInstructionsAsUsersAndDisconnect(
- BitVectorView<const size_t> visited) const {
- for (size_t i = 0; i < blocks_.size(); ++i) {
- if (!visited.IsBitSet(i)) {
- HBasicBlock* block = blocks_[i];
- if (block == nullptr) continue;
-
- // Remove as user.
- for (HInstructionIteratorPrefetchNext it(block->GetPhis()); !it.Done(); it.Advance()) {
- it.Current()->RemoveAsUser();
- }
- for (HInstructionIteratorPrefetchNext it(block->GetInstructions()); !it.Done();
- it.Advance()) {
- it.Current()->RemoveAsUser();
- }
-
- // Remove non-catch phi uses, and disconnect the block.
- block->DisconnectFromSuccessors(visited);
- }
- }
-}
-
// This method assumes `insn` has been removed from all users with the exception of catch
// phis because of missing exceptional edges in the graph. It removes the
// instruction from catch phi uses, together with inputs of other catch phis in
@@ -198,95 +113,6 @@
}
}
-void HGraph::RemoveDeadBlocks(BitVectorView<const size_t> visited) {
- DCHECK(reverse_post_order_.empty()) << "We shouldn't have dominance information.";
- for (size_t i = 0; i < blocks_.size(); ++i) {
- if (!visited.IsBitSet(i)) {
- HBasicBlock* block = blocks_[i];
- if (block == nullptr) continue;
-
- // Remove all remaining uses (which should be only catch phi uses), and the instructions.
- block->RemoveCatchPhiUsesAndInstruction(/* building_dominator_tree = */ true);
-
- // Remove the block from the list of blocks, so that further analyses
- // never see it.
- blocks_[i] = nullptr;
- if (block->IsExitBlock()) {
- SetExitBlock(nullptr);
- }
- // Mark the block as removed. This is used by the HGraphBuilder to discard
- // the block as a branch target.
- block->SetGraph(nullptr);
- }
- }
-}
-
-GraphAnalysisResult HGraph::BuildDominatorTree() {
- // Allocate memory from local ScopedArenaAllocator.
- ScopedArenaAllocator allocator(GetArenaStack());
-
- BitVectorView<size_t> visited =
- ArenaBitVector::CreateFixedSize(&allocator, blocks_.size(), kArenaAllocGraphBuilder);
-
- // (1) Find the back edges in the graph doing a DFS traversal.
- FindBackEdges(visited);
-
- // (2) Remove instructions and phis from blocks not visited during
- // the initial DFS as users from other instructions, so that
- // users can be safely removed before uses later.
- // Also disconnect the block from its successors, updating the successor's phis if needed.
- RemoveDeadBlocksInstructionsAsUsersAndDisconnect(visited);
-
- // (3) Remove blocks not visited during the initial DFS.
- // Step (5) requires dead blocks to be removed from the
- // predecessors list of live blocks.
- RemoveDeadBlocks(visited);
-
- // (4) Simplify the CFG now, so that we don't need to recompute
- // dominators and the reverse post order.
- SimplifyCFG();
-
- // (5) Compute the dominance information and the reverse post order.
- ComputeDominanceInformation();
-
- // (6) Analyze loops discovered through back edge analysis, and
- // set the loop information on each block.
- GraphAnalysisResult result = AnalyzeLoops();
- if (result != kAnalysisSuccess) {
- return result;
- }
-
- // (7) Precompute per-block try membership before entering the SSA builder,
- // which needs the information to build catch block phis from values of
- // locals at throwing instructions inside try blocks.
- ComputeTryBlockInformation();
-
- return kAnalysisSuccess;
-}
-
-GraphAnalysisResult HGraph::RecomputeDominatorTree() {
- DCHECK(!HasIrreducibleLoops()) << "Recomputing loop information in graphs with irreducible loops "
- << "is unsupported, as it could lead to loop header changes";
- ClearLoopInformation();
- ClearDominanceInformation();
- return BuildDominatorTree();
-}
-
-void HGraph::ClearDominanceInformation() {
- for (HBasicBlock* block : GetActiveBlocks()) {
- block->ClearDominanceInformation();
- }
- reverse_post_order_.clear();
-}
-
-void HGraph::ClearLoopInformation() {
- SetHasLoops(false);
- SetHasIrreducibleLoops(false);
- for (HBasicBlock* block : GetActiveBlocks()) {
- block->SetLoopInformation(nullptr);
- }
-}
-
void HBasicBlock::ClearDominanceInformation() {
dominated_blocks_.clear();
dominator_ = nullptr;
@@ -300,529 +126,6 @@
return instruction;
}
-static bool UpdateDominatorOfSuccessor(HBasicBlock* block, HBasicBlock* successor) {
- DCHECK(ContainsElement(block->GetSuccessors(), successor));
-
- HBasicBlock* old_dominator = successor->GetDominator();
- HBasicBlock* new_dominator =
- (old_dominator == nullptr) ? block
- : CommonDominator::ForPair(old_dominator, block);
-
- if (old_dominator == new_dominator) {
- return false;
- } else {
- successor->SetDominator(new_dominator);
- return true;
- }
-}
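// Worked example for the helper above (illustrative): in a diamond CFG
//   A -> B, A -> C, B -> D, C -> D,
// the edge B->D first sets dom(D) = B; the edge C->D then computes
// CommonDominator::ForPair(B, C) = A, so dom(D) settles on A and the helper
// returns true for both updates.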
-
-void HGraph::ComputeDominanceInformation() {
- DCHECK(reverse_post_order_.empty());
- const size_t size = blocks_.size();
- reverse_post_order_.reserve(size);
- reverse_post_order_.push_back(entry_block_);
-
- {
- // Allocate memory from local ScopedArenaAllocator.
- ScopedArenaAllocator allocator(GetArenaStack());
- // Number of visits of a given node, indexed by block id.
- ScopedArenaVector<size_t> visits(size, 0u, allocator.Adapter(kArenaAllocGraphBuilder));
- // Number of successors visited from a given node, indexed by block id.
- ScopedArenaVector<size_t> successors_visited(
- size, 0u, allocator.Adapter(kArenaAllocGraphBuilder));
- // Nodes for which we need to visit successors.
- ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocGraphBuilder));
- worklist.reserve(size);
- worklist.push_back(entry_block_);
-
- // Cached for the check below.
- HBasicBlock* exit = GetExitBlock();
-
- while (!worklist.empty()) {
- HBasicBlock* current = worklist.back();
- uint32_t current_id = current->GetBlockId();
- DCHECK_LT(successors_visited[current_id], current->GetSuccessors().size());
- HBasicBlock* successor = current->GetSuccessors()[successors_visited[current_id]++];
- if (successors_visited[current_id] == current->GetSuccessors().size()) {
- worklist.pop_back();
- }
- UpdateDominatorOfSuccessor(current, successor);
-
- // Once all the forward edges have been visited, we know the immediate
- // dominator of the block. We can then start visiting its successors.
- size_t successor_visits_needed =
- successor->GetPredecessors().size() -
- (successor->IsLoopHeader() ? successor->GetLoopInformation()->NumberOfBackEdges() : 0u);
- if (++visits[successor->GetBlockId()] == successor_visits_needed) {
- reverse_post_order_.push_back(successor);
- // The exit block is the only one with no successors. It is encountered only once per
- // graph, at the end.
- if (LIKELY(successor != exit)) {
- worklist.push_back(successor);
- }
- }
- }
- }
-
- // Check if the graph has back edges not dominated by their respective headers.
- // If so, we need to update the dominators of those headers and recursively of
- // their successors. We do that with a fix-point iteration over all blocks.
- // The algorithm is guaranteed to terminate because it loops only if the sum
- // of all dominator chains has decreased in the current iteration.
- bool must_run_fix_point = false;
- for (HBasicBlock* block : blocks_) {
- if (block != nullptr &&
- block->IsLoopHeader() &&
- block->GetLoopInformation()->HasBackEdgeNotDominatedByHeader()) {
- must_run_fix_point = true;
- break;
- }
- }
- if (must_run_fix_point) {
- bool update_occurred = true;
- while (update_occurred) {
- update_occurred = false;
- for (HBasicBlock* block : GetReversePostOrder()) {
- for (HBasicBlock* successor : block->GetSuccessors()) {
- update_occurred |= UpdateDominatorOfSuccessor(block, successor);
- }
- }
- }
- }
-
- // Make sure that there are no remaining blocks whose dominator information
- // needs to be updated.
- if (kIsDebugBuild) {
- for (HBasicBlock* block : GetReversePostOrder()) {
- for (HBasicBlock* successor : block->GetSuccessors()) {
- DCHECK(!UpdateDominatorOfSuccessor(block, successor));
- }
- }
- }
-
- // Populate `dominated_blocks_` information after computing all dominators.
- // The potential presence of irreducible loops requires doing it afterwards.
- for (HBasicBlock* block : GetReversePostOrder()) {
- if (!block->IsEntryBlock()) {
- block->GetDominator()->AddDominatedBlock(block);
- }
- }
-}
-
-HBasicBlock* HGraph::SplitEdge(HBasicBlock* block, HBasicBlock* successor) {
- HBasicBlock* new_block = HBasicBlock::Create(allocator_, this, successor->GetDexPc());
- AddBlock(new_block);
- // Use `InsertBetween` to ensure the predecessor index and successor index of
- // `block` and `successor` are preserved.
- new_block->InsertBetween(block, successor);
- return new_block;
-}
-
-void HGraph::SplitCriticalEdge(HBasicBlock* block, HBasicBlock* successor) {
- // Insert a new node between `block` and `successor` to split the
- // critical edge.
- HBasicBlock* new_block = SplitEdge(block, successor);
- new_block->AddInstruction(new (allocator_) HGoto(successor->GetDexPc()));
- if (successor->IsLoopHeader()) {
- // If we split at a back edge boundary, make the new block the back edge.
- HLoopInformation* info = successor->GetLoopInformation();
- if (info->IsBackEdge(*block)) {
- info->RemoveBackEdge(block);
- info->AddBackEdge(new_block);
- }
- }
-}
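// Worked example (illustrative): an edge B1 -> B2 is critical when B1 has
// several successors and B2 several predecessors. Splitting inserts a block
// N holding a single HGoto, giving B1 -> N -> B2, so that code can later be
// placed "on the edge" without affecting other paths.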
-
-// Reorder phi inputs to match reordering of the block's predecessors.
-static void FixPhisAfterPredecessorsReodering(HBasicBlock* block, size_t first, size_t second) {
- for (HInstructionIteratorPrefetchNext it(block->GetPhis()); !it.Done(); it.Advance()) {
- HPhi* phi = it.Current()->AsPhi();
- HInstruction* first_instr = phi->InputAt(first);
- HInstruction* second_instr = phi->InputAt(second);
- phi->ReplaceInput(first_instr, second);
- phi->ReplaceInput(second_instr, first);
- }
-}
-
-// Make sure that the first predecessor of a loop header is the incoming block.
-void HGraph::OrderLoopHeaderPredecessors(HBasicBlock* header) {
- DCHECK(header->IsLoopHeader());
- HLoopInformation* info = header->GetLoopInformation();
- if (info->IsBackEdge(*header->GetPredecessors()[0])) {
- HBasicBlock* to_swap = header->GetPredecessors()[0];
- for (size_t pred = 1, e = header->GetPredecessors().size(); pred < e; ++pred) {
- HBasicBlock* predecessor = header->GetPredecessors()[pred];
- if (!info->IsBackEdge(*predecessor)) {
- header->predecessors_[pred] = to_swap;
- header->predecessors_[0] = predecessor;
- FixPhisAfterPredecessorsReodering(header, 0, pred);
- break;
- }
- }
- }
-}
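// Worked example (illustrative): if header->predecessors_ is
// {back_edge, incoming}, the loop above swaps it to {incoming, back_edge},
// and FixPhisAfterPredecessorsReodering() swaps inputs 0 and 1 of every
// header phi so that phi input order keeps matching predecessor order.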
-
-// Transform the control flow of the loop to a single-preheader format (without touching
-// the data flow). `new_preheader` may already be among the header's predecessors; this
-// situation is handled correctly.
-static void FixControlForNewSinglePreheader(HBasicBlock* header, HBasicBlock* new_preheader) {
- HLoopInformation* loop_info = header->GetLoopInformation();
- for (size_t pred = 0; pred < header->GetPredecessors().size(); ++pred) {
- HBasicBlock* predecessor = header->GetPredecessors()[pred];
- if (!loop_info->IsBackEdge(*predecessor) && predecessor != new_preheader) {
- predecessor->ReplaceSuccessor(header, new_preheader);
- pred--;
- }
- }
-}
-
-// == Before == == After ==
-// _________ _________ _________ _________
-// | B0 | | B1 | (old preheaders) | B0 | | B1 |
-// |=========| |=========| |=========| |=========|
-// | i0 = .. | | i1 = .. | | i0 = .. | | i1 = .. |
-// |_________| |_________| |_________| |_________|
-// \ / \ /
-// \ / ___v____________v___
-// \ / (new preheader) | B20 <- B0, B1 |
-// | | |====================|
-// | | | i20 = phi(i0, i1) |
-// | | |____________________|
-// | | |
-// /\ | | /\ /\ | /\
-// / v_______v_________v_______v \ / v___________v_____________v \
-// | | B10 <- B0, B1, B2, B3 | | | | B10 <- B20, B2, B3 | |
-// | |===========================| | (header) | |===========================| |
-// | | i10 = phi(i0, i1, i2, i3) | | | | i10 = phi(i20, i2, i3) | |
-// | |___________________________| | | |___________________________| |
-// | / \ | | / \ |
-// | ... ... | | ... ... |
-// | _________ _________ | | _________ _________ |
-// | | B2 | | B3 | | | | B2 | | B3 | |
-// | |=========| |=========| | (back edges) | |=========| |=========| |
-// | | i2 = .. | | i3 = .. | | | | i2 = .. | | i3 = .. | |
-// | |_________| |_________| | | |_________| |_________| |
-// \ / \ / \ / \ /
-// \___/ \___/ \___/ \___/
-//
-void HGraph::TransformLoopToSinglePreheaderFormat(HBasicBlock* header) {
- HLoopInformation* loop_info = header->GetLoopInformation();
-
- HBasicBlock* preheader = HBasicBlock::Create(allocator_, this, header->GetDexPc());
- AddBlock(preheader);
- preheader->AddInstruction(new (allocator_) HGoto(header->GetDexPc()));
-
- // If the old header has no Phis then we only need to fix the control flow.
- if (header->GetPhis().IsEmpty()) {
- FixControlForNewSinglePreheader(header, preheader);
- preheader->AddSuccessor(header);
- return;
- }
-
- // Find the first non-back edge block in the header's predecessors list.
- size_t first_nonbackedge_pred_pos = 0;
- bool found = false;
- for (size_t pred = 0; pred < header->GetPredecessors().size(); ++pred) {
- HBasicBlock* predecessor = header->GetPredecessors()[pred];
- if (!loop_info->IsBackEdge(*predecessor)) {
- first_nonbackedge_pred_pos = pred;
- found = true;
- break;
- }
- }
-
- DCHECK(found);
-
- // Fix the data-flow.
- for (HInstructionIteratorPrefetchNext it(header->GetPhis()); !it.Done(); it.Advance()) {
- HPhi* header_phi = it.Current()->AsPhi();
-
- HPhi* preheader_phi = new (GetAllocator()) HPhi(GetAllocator(),
- header_phi->GetRegNumber(),
- 0,
- header_phi->GetType());
- if (header_phi->GetType() == DataType::Type::kReference) {
- preheader_phi->SetReferenceTypeInfoIfValid(header_phi->GetReferenceTypeInfo());
- }
- preheader->AddPhi(preheader_phi);
-
- HInstruction* orig_input = header_phi->InputAt(first_nonbackedge_pred_pos);
- header_phi->ReplaceInput(preheader_phi, first_nonbackedge_pred_pos);
- preheader_phi->AddInput(orig_input);
-
- for (size_t input_pos = first_nonbackedge_pred_pos + 1;
- input_pos < header_phi->InputCount();
- input_pos++) {
- HInstruction* input = header_phi->InputAt(input_pos);
- HBasicBlock* pred_block = header->GetPredecessors()[input_pos];
-
- if (loop_info->Contains(*pred_block)) {
- DCHECK(loop_info->IsBackEdge(*pred_block));
- } else {
- preheader_phi->AddInput(input);
- header_phi->RemoveInputAt(input_pos);
- input_pos--;
- }
- }
- }
-
- // Fix the control-flow.
- HBasicBlock* first_pred = header->GetPredecessors()[first_nonbackedge_pred_pos];
- preheader->InsertBetween(first_pred, header);
-
- FixControlForNewSinglePreheader(header, preheader);
-}
-
-void HGraph::SimplifyLoop(HBasicBlock* header) {
- HLoopInformation* info = header->GetLoopInformation();
-
- // Make sure the loop has only one pre header. This simplifies SSA building: we then
- // only need to look at the pre header to know which locals are initialized at entry
- // of the loop. Also, don't allow the entry block to be a pre header: this simplifies
- // inlining this graph.
- size_t number_of_incomings = header->GetPredecessors().size() - info->NumberOfBackEdges();
- if (number_of_incomings != 1 || (GetEntryBlock()->GetSingleSuccessor() == header)) {
- TransformLoopToSinglePreheaderFormat(header);
- }
-
- OrderLoopHeaderPredecessors(header);
-
- HInstruction* first_instruction = header->GetFirstInstruction();
- if (first_instruction != nullptr && first_instruction->IsSuspendCheck()) {
- // Called from DeadBlockElimination. Update SuspendCheck pointer.
- info->SetSuspendCheck(first_instruction->AsSuspendCheck());
- }
-}
-
-void HGraph::ComputeTryBlockInformation() {
- // Iterate in reverse post order to propagate try membership information from
- // predecessors to their successors.
- bool graph_has_try_catch = false;
-
- for (HBasicBlock* block : GetReversePostOrder()) {
- if (block->IsEntryBlock() || block->IsCatchBlock()) {
- // Catch blocks after simplification have only exceptional predecessors
- // and hence are never in tries.
- continue;
- }
-
- // Infer try membership from the first predecessor. Having simplified loops,
- // the first predecessor can never be a back edge and therefore it must have
- // been visited already and had its try membership set.
- HBasicBlock* first_predecessor = block->GetPredecessors()[0];
- DCHECK_IMPLIES(block->IsLoopHeader(),
- !block->GetLoopInformation()->IsBackEdge(*first_predecessor));
- const HTryBoundary* try_entry = first_predecessor->ComputeTryEntryOfSuccessors();
- graph_has_try_catch |= try_entry != nullptr;
- if (try_entry != nullptr &&
- (block->GetTryCatchInformation() == nullptr ||
- try_entry != &block->GetTryCatchInformation()->GetTryEntry())) {
- // We are either setting try block membership for the first time or it
- // has changed.
- block->SetTryCatchInformation(new (allocator_) TryCatchInformation(*try_entry));
- }
- }
-
- SetHasTryCatch(graph_has_try_catch);
-}
-
-void HGraph::SimplifyCFG() {
- // Simplify the CFG for future analysis and code generation:
- // (1): Split critical edges.
- // (2): Simplify loops by having only one preheader.
- // NOTE: We're appending new blocks inside the loop, so we need to use an index because
- // iterators can be invalidated. We remember the initial size to avoid iterating over
- // the new blocks.
- for (size_t block_id = 0u, end = blocks_.size(); block_id != end; ++block_id) {
- HBasicBlock* block = blocks_[block_id];
- if (block == nullptr) continue;
- if (block->GetSuccessors().size() > 1) {
- // Only split normal-flow edges. We cannot split exceptional edges, as they
- // are synthesized (they only approximate real control flow), and we do not
- // need to anyway. Moves that would be inserted there are performed by the runtime.
- ArrayRef<HBasicBlock* const> normal_successors = block->GetNormalSuccessors();
- for (size_t j = 0, e = normal_successors.size(); j < e; ++j) {
- HBasicBlock* successor = normal_successors[j];
- DCHECK(!successor->IsCatchBlock());
- if (successor == exit_block_) {
- // (Throw/Return/ReturnVoid)->TryBoundary->Exit. Special case which we
- // do not want to split because Goto->Exit is not allowed.
- DCHECK(block->IsSingleTryBoundary());
- } else if (successor->GetPredecessors().size() > 1) {
- SplitCriticalEdge(block, successor);
- // SplitCriticalEdge could have invalidated the `normal_successors`
- // ArrayRef. We must re-acquire it.
- normal_successors = block->GetNormalSuccessors();
- DCHECK_EQ(normal_successors[j]->GetSingleSuccessor(), successor);
- DCHECK_EQ(e, normal_successors.size());
- }
- }
- }
- if (block->IsLoopHeader()) {
- SimplifyLoop(block);
- } else if (!block->IsEntryBlock() &&
- block->GetFirstInstruction() != nullptr &&
- block->GetFirstInstruction()->IsSuspendCheck()) {
- // We are being called by the dead code elimination pass, and what used to be
- // a loop got dismantled. Just remove the suspend check.
- block->RemoveInstruction(block->GetFirstInstruction());
- }
- }
-}
-
-GraphAnalysisResult HGraph::AnalyzeLoops() const {
- // We iterate post order to ensure we visit inner loops before outer loops.
- // `PopulateRecursive` needs this guarantee to know whether a natural loop
- // contains an irreducible loop.
- for (HBasicBlock* block : GetPostOrder()) {
- if (block->IsLoopHeader()) {
- if (block->IsCatchBlock()) {
- // TODO: Dealing with exceptional back edges could be tricky because
- // they only approximate the real control flow. Bail out for now.
- VLOG(compiler) << "Not compiled: Exceptional back edges";
- return kAnalysisFailThrowCatchLoop;
- }
- block->GetLoopInformation()->Populate();
- }
- }
- return kAnalysisSuccess;
-}
-
-template <class InstructionType, typename ValueType>
-InstructionType* HGraph::CreateConstant(ValueType value,
- ArenaSafeMap<ValueType, InstructionType*>* cache) {
- // Try to find an existing constant of the given value.
- InstructionType* constant = nullptr;
- auto cached_constant = cache->find(value);
- if (cached_constant != cache->end()) {
- constant = cached_constant->second;
- }
-
- // If not found or previously deleted, create and cache a new instruction.
- // Don't bother reviving a previously deleted instruction, for simplicity.
- if (constant == nullptr || constant->GetBlock() == nullptr) {
- constant = new (allocator_) InstructionType(value);
- cache->Overwrite(value, constant);
- InsertConstant(constant);
- }
- return constant;
-}
-
-void HGraph::InsertConstant(HConstant* constant) {
- // New constants are inserted before the SuspendCheck at the bottom of the
- // entry block. Note that this method can be called from the graph builder and
- // the entry block therefore may not end with SuspendCheck->Goto yet.
- HInstruction* insert_before = nullptr;
-
- HInstruction* gota = entry_block_->GetLastInstruction();
- if (gota != nullptr && gota->IsGoto()) {
- HInstruction* suspend_check = gota->GetPrevious();
- if (suspend_check != nullptr && suspend_check->IsSuspendCheck()) {
- insert_before = suspend_check;
- } else {
- insert_before = gota;
- }
- }
-
- if (insert_before == nullptr) {
- entry_block_->AddInstruction(constant);
- } else {
- entry_block_->InsertInstructionBefore(constant, insert_before);
- }
-}
-
-HNullConstant* HGraph::GetNullConstant() {
- // For simplicity, don't bother reviving the cached null constant if it is
- // not null and not in a block. Otherwise, we need to clear the instruction
- // id and/or any invariants the graph is assuming when adding new instructions.
- if ((cached_null_constant_ == nullptr) || (cached_null_constant_->GetBlock() == nullptr)) {
- cached_null_constant_ = new (allocator_) HNullConstant();
- cached_null_constant_->SetReferenceTypeInfo(GetInexactObjectRti());
- InsertConstant(cached_null_constant_);
- }
- if (kIsDebugBuild) {
- ScopedObjectAccess soa(Thread::Current());
- DCHECK(cached_null_constant_->GetReferenceTypeInfo().IsValid());
- }
- return cached_null_constant_;
-}
-
-HIntConstant* HGraph::GetIntConstant(int32_t value) {
- return CreateConstant(value, &cached_int_constants_);
-}
-
-HLongConstant* HGraph::GetLongConstant(int64_t value) {
- return CreateConstant(value, &cached_long_constants_);
-}
-
-HFloatConstant* HGraph::GetFloatConstant(float value) {
- return CreateConstant(bit_cast<int32_t, float>(value), &cached_float_constants_);
-}
-
-HDoubleConstant* HGraph::GetDoubleConstant(double value) {
- return CreateConstant(bit_cast<int64_t, double>(value), &cached_double_constants_);
-}
-
-HCurrentMethod* HGraph::GetCurrentMethod() {
- // For simplicity, don't bother reviving the cached current method if it is
- // not null and not in a block. Otherwise, we need to clear the instruction
- // id and/or any invariants the graph is assuming when adding new instructions.
- if ((cached_current_method_ == nullptr) || (cached_current_method_->GetBlock() == nullptr)) {
- cached_current_method_ = new (allocator_) HCurrentMethod(
- Is64BitInstructionSet(instruction_set_) ? DataType::Type::kInt64 : DataType::Type::kInt32,
- entry_block_->GetDexPc());
- if (entry_block_->GetFirstInstruction() == nullptr) {
- entry_block_->AddInstruction(cached_current_method_);
- } else {
- entry_block_->InsertInstructionBefore(
- cached_current_method_, entry_block_->GetFirstInstruction());
- }
- }
- return cached_current_method_;
-}
-
-const char* HGraph::GetMethodName() const {
- const dex::MethodId& method_id = dex_file_.GetMethodId(method_idx_);
- return dex_file_.GetMethodName(method_id);
-}
-
-std::string HGraph::PrettyMethod(bool with_signature) const {
- return dex_file_.PrettyMethod(method_idx_, with_signature);
-}
-
-HConstant* HGraph::GetConstant(DataType::Type type, int64_t value) {
- switch (type) {
- case DataType::Type::kBool:
- DCHECK(IsUint<1>(value));
- FALLTHROUGH_INTENDED;
- case DataType::Type::kUint8:
- case DataType::Type::kInt8:
- case DataType::Type::kUint16:
- case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- DCHECK(IsInt(DataType::Size(type) * kBitsPerByte, value));
- return GetIntConstant(static_cast<int32_t>(value));
-
- case DataType::Type::kInt64:
- return GetLongConstant(value);
-
- default:
- LOG(FATAL) << "Unsupported constant type";
- UNREACHABLE();
- }
-}
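// A usage sketch of the constant cache above; `ConstantCacheSketch` is a
// hypothetical helper, not a test from the source.
void ConstantCacheSketch(HGraph* graph) {
  HIntConstant* c1 = graph->GetIntConstant(42);
  HIntConstant* c2 = graph->GetIntConstant(42);
  DCHECK_EQ(c1, c2);  // Cached: one HIntConstant per value, kept in the entry block.
  HConstant* c3 = graph->GetConstant(DataType::Type::kInt32, 42);
  DCHECK_EQ(c3, c1);  // Integral GetConstant() routes to GetIntConstant().
}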
-
-void HGraph::CacheFloatConstant(HFloatConstant* constant) {
- int32_t value = bit_cast<int32_t, float>(constant->GetValue());
- DCHECK(cached_float_constants_.find(value) == cached_float_constants_.end());
- cached_float_constants_.Overwrite(value, constant);
-}
-
-void HGraph::CacheDoubleConstant(HDoubleConstant* constant) {
- int64_t value = bit_cast<int64_t, double>(constant->GetValue());
- DCHECK(cached_double_constants_.find(value) == cached_double_constants_.end());
- cached_double_constants_.Overwrite(value, constant);
-}
-
bool HBasicBlock::Dominates(const HBasicBlock* other) const {
// Walk up the dominator tree from `other`, to find out if `this`
// is an ancestor.
@@ -1743,14 +1046,6 @@
return os;
}
-std::ostream& HGraph::Dump(std::ostream& os,
- CodeGenerator* codegen,
- std::optional<std::reference_wrapper<const BlockNamer>> namer) {
- HGraphVisualizer vis(&os, this, codegen, namer);
- vis.DumpGraphDebug();
- return os;
-}
-
void HInstruction::MoveBefore(HInstruction* cursor, bool do_checks) {
if (do_checks) {
DCHECK(!IsPhi());
@@ -2325,492 +1620,6 @@
graph_ = nullptr;
}
-void HGraph::DeleteDeadEmptyBlock(HBasicBlock* block) {
- DCHECK_EQ(block->GetGraph(), this);
- DCHECK(block->GetSuccessors().empty());
- DCHECK(block->GetPredecessors().empty());
- DCHECK(block->GetDominatedBlocks().empty());
- DCHECK(block->GetDominator() == nullptr);
- DCHECK(block->GetInstructions().IsEmpty());
- DCHECK(block->GetPhis().IsEmpty());
-
- if (block->IsExitBlock()) {
- SetExitBlock(nullptr);
- }
-
- RemoveElement(reverse_post_order_, block);
- blocks_[block->GetBlockId()] = nullptr;
- block->SetGraph(nullptr);
-}
-
-void HGraph::UpdateLoopAndTryInformationOfNewBlock(HBasicBlock* block,
- HBasicBlock* reference,
- bool replace_if_back_edge,
- bool has_more_specific_try_catch_info) {
- if (block->IsLoopHeader()) {
- // Clear the information of which blocks are contained in that loop. Since the
- // information is stored as a bit vector based on block ids, we have to update
- // it, as those block ids were specific to the callee graph and we are now adding
- // these blocks to the caller graph.
- block->GetLoopInformation()->ClearAllBlocks();
- }
-
- // If not already in a loop, update the loop information.
- if (!block->IsInLoop()) {
- block->SetLoopInformation(reference->GetLoopInformation());
- }
-
- // If the block is in a loop, update all its outward loops.
- HLoopInformation* loop_info = block->GetLoopInformation();
- if (loop_info != nullptr) {
- for (HLoopInformationOutwardIterator loop_it(*block);
- !loop_it.Done();
- loop_it.Advance()) {
- loop_it.Current()->Add(block);
- }
- if (replace_if_back_edge && loop_info->IsBackEdge(*reference)) {
- loop_info->ReplaceBackEdge(reference, block);
- }
- }
-
- DCHECK_IMPLIES(has_more_specific_try_catch_info, !reference->IsTryBlock())
- << "We don't allow to inline try catches inside of other try blocks.";
-
- // Update the TryCatchInformation, if we are not inlining a try catch.
- if (!has_more_specific_try_catch_info) {
- // Copy TryCatchInformation if `reference` is a try block, not if it is a catch block.
- TryCatchInformation* try_catch_info =
- reference->IsTryBlock() ? reference->GetTryCatchInformation() : nullptr;
- block->SetTryCatchInformation(try_catch_info);
- }
-}
-
-HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
- DCHECK(HasExitBlock()) << "Unimplemented scenario";
- // Update the environments in this graph to have the invoke's environment
- // as parent.
- {
- // Skip the entry block, we do not need to update the entry's suspend check.
- for (HBasicBlock* block : GetReversePostOrderSkipEntryBlock()) {
- for (HInstructionIteratorPrefetchNext instr_it(block->GetInstructions());
- !instr_it.Done();
- instr_it.Advance()) {
- HInstruction* current = instr_it.Current();
- if (current->NeedsEnvironment()) {
- DCHECK(current->HasEnvironment());
- current->GetEnvironment()->SetAndCopyParentChain(
- outer_graph->GetAllocator(), invoke->GetEnvironment());
- }
- }
- }
- }
-
- if (HasBoundsChecks()) {
- outer_graph->SetHasBoundsChecks(true);
- }
- if (HasLoops()) {
- outer_graph->SetHasLoops(true);
- }
- if (HasIrreducibleLoops()) {
- outer_graph->SetHasIrreducibleLoops(true);
- }
- if (HasDirectCriticalNativeCall()) {
- outer_graph->SetHasDirectCriticalNativeCall(true);
- }
- if (HasTryCatch()) {
- outer_graph->SetHasTryCatch(true);
- }
- if (HasMonitorOperations()) {
- outer_graph->SetHasMonitorOperations(true);
- }
- if (HasTraditionalSIMD()) {
- outer_graph->SetHasTraditionalSIMD(true);
- }
- if (HasPredicatedSIMD()) {
- outer_graph->SetHasPredicatedSIMD(true);
- }
- if (HasAlwaysThrowingInvokes()) {
- outer_graph->SetHasAlwaysThrowingInvokes(true);
- }
-
- HInstruction* return_value = nullptr;
- if (GetBlocks().size() == 3) {
- // Inliner already made sure we don't inline methods that always throw.
- DCHECK(!GetBlocks()[1]->GetLastInstruction()->IsThrow());
- // Simple case of an entry block, a body block, and an exit block.
- // Put the body block's instruction into `invoke`'s block.
- HBasicBlock* body = GetBlocks()[1];
- DCHECK(GetBlocks()[0]->IsEntryBlock());
- DCHECK(GetBlocks()[2]->IsExitBlock());
- DCHECK(!body->IsExitBlock());
- DCHECK(!body->IsInLoop());
- HInstruction* last = body->GetLastInstruction();
-
- // Note that we add instructions before the invoke only to simplify polymorphic inlining.
- invoke->GetBlock()->instructions_.AddBefore(invoke, body->GetInstructions());
- body->GetInstructions().SetBlockOfInstructions(invoke->GetBlock());
-
- // Replace the invoke with the return value of the inlined graph.
- if (last->IsReturn()) {
- return_value = last->InputAt(0);
- } else {
- DCHECK(last->IsReturnVoid());
- }
-
- invoke->GetBlock()->RemoveInstruction(last);
- } else {
- // Need to inline multiple blocks. We split `invoke`'s block
- // into two blocks, merge the first block of the inlined graph into
- // the first half, and replace the exit block of the inlined graph
- // with the second half.
- ArenaAllocator* allocator = outer_graph->GetAllocator();
- HBasicBlock* at = invoke->GetBlock();
- // Note that we split before the invoke only to simplify polymorphic inlining.
- HBasicBlock* to = at->SplitBeforeForInlining(invoke);
-
- HBasicBlock* first = entry_block_->GetSuccessors()[0];
- DCHECK(!first->IsInLoop());
- DCHECK(first->GetTryCatchInformation() == nullptr);
- at->MergeWithInlined(first);
- exit_block_->ReplaceWith(to);
-
- // Update the meta information surrounding blocks:
- // (1) the graph they are now in,
- // (2) the reverse post order of that graph,
- // (3) their potential loop information, inner and outer,
- // (4) try block membership.
- // Note that we do not need to update catch phi inputs because they
- // correspond to the register file of the outer method which the inlinee
- // cannot modify.
-
- // We don't add the entry block, the exit block, and the first block, which
- // has been merged with `at`.
- static constexpr int kNumberOfSkippedBlocksInCallee = 3;
-
- // We add the `to` block.
- static constexpr int kNumberOfNewBlocksInCaller = 1;
- size_t blocks_added = (reverse_post_order_.size() - kNumberOfSkippedBlocksInCallee)
- + kNumberOfNewBlocksInCaller;
-
- // Find the location of `at` in the outer graph's reverse post order. The new
- // blocks will be added after it.
- size_t index_of_at = IndexOfElement(outer_graph->reverse_post_order_, at);
- MakeRoomFor(&outer_graph->reverse_post_order_, blocks_added, index_of_at);
-
- // Do a reverse post order of the blocks in the callee and do (1), (2), (3)
- // and (4) to the blocks that apply.
- for (HBasicBlock* current : GetReversePostOrder()) {
- if (current != exit_block_ && current != entry_block_ && current != first) {
- DCHECK(current->GetGraph() == this);
- current->SetGraph(outer_graph);
- outer_graph->AddBlock(current);
- outer_graph->reverse_post_order_[++index_of_at] = current;
- UpdateLoopAndTryInformationOfNewBlock(current,
- at,
- /* replace_if_back_edge= */ false,
- current->GetTryCatchInformation() != nullptr);
- }
- }
-
- // Do (1), (2), (3) and (4) to `to`.
- to->SetGraph(outer_graph);
- outer_graph->AddBlock(to);
- outer_graph->reverse_post_order_[++index_of_at] = to;
- // Only `to` can become a back edge, as the inlined blocks
- // are predecessors of `to`.
- UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge= */ true);
-
- // Update all predecessors of the exit block (now the `to` block)
- // to not `HReturn` but `HGoto` instead. Special case throwing blocks
- // to now get the outer graph exit block as successor.
- HPhi* return_value_phi = nullptr;
- bool rerun_dominance = false;
- bool rerun_loop_analysis = false;
- for (size_t pred = 0; pred < to->GetPredecessors().size(); ++pred) {
- HBasicBlock* predecessor = to->GetPredecessors()[pred];
- HInstruction* last = predecessor->GetLastInstruction();
-
- // At this point we might either have:
- // A) Return/ReturnVoid/Throw as the last instruction, or
- // B) `Return/ReturnVoid/Throw->TryBoundary` as the last instruction chain
-
- const bool saw_try_boundary = last->IsTryBoundary();
- if (saw_try_boundary) {
- DCHECK(predecessor->IsSingleTryBoundary());
- DCHECK(!last->AsTryBoundary()->IsEntry());
- predecessor = predecessor->GetSinglePredecessor();
- last = predecessor->GetLastInstruction();
- }
-
- if (last->IsThrow()) {
- if (at->IsTryBlock()) {
- DCHECK(!saw_try_boundary) << "We don't support inlining of try blocks into try blocks.";
- // Create a TryBoundary of kind:exit and point it to the Exit block.
- HBasicBlock* new_block = outer_graph->SplitEdge(predecessor, to);
- new_block->AddInstruction(
- new (allocator) HTryBoundary(HTryBoundary::BoundaryKind::kExit, last->GetDexPc()));
- new_block->ReplaceSuccessor(to, outer_graph->GetExitBlock());
-
- // Copy information from the predecessor.
- new_block->SetLoopInformation(predecessor->GetLoopInformation());
- TryCatchInformation* try_catch_info = predecessor->GetTryCatchInformation();
- new_block->SetTryCatchInformation(try_catch_info);
- for (HBasicBlock* xhandler :
- try_catch_info->GetTryEntry().GetBlock()->GetExceptionalSuccessors()) {
- new_block->AddSuccessor(xhandler);
- }
- DCHECK(try_catch_info->GetTryEntry().HasSameExceptionHandlersAs(
- *new_block->GetLastInstruction()->AsTryBoundary()));
- } else {
- // We either have `Throw->TryBoundary` or `Throw`. We want to point the whole chain to the
- // exit, so we recompute `predecessor`.
- predecessor = to->GetPredecessors()[pred];
- predecessor->ReplaceSuccessor(to, outer_graph->GetExitBlock());
- }
-
- --pred;
- // We need to re-run dominance information, as the exit block now has
- // a new predecessor and potential new dominator.
- // TODO(solanes): See if it's worth it to hand-modify the domination chain instead of
- // rerunning the dominance for the whole graph.
- rerun_dominance = true;
- if (predecessor->GetLoopInformation() != nullptr) {
- // The loop information might have changed e.g. `predecessor` might not be in a loop
- // anymore. We only do this if `predecessor` has loop information as it is impossible for
- // predecessor to end up in a loop if it wasn't in one before.
- rerun_loop_analysis = true;
- }
- } else {
- if (last->IsReturnVoid()) {
- DCHECK(return_value == nullptr);
- DCHECK(return_value_phi == nullptr);
- } else {
- DCHECK(last->IsReturn());
- if (return_value_phi != nullptr) {
- return_value_phi->AddInput(last->InputAt(0));
- } else if (return_value == nullptr) {
- return_value = last->InputAt(0);
- } else {
- // There will be multiple returns.
- return_value_phi = new (allocator) HPhi(
- allocator, kNoRegNumber, 0, HPhi::ToPhiType(invoke->GetType()), to->GetDexPc());
- to->AddPhi(return_value_phi);
- return_value_phi->AddInput(return_value);
- return_value_phi->AddInput(last->InputAt(0));
- return_value = return_value_phi;
- }
- }
- predecessor->AddInstruction(new (allocator) HGoto(last->GetDexPc()));
- predecessor->RemoveInstruction(last);
-
- if (saw_try_boundary) {
- predecessor = to->GetPredecessors()[pred];
- DCHECK(predecessor->EndsWithTryBoundary());
- DCHECK_EQ(predecessor->GetNormalSuccessors().size(), 1u);
- if (predecessor->GetSuccessors()[0]->GetPredecessors().size() > 1) {
- outer_graph->SplitCriticalEdge(predecessor, to);
- rerun_dominance = true;
- if (predecessor->GetLoopInformation() != nullptr) {
- rerun_loop_analysis = true;
- }
- }
- }
- }
- }
- if (rerun_loop_analysis) {
- outer_graph->RecomputeDominatorTree();
- } else if (rerun_dominance) {
- outer_graph->ClearDominanceInformation();
- outer_graph->ComputeDominanceInformation();
- }
- }
-
- // Walk over the entry block and:
- // - Move constants from the entry block to the outer_graph's entry block,
- // - Replace HParameterValue instructions with their real value,
- // - Remove suspend checks that hold an environment.
- // We must do this after the other blocks have been inlined, otherwise ids of
- // constants could overlap with the inner graph.
- size_t parameter_index = 0;
- for (HInstructionIteratorPrefetchNext it(entry_block_->GetInstructions()); !it.Done();
- it.Advance()) {
- HInstruction* current = it.Current();
- HInstruction* replacement = nullptr;
- if (current->IsNullConstant()) {
- replacement = outer_graph->GetNullConstant();
- } else if (current->IsIntConstant()) {
- replacement = outer_graph->GetIntConstant(current->AsIntConstant()->GetValue());
- } else if (current->IsLongConstant()) {
- replacement = outer_graph->GetLongConstant(current->AsLongConstant()->GetValue());
- } else if (current->IsFloatConstant()) {
- replacement = outer_graph->GetFloatConstant(current->AsFloatConstant()->GetValue());
- } else if (current->IsDoubleConstant()) {
- replacement = outer_graph->GetDoubleConstant(current->AsDoubleConstant()->GetValue());
- } else if (current->IsParameterValue()) {
- if (kIsDebugBuild &&
- invoke->IsInvokeStaticOrDirect() &&
- invoke->AsInvokeStaticOrDirect()->IsStaticWithExplicitClinitCheck()) {
- // Ensure we do not use the last input of `invoke`, as it
- // contains a clinit check which is not an actual argument.
- size_t last_input_index = invoke->InputCount() - 1;
- DCHECK(parameter_index != last_input_index);
- }
- replacement = invoke->InputAt(parameter_index++);
- } else if (current->IsCurrentMethod()) {
- replacement = outer_graph->GetCurrentMethod();
- } else {
- // It is OK to ignore MethodEntryHook for inlined functions.
- // In debug mode we don't inline, and in release mode method
- // tracing is best effort, so it is OK to ignore them.
- DCHECK(current->IsGoto() || current->IsSuspendCheck() || current->IsMethodEntryHook());
- entry_block_->RemoveInstruction(current);
- }
- if (replacement != nullptr) {
- current->ReplaceWith(replacement);
- // If `current` is the return value, then we need to update the latter.
- if (current == return_value) {
- DCHECK_EQ(entry_block_, return_value->GetBlock());
- return_value = replacement;
- }
- }
- }
-
- return return_value;
-}
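// Worked example for the removed InlineInto() above (illustrative): when a
// multi-block callee returns on two paths, `return x;` and `return y;`, the
// loop over the predecessors of `to` builds
//   return_value = Phi(x, y)
// in `to`, and that phi is returned as the replacement for the HInvoke.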
-
-/*
- * Loop will be transformed to:
- * old_pre_header
- * |
- * if_block
- * / \
- * true_block false_block
- * \ /
- * new_pre_header
- * |
- * header
- */
-void HGraph::TransformLoopHeaderForBCE(HBasicBlock* header) {
- DCHECK(header->IsLoopHeader());
- HBasicBlock* old_pre_header = header->GetDominator();
-
- // Need extra block to avoid critical edge.
- HBasicBlock* if_block = HBasicBlock::Create(allocator_, this, header->GetDexPc());
- HBasicBlock* true_block = HBasicBlock::Create(allocator_, this, header->GetDexPc());
- HBasicBlock* false_block = HBasicBlock::Create(allocator_, this, header->GetDexPc());
- HBasicBlock* new_pre_header = HBasicBlock::Create(allocator_, this, header->GetDexPc());
- AddBlock(if_block);
- AddBlock(true_block);
- AddBlock(false_block);
- AddBlock(new_pre_header);
-
- header->ReplacePredecessor(old_pre_header, new_pre_header);
- old_pre_header->successors_.clear();
- old_pre_header->dominated_blocks_.clear();
-
- old_pre_header->AddSuccessor(if_block);
- if_block->AddSuccessor(true_block); // True successor
- if_block->AddSuccessor(false_block); // False successor
- true_block->AddSuccessor(new_pre_header);
- false_block->AddSuccessor(new_pre_header);
-
- old_pre_header->dominated_blocks_.push_back(if_block);
- if_block->SetDominator(old_pre_header);
- if_block->dominated_blocks_.push_back(true_block);
- true_block->SetDominator(if_block);
- if_block->dominated_blocks_.push_back(false_block);
- false_block->SetDominator(if_block);
- if_block->dominated_blocks_.push_back(new_pre_header);
- new_pre_header->SetDominator(if_block);
- new_pre_header->dominated_blocks_.push_back(header);
- header->SetDominator(new_pre_header);
-
- // Fix reverse post order.
- size_t index_of_header = IndexOfElement(reverse_post_order_, header);
- MakeRoomFor(&reverse_post_order_, 4, index_of_header - 1);
- reverse_post_order_[index_of_header++] = if_block;
- reverse_post_order_[index_of_header++] = true_block;
- reverse_post_order_[index_of_header++] = false_block;
- reverse_post_order_[index_of_header++] = new_pre_header;
-
- // The pre_header can never be a back edge of a loop.
- DCHECK((old_pre_header->GetLoopInformation() == nullptr) ||
- !old_pre_header->GetLoopInformation()->IsBackEdge(*old_pre_header));
- UpdateLoopAndTryInformationOfNewBlock(
- if_block, old_pre_header, /* replace_if_back_edge= */ false);
- UpdateLoopAndTryInformationOfNewBlock(
- true_block, old_pre_header, /* replace_if_back_edge= */ false);
- UpdateLoopAndTryInformationOfNewBlock(
- false_block, old_pre_header, /* replace_if_back_edge= */ false);
- UpdateLoopAndTryInformationOfNewBlock(
- new_pre_header, old_pre_header, /* replace_if_back_edge= */ false);
-}
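The reverse-post-order update in TransformLoopHeaderForBCE() relies on IndexOfElement() and MakeRoomFor(). Here is a self-contained sketch of that splice with hand-rolled stand-ins for the two helpers, under the assumption that MakeRoomFor opens slots immediately after the given position, which is how the call sites above use it:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

template <typename T>
size_t IndexOfElement(const std::vector<T>& v, const T& value) {
  auto it = std::find(v.begin(), v.end(), value);
  assert(it != v.end());
  return static_cast<size_t>(it - v.begin());
}

// Open `count` empty slots immediately after index `position`.
template <typename T>
void MakeRoomFor(std::vector<T>* v, size_t count, size_t position) {
  v->insert(v->begin() + position + 1, count, T());
}

int main() {
  std::vector<std::string> rpo = {"entry", "old_pre_header", "header", "body", "exit"};
  size_t index_of_header = IndexOfElement(rpo, std::string("header"));
  MakeRoomFor(&rpo, 4, index_of_header - 1);  // new slots start at index_of_header
  rpo[index_of_header++] = "if_block";
  rpo[index_of_header++] = "true_block";
  rpo[index_of_header++] = "false_block";
  rpo[index_of_header++] = "new_pre_header";
  assert(rpo[index_of_header] == "header");  // the header now follows the new blocks
  return 0;
}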
-
- // Creates a new two-basic-block loop and inserts it between the original loop header and
- // the original loop exit; also updates dominators, the reverse post order and the loop
- // information of the new blocks.
-HBasicBlock* HGraph::TransformLoopForVectorization(HBasicBlock* header,
- HBasicBlock* body,
- HBasicBlock* exit) {
- DCHECK(header->IsLoopHeader());
- HLoopInformation* loop = header->GetLoopInformation();
-
- // Add new loop blocks.
- HBasicBlock* new_pre_header = HBasicBlock::Create(allocator_, this, header->GetDexPc());
- HBasicBlock* new_header = HBasicBlock::Create(allocator_, this, header->GetDexPc());
- HBasicBlock* new_body = HBasicBlock::Create(allocator_, this, header->GetDexPc());
- AddBlock(new_pre_header);
- AddBlock(new_header);
- AddBlock(new_body);
-
- // Set up control flow.
- header->ReplaceSuccessor(exit, new_pre_header);
- new_pre_header->AddSuccessor(new_header);
- new_header->AddSuccessor(exit);
- new_header->AddSuccessor(new_body);
- new_body->AddSuccessor(new_header);
-
- // Set up dominators.
- header->ReplaceDominatedBlock(exit, new_pre_header);
- new_pre_header->SetDominator(header);
- new_pre_header->dominated_blocks_.push_back(new_header);
- new_header->SetDominator(new_pre_header);
- new_header->dominated_blocks_.push_back(new_body);
- new_body->SetDominator(new_header);
- new_header->dominated_blocks_.push_back(exit);
- exit->SetDominator(new_header);
-
- // Fix reverse post order.
- size_t index_of_header = IndexOfElement(reverse_post_order_, header);
- MakeRoomFor(&reverse_post_order_, 2, index_of_header);
- reverse_post_order_[++index_of_header] = new_pre_header;
- reverse_post_order_[++index_of_header] = new_header;
- size_t index_of_body = IndexOfElement(reverse_post_order_, body);
- MakeRoomFor(&reverse_post_order_, 1, index_of_body - 1);
- reverse_post_order_[index_of_body] = new_body;
-
- // Add gotos and a suspend check (the client must add the conditional in the header).
- new_pre_header->AddInstruction(new (allocator_) HGoto());
- HSuspendCheck* suspend_check = new (allocator_) HSuspendCheck(header->GetDexPc());
- new_header->AddInstruction(suspend_check);
- new_body->AddInstruction(new (allocator_) HGoto());
- DCHECK(loop->GetSuspendCheck() != nullptr);
- suspend_check->CopyEnvironmentFromWithLoopPhiAdjustment(
- loop->GetSuspendCheck()->GetEnvironment(), header);
-
- // Update loop information.
- AddBackEdge(new_header, new_body);
- new_header->GetLoopInformation()->SetSuspendCheck(suspend_check);
- new_header->GetLoopInformation()->Populate();
- new_pre_header->SetLoopInformation(loop->GetPreHeader()->GetLoopInformation()); // outward
- HLoopInformationOutwardIterator it(*new_header);
- for (it.Advance(); !it.Done(); it.Advance()) {
- it.Current()->Add(new_pre_header);
- it.Current()->Add(new_header);
- it.Current()->Add(new_body);
- }
- return new_pre_header;
-}
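For symmetry with the diagram before TransformLoopHeaderForBCE(), the control flow built above can be pictured as follows (an editorial reading of the code, not a comment from the original source):

/*
 *       header
 *         |
 *   new_pre_header
 *         |
 *     new_header <----+
 *       /    \        |
 *     exit  new_body -+
 */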
-
static void CheckAgainstUpperBound(ReferenceTypeInfo rti, ReferenceTypeInfo upper_bound_rti) {
if (rti.IsValid()) {
ScopedObjectAccess soa(Thread::Current());
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 581b0f0..7804e85 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -44,6 +44,7 @@
#include "dex/invoke_type.h"
#include "dex/method_reference.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
+#include "graph.h"
#include "handle.h"
#include "handle_cache.h"
#include "instruction_list.h"
@@ -129,514 +130,11 @@
kCondLast = kCondAE,
};
-enum GraphAnalysisResult {
- kAnalysisSkipped,
- kAnalysisInvalidBytecode,
- kAnalysisFailThrowCatchLoop,
- kAnalysisFailAmbiguousArrayOp,
- kAnalysisFailIrreducibleLoopAndStringInit,
- kAnalysisFailPhiEquivalentInOsr,
- kAnalysisSuccess,
-};
-
-std::ostream& operator<<(std::ostream& os, GraphAnalysisResult ga);
-
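The operator<< declared next to the enum exists so that analysis results print by name in logs. A reduced, self-contained sketch of the pattern (the enumerator set is truncated and the printing code is illustrative, not the actual ART implementation):

#include <iostream>
#include <ostream>

// Reduced stand-in for GraphAnalysisResult, for illustration only.
enum AnalysisResult {
  kSkipped,
  kSuccess,
};

std::ostream& operator<<(std::ostream& os, AnalysisResult result) {
  switch (result) {
    case kSkipped: return os << "kSkipped";
    case kSuccess: return os << "kSuccess";
  }
  return os;
}

int main() {
  std::cout << kSuccess << '\n';  // prints "kSuccess" instead of "1"
  return 0;
}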
template <typename T>
static inline typename std::make_unsigned<T>::type MakeUnsigned(T x) {
return static_cast<typename std::make_unsigned<T>::type>(x);
}
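A short usage note for MakeUnsigned() above (example mine, not from the sources): converting through std::make_unsigned yields the value's two's-complement bit pattern, so bitwise operations on it are well defined.

#include <cstdint>
#include <type_traits>

template <typename T>
static inline typename std::make_unsigned<T>::type MakeUnsigned(T x) {
  return static_cast<typename std::make_unsigned<T>::type>(x);
}

// The unsigned counterpart of int32_t is uint32_t.
static_assert(std::is_same<decltype(MakeUnsigned(int32_t{-1})), uint32_t>::value, "");

int main() {
  // int32_t{-1} maps to 0xFFFFFFFFu; shifting the unsigned value avoids the
  // undefined behavior of left-shifting a negative signed integer.
  uint32_t u = MakeUnsigned(int32_t{-1});
  return (u == 0xFFFFFFFFu) ? 0 : 1;
}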
-// Control-flow graph of a method. Contains a list of basic blocks.
-class HGraph : public ArenaObject<kArenaAllocGraph> {
- public:
- HGraph(ArenaAllocator* allocator,
- ArenaStack* arena_stack,
- VariableSizedHandleScope* handles,
- const DexFile& dex_file,
- uint32_t method_idx,
- InstructionSet instruction_set,
- InvokeType invoke_type,
- bool dead_reference_safe = false,
- bool debuggable = false,
- CompilationKind compilation_kind = CompilationKind::kOptimized,
- int start_instruction_id = 0)
- : allocator_(allocator),
- arena_stack_(arena_stack),
- handle_cache_(handles),
- blocks_(allocator->Adapter(kArenaAllocBlockList)),
- reverse_post_order_(allocator->Adapter(kArenaAllocReversePostOrder)),
- linear_order_(allocator->Adapter(kArenaAllocLinearOrder)),
- entry_block_(nullptr),
- exit_block_(nullptr),
- number_of_vregs_(0),
- number_of_in_vregs_(0),
- temporaries_vreg_slots_(0),
- has_bounds_checks_(false),
- has_try_catch_(false),
- has_monitor_operations_(false),
- has_traditional_simd_(false),
- has_predicated_simd_(false),
- has_loops_(false),
- has_irreducible_loops_(false),
- has_direct_critical_native_call_(false),
- has_always_throwing_invokes_(false),
- dead_reference_safe_(dead_reference_safe),
- debuggable_(debuggable),
- current_instruction_id_(start_instruction_id),
- dex_file_(dex_file),
- method_idx_(method_idx),
- invoke_type_(invoke_type),
- in_ssa_form_(false),
- number_of_cha_guards_(0),
- instruction_set_(instruction_set),
- cached_null_constant_(nullptr),
- cached_int_constants_(std::less<int32_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
- cached_float_constants_(std::less<int32_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
- cached_long_constants_(std::less<int64_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
- cached_double_constants_(std::less<int64_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
- cached_current_method_(nullptr),
- art_method_(nullptr),
- compilation_kind_(compilation_kind),
- useful_optimizing_(false),
- cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) {
- blocks_.reserve(kDefaultNumberOfBlocks);
- }
-
- std::ostream& Dump(std::ostream& os,
- CodeGenerator* codegen,
- std::optional<std::reference_wrapper<const BlockNamer>> namer = std::nullopt);
-
- ArenaAllocator* GetAllocator() const { return allocator_; }
- ArenaStack* GetArenaStack() const { return arena_stack_; }
-
- HandleCache* GetHandleCache() { return &handle_cache_; }
-
- const ArenaVector<HBasicBlock*>& GetBlocks() const { return blocks_; }
-
- // An iteration range over only the blocks that are still actually in the graph (when
- // blocks are removed they are replaced with 'nullptr' in GetBlocks() to keep block-id
- // assignment simple and to avoid memmoves in the block list; see the stand-alone
- // sketch after this class declaration).
- IterationRange<FilterNull<ArenaVector<HBasicBlock*>::const_iterator>> GetActiveBlocks() const {
- return FilterOutNull(MakeIterationRange(GetBlocks()));
- }
-
- bool IsInSsaForm() const { return in_ssa_form_; }
- void SetInSsaForm() { in_ssa_form_ = true; }
-
- HBasicBlock* GetEntryBlock() const { return entry_block_; }
- HBasicBlock* GetExitBlock() const { return exit_block_; }
- bool HasExitBlock() const { return exit_block_ != nullptr; }
-
- void SetEntryBlock(HBasicBlock* block) { entry_block_ = block; }
- void SetExitBlock(HBasicBlock* block) { exit_block_ = block; }
-
- void AddBlock(HBasicBlock* block);
-
- void ComputeDominanceInformation();
- void ClearDominanceInformation();
- void ClearLoopInformation();
- void FindBackEdges(/*out*/ BitVectorView<size_t> visited);
- GraphAnalysisResult BuildDominatorTree();
- GraphAnalysisResult RecomputeDominatorTree();
- void SimplifyCFG();
- void SimplifyCatchBlocks();
-
- // Analyze all natural loops in this graph. Returns a code indicating either success
- // or the reason for failure. The method fails if a loop is a throw-catch loop,
- // i.e. if the header is a catch block.
- GraphAnalysisResult AnalyzeLoops() const;
-
- // Iterate over blocks to compute try block membership. Needs reverse post
- // order and loop information.
- void ComputeTryBlockInformation();
-
- // Inline this graph into `outer_graph`, replacing the given `invoke` instruction.
- // Returns the instruction to replace the invoke expression or null if the
- // invoke is for a void method. Note that the caller is responsible for replacing
- // and removing the invoke instruction.
- HInstruction* InlineInto(HGraph* outer_graph, HInvoke* invoke);
-
- // Update the loop and try membership of `block`, which was spawned from `reference`.
- // In case `reference` is a back edge, `replace_if_back_edge` indicates whether `block`
- // should be the new back edge.
- // `has_more_specific_try_catch_info` will be set to true when inlining a try catch.
- void UpdateLoopAndTryInformationOfNewBlock(HBasicBlock* block,
- HBasicBlock* reference,
- bool replace_if_back_edge,
- bool has_more_specific_try_catch_info = false);
-
- // Adds a couple of blocks to test whether the loop body is entered, and a
- // place to put deoptimization instructions, etc.
- void TransformLoopHeaderForBCE(HBasicBlock* header);
-
- // Adds a new loop directly after the loop with the given header and exit.
- // Returns the new preheader.
- HBasicBlock* TransformLoopForVectorization(HBasicBlock* header,
- HBasicBlock* body,
- HBasicBlock* exit);
-
- // Removes `block` from the graph. Assumes `block` has been disconnected from
- // other blocks and has no instructions or phis.
- void DeleteDeadEmptyBlock(HBasicBlock* block);
-
- // Splits the edge between `block` and `successor` while preserving the
- // indices in the predecessor/successor lists. If there are multiple edges
- // between the blocks, the lowest indices are used.
- // Returns the new block which is empty and has the same dex pc as `successor`.
- HBasicBlock* SplitEdge(HBasicBlock* block, HBasicBlock* successor);
-
- void SplitCriticalEdge(HBasicBlock* block, HBasicBlock* successor);
-
- void OrderLoopHeaderPredecessors(HBasicBlock* header);
-
- // Transform a loop into a format with a single preheader.
- //
- // Each phi in the header should be split: the original one in the header should only hold
- // inputs reachable from the back edges and a single input from the preheader. The newly
- // created phi in the preheader should collate the inputs from the original multiple
- // incoming blocks.
- //
- // Loops in the graph typically have a single preheader, so this method is used to "repair" loops
- // that no longer have this property.
- void TransformLoopToSinglePreheaderFormat(HBasicBlock* header);
-
- void SimplifyLoop(HBasicBlock* header);
-
- ALWAYS_INLINE int32_t AllocateInstructionId();
-
- int32_t GetCurrentInstructionId() const {
- return current_instruction_id_;
- }
-
- void SetCurrentInstructionId(int32_t id) {
- CHECK_GE(id, current_instruction_id_);
- current_instruction_id_ = id;
- }
-
- void UpdateTemporariesVRegSlots(size_t slots) {
- temporaries_vreg_slots_ = std::max(slots, temporaries_vreg_slots_);
- }
-
- size_t GetTemporariesVRegSlots() const {
- DCHECK(!in_ssa_form_);
- return temporaries_vreg_slots_;
- }
-
- void SetNumberOfVRegs(uint16_t number_of_vregs) {
- number_of_vregs_ = number_of_vregs;
- }
-
- uint16_t GetNumberOfVRegs() const {
- return number_of_vregs_;
- }
-
- void SetNumberOfInVRegs(uint16_t value) {
- number_of_in_vregs_ = value;
- }
-
- uint16_t GetNumberOfInVRegs() const {
- return number_of_in_vregs_;
- }
-
- uint16_t GetNumberOfLocalVRegs() const {
- DCHECK(!in_ssa_form_);
- return number_of_vregs_ - number_of_in_vregs_;
- }
-
- const ArenaVector<HBasicBlock*>& GetReversePostOrder() const {
- return reverse_post_order_;
- }
-
- ArrayRef<HBasicBlock* const> GetReversePostOrderSkipEntryBlock() const {
- DCHECK(GetReversePostOrder()[0] == entry_block_);
- return ArrayRef<HBasicBlock* const>(GetReversePostOrder()).SubArray(1);
- }
-
- IterationRange<ArenaVector<HBasicBlock*>::const_reverse_iterator> GetPostOrder() const {
- return ReverseRange(GetReversePostOrder());
- }
-
- const ArenaVector<HBasicBlock*>& GetLinearOrder() const {
- return linear_order_;
- }
-
- IterationRange<ArenaVector<HBasicBlock*>::const_reverse_iterator> GetLinearPostOrder() const {
- return ReverseRange(GetLinearOrder());
- }
-
- bool HasBoundsChecks() const {
- return has_bounds_checks_;
- }
-
- void SetHasBoundsChecks(bool value) {
- has_bounds_checks_ = value;
- }
-
- // Is the code known to be robust against eliminating dead references
- // and the effects of early finalization?
- bool IsDeadReferenceSafe() const { return dead_reference_safe_; }
-
- void MarkDeadReferenceUnsafe() { dead_reference_safe_ = false; }
-
- bool IsDebuggable() const { return debuggable_; }
-
- // Returns a constant of the given type and value. If it does not exist
- // already, it is created and inserted into the graph. This method is only for
- // integral types.
- HConstant* GetConstant(DataType::Type type, int64_t value);
-
- // TODO: This is problematic for the consistency of reference type propagation
- // because it can be created anytime after the pass and thus it will be left
- // with an invalid type.
- HNullConstant* GetNullConstant();
-
- HIntConstant* GetIntConstant(int32_t value);
- HLongConstant* GetLongConstant(int64_t value);
- HFloatConstant* GetFloatConstant(float value);
- HDoubleConstant* GetDoubleConstant(double value);
-
- HCurrentMethod* GetCurrentMethod();
-
- const DexFile& GetDexFile() const {
- return dex_file_;
- }
-
- uint32_t GetMethodIdx() const {
- return method_idx_;
- }
-
- // Get the method name (without the signature), e.g. "<init>".
- const char* GetMethodName() const;
-
- // Get the pretty method name (class + name + optionally signature).
- std::string PrettyMethod(bool with_signature = true) const;
-
- InvokeType GetInvokeType() const {
- return invoke_type_;
- }
-
- InstructionSet GetInstructionSet() const {
- return instruction_set_;
- }
-
- bool IsCompilingOsr() const { return compilation_kind_ == CompilationKind::kOsr; }
-
- bool IsCompilingBaseline() const { return compilation_kind_ == CompilationKind::kBaseline; }
-
- CompilationKind GetCompilationKind() const { return compilation_kind_; }
-
- ArenaSet<ArtMethod*>& GetCHASingleImplementationList() {
- return cha_single_implementation_list_;
- }
-
- // In case of OSR we intend to use SuspendChecks as an entry point to the
- // function; for debuggable graphs we might deoptimize to the interpreter
- // from SuspendChecks. In these cases we should always generate code for them.
- bool SuspendChecksAreAllowedToNoOp() const {
- return !IsDebuggable() && !IsCompilingOsr();
- }
-
- void AddCHASingleImplementationDependency(ArtMethod* method) {
- cha_single_implementation_list_.insert(method);
- }
-
- bool HasShouldDeoptimizeFlag() const {
- return number_of_cha_guards_ != 0 || debuggable_;
- }
-
- bool HasTryCatch() const { return has_try_catch_; }
- void SetHasTryCatch(bool value) { has_try_catch_ = value; }
-
- bool HasMonitorOperations() const { return has_monitor_operations_; }
- void SetHasMonitorOperations(bool value) { has_monitor_operations_ = value; }
-
- bool HasTraditionalSIMD() { return has_traditional_simd_; }
- void SetHasTraditionalSIMD(bool value) { has_traditional_simd_ = value; }
-
- bool HasPredicatedSIMD() { return has_predicated_simd_; }
- void SetHasPredicatedSIMD(bool value) { has_predicated_simd_ = value; }
-
- bool HasSIMD() const { return has_traditional_simd_ || has_predicated_simd_; }
-
- bool HasLoops() const { return has_loops_; }
- void SetHasLoops(bool value) { has_loops_ = value; }
-
- bool HasIrreducibleLoops() const { return has_irreducible_loops_; }
- void SetHasIrreducibleLoops(bool value) { has_irreducible_loops_ = value; }
-
- bool HasDirectCriticalNativeCall() const { return has_direct_critical_native_call_; }
- void SetHasDirectCriticalNativeCall(bool value) { has_direct_critical_native_call_ = value; }
-
- bool HasAlwaysThrowingInvokes() const { return has_always_throwing_invokes_; }
- void SetHasAlwaysThrowingInvokes(bool value) { has_always_throwing_invokes_ = value; }
-
- ArtMethod* GetArtMethod() const { return art_method_; }
- void SetArtMethod(ArtMethod* method) { art_method_ = method; }
-
- void SetProfilingInfo(ProfilingInfo* info) { profiling_info_ = info; }
- ProfilingInfo* GetProfilingInfo() const { return profiling_info_; }
-
- ReferenceTypeInfo GetInexactObjectRti() {
- return ReferenceTypeInfo::Create(handle_cache_.GetObjectClassHandle(), /* is_exact= */ false);
- }
-
- uint32_t GetNumberOfCHAGuards() const { return number_of_cha_guards_; }
- void SetNumberOfCHAGuards(uint32_t num) { number_of_cha_guards_ = num; }
- void IncrementNumberOfCHAGuards() { number_of_cha_guards_++; }
-
- void SetUsefulOptimizing() { useful_optimizing_ = true; }
- bool IsUsefulOptimizing() const { return useful_optimizing_; }
-
- private:
- static const size_t kDefaultNumberOfBlocks = 8u;
-
- void RemoveDeadBlocksInstructionsAsUsersAndDisconnect(BitVectorView<const size_t> visited) const;
- void RemoveDeadBlocks(BitVectorView<const size_t> visited);
-
- template <class InstructionType, typename ValueType>
- InstructionType* CreateConstant(ValueType value,
- ArenaSafeMap<ValueType, InstructionType*>* cache);
-
- void InsertConstant(HConstant* instruction);
-
- // Cache a float constant into the graph. This method should only be
- // called by the SsaBuilder when creating "equivalent" instructions.
- void CacheFloatConstant(HFloatConstant* constant);
-
- // See CacheFloatConstant comment.
- void CacheDoubleConstant(HDoubleConstant* constant);
-
- ArenaAllocator* const allocator_;
- ArenaStack* const arena_stack_;
-
- HandleCache handle_cache_;
-
- // List of blocks in insertion order.
- ArenaVector<HBasicBlock*> blocks_;
-
- // List of blocks to perform a reverse post order tree traversal.
- ArenaVector<HBasicBlock*> reverse_post_order_;
-
- // List of blocks to perform a linear order tree traversal. Unlike the reverse
- // post order, this order is not incrementally kept up-to-date.
- ArenaVector<HBasicBlock*> linear_order_;
-
- HBasicBlock* entry_block_;
- HBasicBlock* exit_block_;
-
- // The number of virtual registers in this method. Contains the parameters.
- uint16_t number_of_vregs_;
-
- // The number of virtual registers used by parameters of this method.
- uint16_t number_of_in_vregs_;
-
- // Number of vreg-sized slots that the temporaries use (used by the baseline compiler).
- size_t temporaries_vreg_slots_;
-
- // Flag whether there are bounds checks in the graph. We can skip
- // BCE if it's false.
- bool has_bounds_checks_;
-
- // Flag whether there are try/catch blocks in the graph. We will skip
- // try/catch-related passes if it's false.
- bool has_try_catch_;
-
- // Flag whether there are any HMonitorOperations in the graph. If so, a DexRegisterMap
- // must be present to allow deadlock analysis of non-debuggable code.
- bool has_monitor_operations_;
-
- // Flags whether SIMD (traditional or predicated) instructions appear in the graph.
- // If either is true, the code generators may have to be more careful spilling the wider
- // contents of SIMD registers.
- bool has_traditional_simd_;
- bool has_predicated_simd_;
-
- // Flag whether there are any loops in the graph. We can skip loop
- // optimization if it's false.
- bool has_loops_;
-
- // Flag whether there are any irreducible loops in the graph.
- bool has_irreducible_loops_;
-
- // Flag whether there are any direct calls to native code registered
- // for @CriticalNative methods.
- bool has_direct_critical_native_call_;
-
- // Flag whether the graph contains invokes that always throw.
- bool has_always_throwing_invokes_;
-
- // Is the code known to be robust against eliminating dead references
- // and the effects of early finalization? If false, dead reference variables
- // are kept if they might be visible to the garbage collector.
- // Currently this means that the class was declared to be dead-reference-safe,
- // the method accesses no reachability-sensitive fields or data, and the same
- // is true for any methods that were inlined into the current one.
- bool dead_reference_safe_;
-
- // Indicates whether the graph should be compiled in a way that
- // ensures full debuggability. If false, we can apply more
- // aggressive optimizations that may limit the level of debugging.
- const bool debuggable_;
-
- // The current id to assign to a newly added instruction. See HInstruction.id_.
- int32_t current_instruction_id_;
-
- // The dex file this method comes from.
- const DexFile& dex_file_;
-
- // The method index in the dex file.
- const uint32_t method_idx_;
-
- // If inlined, this encodes how the callee is being invoked.
- const InvokeType invoke_type_;
-
- // Whether the graph has been transformed to SSA form. Only used
- // in debug mode to ensure we are not using properties only valid
- // for non-SSA form (like the number of temporaries).
- bool in_ssa_form_;
-
- // Number of CHA guards in the graph. Used to short-circuit the
- // CHA guard optimization pass when there is no CHA guard left.
- uint32_t number_of_cha_guards_;
-
- const InstructionSet instruction_set_;
-
- // Cached constants.
- HNullConstant* cached_null_constant_;
- ArenaSafeMap<int32_t, HIntConstant*> cached_int_constants_;
- ArenaSafeMap<int32_t, HFloatConstant*> cached_float_constants_;
- ArenaSafeMap<int64_t, HLongConstant*> cached_long_constants_;
- ArenaSafeMap<int64_t, HDoubleConstant*> cached_double_constants_;
-
- HCurrentMethod* cached_current_method_;
-
- // The ArtMethod this graph is for. Note that for AOT, it may be null,
- // for example for methods whose declaring class could not be resolved
- // (such as when the superclass could not be found).
- ArtMethod* art_method_;
-
- // The `ProfilingInfo` associated with the method being compiled.
- ProfilingInfo* profiling_info_;
-
- // How we are compiling the graph: either optimized, osr, or baseline.
- // For osr, we treat all loops as irreducible and emit special stack maps
- // to mark compiled code entry points which the interpreter can directly
- // jump to.
- const CompilationKind compilation_kind_;
-
- // Whether, after compiling baseline, it is still useful to re-optimize
- // this method.
- bool useful_optimizing_;
-
- // List of methods that are assumed to have single implementation.
- ArenaSet<ArtMethod*> cha_single_implementation_list_;
-
- friend class SsaBuilder; // For caching constants.
- friend class SsaLivenessAnalysis; // For the linear order.
- friend class HInliner; // For the reverse post order.
- ART_FRIEND_TEST(GraphTest, IfSuccessorSimpleJoinBlock1);
- DISALLOW_COPY_AND_ASSIGN(HGraph);
-};
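And the sketch promised at GetActiveBlocks(): a stand-alone model (hypothetical Block type, a plain loop in place of FilterOutNull) of why removed blocks leave nullptr holes instead of being erased:

#include <iostream>
#include <vector>

struct Block { int id; };  // hypothetical stand-in for HBasicBlock

int main() {
  Block b0{0}, b1{1}, b2{2};
  std::vector<Block*> blocks = {&b0, &b1, &b2};
  // "Remove" b1: the slot becomes nullptr, so b2 keeps id 2 and no
  // elements need to be shifted in the vector.
  blocks[1] = nullptr;
  for (Block* b : blocks) {
    if (b == nullptr) continue;  // the filtering GetActiveBlocks() performs
    std::cout << b->id << '\n';  // prints 0 then 2
  }
  return 0;
}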
-
// Stores try/catch information for basic blocks.
// Note that HGraph is constructed so that catch blocks cannot simultaneously
// be try blocks.