Merge remote-tracking branch 'goog/master' into merge
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index c9af1c6..cde41e0 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -47,9 +47,6 @@
 # Do you want run-test to be quieter? run-tests will only show output if they fail.
 ART_TEST_QUIET ?= true
 
-# Do you want default compiler tests run?
-ART_TEST_DEFAULT_COMPILER ?= true
-
 # Do you want interpreter tests run?
 ART_TEST_INTERPRETER ?= $(ART_TEST_FULL)
 ART_TEST_INTERPRETER_ACCESS_CHECKS ?= $(ART_TEST_FULL)
@@ -58,7 +55,7 @@
 ART_TEST_JIT ?= $(ART_TEST_FULL)
 
 # Do you want optimizing compiler tests run?
-ART_TEST_OPTIMIZING ?= $(ART_TEST_FULL)
+ART_TEST_OPTIMIZING ?= true
 
 # Do we want to test a PIC-compiled core image?
 ART_TEST_PIC_IMAGE ?= $(ART_TEST_FULL)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 33242f1..426c3ca 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -247,12 +247,6 @@
   runtime/reflection_test.cc \
   compiler/compiled_method_test.cc \
   compiler/debug/dwarf/dwarf_test.cc \
-  compiler/dex/gvn_dead_code_elimination_test.cc \
-  compiler/dex/global_value_numbering_test.cc \
-  compiler/dex/local_value_numbering_test.cc \
-  compiler/dex/mir_graph_test.cc \
-  compiler/dex/mir_optimization_test.cc \
-  compiler/dex/type_inference_test.cc \
   compiler/driver/compiled_method_storage_test.cc \
   compiler/driver/compiler_driver_test.cc \
   compiler/elf_writer_test.cc \
@@ -284,7 +278,6 @@
   compiler/utils/test_dex_file_builder_test.cc \
 
 COMPILER_GTEST_COMMON_SRC_FILES_all := \
-  compiler/dex/quick/quick_cfi_test.cc \
   compiler/jni/jni_cfi_test.cc \
   compiler/optimizing/codegen_test.cc \
   compiler/optimizing/constant_folding_test.cc \
@@ -374,7 +367,6 @@
 
 COMPILER_GTEST_HOST_SRC_FILES_x86 := \
   $(COMPILER_GTEST_COMMON_SRC_FILES_x86) \
-  compiler/dex/quick/x86/quick_assemble_x86_test.cc \
   compiler/utils/x86/assembler_x86_test.cc \
 
 COMPILER_GTEST_HOST_SRC_FILES_x86_64 := \
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 11ee6dd..f12f007 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -21,40 +21,12 @@
 LIBART_COMPILER_SRC_FILES := \
 	compiled_method.cc \
 	debug/elf_debug_writer.cc \
-	dex/global_value_numbering.cc \
-	dex/gvn_dead_code_elimination.cc \
-	dex/local_value_numbering.cc \
-	dex/type_inference.cc \
-	dex/quick/codegen_util.cc \
-	dex/quick/dex_file_method_inliner.cc \
-	dex/quick/dex_file_to_method_inliner_map.cc \
-	dex/quick/gen_common.cc \
-	dex/quick/gen_invoke.cc \
-	dex/quick/gen_loadstore.cc \
-	dex/quick/lazy_debug_frame_opcode_writer.cc \
-	dex/quick/local_optimizations.cc \
-	dex/quick/mir_to_lir.cc \
-	dex/quick/quick_compiler.cc \
-	dex/quick/ralloc_util.cc \
-	dex/quick/resource_mask.cc \
 	dex/dex_to_dex_compiler.cc \
-	dex/bb_optimizations.cc \
-	dex/compiler_ir.cc \
-	dex/mir_analysis.cc \
-	dex/mir_dataflow.cc \
-	dex/mir_field_info.cc \
-	dex/mir_graph.cc \
-	dex/mir_method_info.cc \
-	dex/mir_optimization.cc \
-	dex/post_opt_passes.cc \
-	dex/pass_driver_me_opts.cc \
-	dex/pass_driver_me_post_opt.cc \
-	dex/pass_manager.cc \
-	dex/ssa_transformation.cc \
 	dex/verified_method.cc \
 	dex/verification_results.cc \
-	dex/vreg_analysis.cc \
 	dex/quick_compiler_callbacks.cc \
+	dex/quick/dex_file_method_inliner.cc \
+	dex/quick/dex_file_to_method_inliner_map.cc \
 	driver/compiled_method_storage.cc \
 	driver/compiler_driver.cc \
 	driver/compiler_options.cc \
@@ -111,12 +83,6 @@
 	oat_writer.cc
 
 LIBART_COMPILER_SRC_FILES_arm := \
-	dex/quick/arm/assemble_arm.cc \
-	dex/quick/arm/call_arm.cc \
-	dex/quick/arm/fp_arm.cc \
-	dex/quick/arm/int_arm.cc \
-	dex/quick/arm/target_arm.cc \
-	dex/quick/arm/utility_arm.cc \
 	jni/quick/arm/calling_convention_arm.cc \
 	linker/arm/relative_patcher_arm_base.cc \
 	linker/arm/relative_patcher_thumb2.cc \
@@ -133,12 +99,6 @@
 # 32bit one.
 LIBART_COMPILER_SRC_FILES_arm64 := \
     $(LIBART_COMPILER_SRC_FILES_arm) \
-	dex/quick/arm64/assemble_arm64.cc \
-	dex/quick/arm64/call_arm64.cc \
-	dex/quick/arm64/fp_arm64.cc \
-	dex/quick/arm64/int_arm64.cc \
-	dex/quick/arm64/target_arm64.cc \
-	dex/quick/arm64/utility_arm64.cc \
 	jni/quick/arm64/calling_convention_arm64.cc \
 	linker/arm64/relative_patcher_arm64.cc \
 	optimizing/code_generator_arm64.cc \
@@ -150,12 +110,6 @@
 	utils/arm64/managed_register_arm64.cc \
 
 LIBART_COMPILER_SRC_FILES_mips := \
-	dex/quick/mips/assemble_mips.cc \
-	dex/quick/mips/call_mips.cc \
-	dex/quick/mips/fp_mips.cc \
-	dex/quick/mips/int_mips.cc \
-	dex/quick/mips/target_mips.cc \
-	dex/quick/mips/utility_mips.cc \
 	jni/quick/mips/calling_convention_mips.cc \
 	optimizing/code_generator_mips.cc \
 	optimizing/intrinsics_mips.cc \
@@ -172,12 +126,6 @@
 
 
 LIBART_COMPILER_SRC_FILES_x86 := \
-	dex/quick/x86/assemble_x86.cc \
-	dex/quick/x86/call_x86.cc \
-	dex/quick/x86/fp_x86.cc \
-	dex/quick/x86/int_x86.cc \
-	dex/quick/x86/target_x86.cc \
-	dex/quick/x86/utility_x86.cc \
 	jni/quick/x86/calling_convention_x86.cc \
 	linker/x86/relative_patcher_x86.cc \
 	linker/x86/relative_patcher_x86_base.cc \
@@ -200,26 +148,20 @@
 LIBART_COMPILER_CFLAGS :=
 
 LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
-  dex/quick/resource_mask.h \
   dex/compiler_enums.h \
   dex/dex_to_dex_compiler.h \
-  dex/global_value_numbering.h \
-  dex/pass_me.h \
   driver/compiler_driver.h \
   driver/compiler_options.h \
   image_writer.h \
   optimizing/locations.h
 
 LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm := \
-  dex/quick/arm/arm_lir.h \
   utils/arm/constants_arm.h
 
 LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm64 := \
-  $(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm) \
-  dex/quick/arm64/arm64_lir.h
+  $(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm)
 
 LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips := \
-  dex/quick/mips/mips_lir.h \
   utils/mips/assembler_mips.h
 
 LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips64 := \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 6075cd6..6483ef6 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -21,7 +21,6 @@
 #include "art_method.h"
 #include "class_linker.h"
 #include "compiled_method.h"
-#include "dex/pass_manager.h"
 #include "dex/quick_compiler_callbacks.h"
 #include "dex/quick/dex_file_to_method_inliner_map.h"
 #include "dex/verification_results.h"
diff --git a/compiler/compiler.cc b/compiler/compiler.cc
index 223affa..1626317 100644
--- a/compiler/compiler.cc
+++ b/compiler/compiler.cc
@@ -17,7 +17,6 @@
 #include "compiler.h"
 
 #include "base/logging.h"
-#include "dex/quick/quick_compiler_factory.h"
 #include "driver/compiler_driver.h"
 #include "optimizing/optimizing_compiler.h"
 #include "utils.h"
@@ -27,8 +26,7 @@
 Compiler* Compiler::Create(CompilerDriver* driver, Compiler::Kind kind) {
   switch (kind) {
     case kQuick:
-      return CreateQuickCompiler(driver);
-
+      // TODO: Remove Quick in options.
     case kOptimizing:
       return CreateOptimizingCompiler(driver);
 
diff --git a/compiler/dex/bb_optimizations.cc b/compiler/dex/bb_optimizations.cc
deleted file mode 100644
index 11a7e44..0000000
--- a/compiler/dex/bb_optimizations.cc
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "bb_optimizations.h"
-#include "dataflow_iterator.h"
-#include "dataflow_iterator-inl.h"
-
-namespace art {
-
-/*
- * Code Layout pass implementation start.
- */
-bool CodeLayout::Worker(PassDataHolder* data) const {
-  DCHECK(data != nullptr);
-  PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-  CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-  DCHECK(c_unit != nullptr);
-  BasicBlock* bb = pass_me_data_holder->bb;
-  DCHECK(bb != nullptr);
-  c_unit->mir_graph->LayoutBlocks(bb);
-  // No need of repeating, so just return false.
-  return false;
-}
-
-/*
- * BasicBlock Combine pass implementation start.
- */
-bool BBCombine::Worker(PassDataHolder* data) const {
-  DCHECK(data != nullptr);
-  PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-  CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-  DCHECK(c_unit != nullptr);
-  BasicBlock* bb = pass_me_data_holder->bb;
-  DCHECK(bb != nullptr);
-  c_unit->mir_graph->CombineBlocks(bb);
-
-  // No need of repeating, so just return false.
-  return false;
-}
-
-/*
- * MethodUseCount pass implementation start.
- */
-bool MethodUseCount::Gate(const PassDataHolder* data) const {
-  DCHECK(data != nullptr);
-  CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-  DCHECK(c_unit != nullptr);
-  // First initialize the data.
-  c_unit->mir_graph->InitializeMethodUses();
-
-  // Now check if the pass is to be ignored.
-  bool res = ((c_unit->disable_opt & (1 << kPromoteRegs)) == 0);
-
-  return res;
-}
-
-bool MethodUseCount::Worker(PassDataHolder* data) const {
-  DCHECK(data != nullptr);
-  PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-  CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-  DCHECK(c_unit != nullptr);
-  BasicBlock* bb = pass_me_data_holder->bb;
-  DCHECK(bb != nullptr);
-  c_unit->mir_graph->CountUses(bb);
-  // No need of repeating, so just return false.
-  return false;
-}
-
-}  // namespace art
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
deleted file mode 100644
index 02d5327..0000000
--- a/compiler/dex/bb_optimizations.h
+++ /dev/null
@@ -1,452 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_BB_OPTIMIZATIONS_H_
-#define ART_COMPILER_DEX_BB_OPTIMIZATIONS_H_
-
-#include "base/casts.h"
-#include "compiler_ir.h"
-#include "dex_flags.h"
-#include "pass_me.h"
-#include "mir_graph.h"
-
-namespace art {
-
-/**
- * @class String Change
- * @brief Converts calls to String.<init> to StringFactory instead.
- */
-class StringChange : public PassME {
- public:
-  StringChange() : PassME("StringChange", kNoNodes) {
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->StringChange();
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->HasInvokes();
-  }
-};
-
-/**
- * @class CacheFieldLoweringInfo
- * @brief Cache the lowering info for fields used by IGET/IPUT/SGET/SPUT insns.
- */
-class CacheFieldLoweringInfo : public PassME {
- public:
-  CacheFieldLoweringInfo() : PassME("CacheFieldLoweringInfo", kNoNodes) {
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->DoCacheFieldLoweringInfo();
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->HasFieldAccess();
-  }
-};
-
-/**
- * @class CacheMethodLoweringInfo
- * @brief Cache the lowering info for methods called by INVOKEs.
- */
-class CacheMethodLoweringInfo : public PassME {
- public:
-  CacheMethodLoweringInfo() : PassME("CacheMethodLoweringInfo", kNoNodes) {
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->DoCacheMethodLoweringInfo();
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->HasInvokes();
-  }
-};
-
-/**
- * @class SpecialMethodInliner
- * @brief Performs method inlining pass on special kinds of methods.
- * @details Special methods are methods that fall in one of the following categories:
- * empty, instance getter, instance setter, argument return, and constant return.
- */
-class SpecialMethodInliner : public PassME {
- public:
-  SpecialMethodInliner() : PassME("SpecialMethodInliner") {
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->InlineSpecialMethodsGate();
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->InlineSpecialMethodsStart();
-  }
-
-  bool Worker(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-    CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-    DCHECK(c_unit != nullptr);
-    BasicBlock* bb = pass_me_data_holder->bb;
-    DCHECK(bb != nullptr);
-    c_unit->mir_graph->InlineSpecialMethods(bb);
-    // No need of repeating, so just return false.
-    return false;
-  }
-
-  void End(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->InlineSpecialMethodsEnd();
-  }
-};
-
-/**
- * @class CodeLayout
- * @brief Perform the code layout pass.
- */
-class CodeLayout : public PassME {
- public:
-  CodeLayout() : PassME("CodeLayout", kAllNodes, kOptimizationBasicBlockChange, "2_post_layout_cfg") {
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->VerifyDataflow();
-    c_unit->mir_graph->ClearAllVisitedFlags();
-  }
-
-  bool Worker(PassDataHolder* data) const;
-};
-
-/**
- * @class NullCheckElimination
- * @brief Null check elimination pass.
- */
-class NullCheckElimination : public PassME {
- public:
-  NullCheckElimination()
-    : PassME("NCE", kRepeatingPreOrderDFSTraversal, "3_post_nce_cfg") {
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->EliminateNullChecksGate();
-  }
-
-  bool Worker(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-    CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-    DCHECK(c_unit != nullptr);
-    BasicBlock* bb = pass_me_data_holder->bb;
-    DCHECK(bb != nullptr);
-    return c_unit->mir_graph->EliminateNullChecks(bb);
-  }
-
-  void End(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->EliminateNullChecksEnd();
-  }
-};
-
-class ClassInitCheckElimination : public PassME {
- public:
-  ClassInitCheckElimination()
-    : PassME("ClInitCheckElimination", kRepeatingPreOrderDFSTraversal) {
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->EliminateClassInitChecksGate();
-  }
-
-  bool Worker(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-    CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-    DCHECK(c_unit != nullptr);
-    BasicBlock* bb = pass_me_data_holder->bb;
-    DCHECK(bb != nullptr);
-    return c_unit->mir_graph->EliminateClassInitChecks(bb);
-  }
-
-  void End(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->EliminateClassInitChecksEnd();
-  }
-};
-
-/**
- * @class GlobalValueNumberingPass
- * @brief Performs the global value numbering pass.
- */
-class GlobalValueNumberingPass : public PassME {
- public:
-  GlobalValueNumberingPass()
-    : PassME("GVN", kLoopRepeatingTopologicalSortTraversal, "4_post_gvn_cfg") {
-  }
-
-  bool Gate(const PassDataHolder* data) const OVERRIDE {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->ApplyGlobalValueNumberingGate();
-  }
-
-  bool Worker(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-    CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-    DCHECK(c_unit != nullptr);
-    BasicBlock* bb = pass_me_data_holder->bb;
-    DCHECK(bb != nullptr);
-    return c_unit->mir_graph->ApplyGlobalValueNumbering(bb);
-  }
-
-  void End(PassDataHolder* data) const OVERRIDE {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->ApplyGlobalValueNumberingEnd();
-  }
-};
-
-/**
- * @class DeadCodeEliminationPass
- * @brief Performs the GVN-based dead code elimination pass.
- */
-class DeadCodeEliminationPass : public PassME {
- public:
-  DeadCodeEliminationPass() : PassME("DCE", kPreOrderDFSTraversal, "4_post_dce_cfg") {
-  }
-
-  bool Gate(const PassDataHolder* data) const OVERRIDE {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->EliminateDeadCodeGate();
-  }
-
-  bool Worker(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-    CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-    DCHECK(c_unit != nullptr);
-    BasicBlock* bb = pass_me_data_holder->bb;
-    DCHECK(bb != nullptr);
-    return c_unit->mir_graph->EliminateDeadCode(bb);
-  }
-
-  void End(PassDataHolder* data) const OVERRIDE {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->EliminateDeadCodeEnd();
-  }
-};
-
-/**
- * @class GlobalValueNumberingCleanupPass
- * @brief Performs the cleanup after global value numbering pass and the dependent
- *        dead code elimination pass that needs the GVN data.
- */
-class GlobalValueNumberingCleanupPass : public PassME {
- public:
-  GlobalValueNumberingCleanupPass()
-    : PassME("GVNCleanup", kNoNodes, "") {
-  }
-
-  void Start(PassDataHolder* data) const OVERRIDE {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->GlobalValueNumberingCleanup();
-  }
-};
-
-/**
- * @class BBCombine
- * @brief Perform the basic block combination pass.
- */
-class BBCombine : public PassME {
- public:
-  BBCombine() : PassME("BBCombine", kPreOrderDFSTraversal, "5_post_bbcombine_cfg") {
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->HasTryCatchBlocks() ||
-        ((c_unit->disable_opt & (1 << kSuppressExceptionEdges)) != 0);
-  }
-
-  bool Worker(PassDataHolder* data) const;
-};
-
-/**
- * @class ConstantPropagation
- * @brief Perform a constant propagation pass.
- */
-class ConstantPropagation : public PassME {
- public:
-  ConstantPropagation() : PassME("ConstantPropagation") {
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->InitializeConstantPropagation();
-  }
-
-  bool Worker(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    BasicBlock* bb = down_cast<PassMEDataHolder*>(data)->bb;
-    DCHECK(bb != nullptr);
-    c_unit->mir_graph->DoConstantPropagation(bb);
-    // No need of repeating, so just return false.
-    return false;
-  }
-};
-
-/**
- * @class MethodUseCount
- * @brief Count the register uses of the method
- */
-class MethodUseCount : public PassME {
- public:
-  MethodUseCount() : PassME("UseCount") {
-  }
-
-  bool Worker(PassDataHolder* data) const;
-
-  bool Gate(const PassDataHolder* data) const;
-};
-
-/**
- * @class BasicBlock Optimizations
- * @brief Any simple BasicBlock optimization can be put here.
- */
-class BBOptimizations : public PassME {
- public:
-  BBOptimizations()
-      : PassME("BBOptimizations", kNoNodes, kOptimizationBasicBlockChange, "5_post_bbo_cfg") {
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return ((c_unit->disable_opt & (1 << kBBOpt)) == 0);
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->BasicBlockOptimizationStart();
-
-    /*
-     * This pass has a different ordering depending on the suppress exception,
-     * so do the pass here for now:
-     *   - Later, the Start should just change the ordering and we can move the extended
-     *     creation into the pass driver's main job with a new iterator
-     */
-    c_unit->mir_graph->BasicBlockOptimization();
-  }
-
-  void End(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->BasicBlockOptimizationEnd();
-    down_cast<PassMEDataHolder*>(data)->dirty = !c_unit->mir_graph->DfsOrdersUpToDate();
-  }
-};
-
-/**
- * @class SuspendCheckElimination
- * @brief Any simple BasicBlock optimization can be put here.
- */
-class SuspendCheckElimination : public PassME {
- public:
-  SuspendCheckElimination()
-    : PassME("SuspendCheckElimination", kTopologicalSortTraversal, "6_post_sce_cfg") {
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->EliminateSuspendChecksGate();
-  }
-
-  bool Worker(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-    CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-    DCHECK(c_unit != nullptr);
-    BasicBlock* bb = pass_me_data_holder->bb;
-    DCHECK(bb != nullptr);
-    return c_unit->mir_graph->EliminateSuspendChecks(bb);
-  }
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_BB_OPTIMIZATIONS_H_
diff --git a/compiler/dex/compiler_ir.cc b/compiler/dex/compiler_ir.cc
deleted file mode 100644
index 6e1853b..0000000
--- a/compiler/dex/compiler_ir.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "compiler_ir.h"
-
-#include "arch/instruction_set_features.h"
-#include "base/dumpable.h"
-#include "dex_flags.h"
-#include "dex/quick/mir_to_lir.h"
-#include "driver/compiler_driver.h"
-#include "mir_graph.h"
-#include "utils.h"
-
-namespace art {
-
-CompilationUnit::CompilationUnit(ArenaPool* pool, InstructionSet isa, CompilerDriver* driver,
-                                 ClassLinker* linker)
-  : compiler_driver(driver),
-    class_linker(linker),
-    dex_file(nullptr),
-    class_loader(nullptr),
-    class_def_idx(0),
-    method_idx(0),
-    access_flags(0),
-    invoke_type(kDirect),
-    shorty(nullptr),
-    disable_opt(0),
-    enable_debug(0),
-    verbose(false),
-    instruction_set(isa),
-    target64(Is64BitInstructionSet(isa)),
-    arena(pool),
-    arena_stack(pool),
-    mir_graph(nullptr),
-    cg(nullptr),
-    timings("QuickCompiler", true, false),
-    print_pass(false) {
-}
-
-CompilationUnit::~CompilationUnit() {
-  overridden_pass_options.clear();
-}
-
-void CompilationUnit::StartTimingSplit(const char* label) {
-  if (compiler_driver->GetDumpPasses()) {
-    timings.StartTiming(label);
-  }
-}
-
-void CompilationUnit::NewTimingSplit(const char* label) {
-  if (compiler_driver->GetDumpPasses()) {
-    timings.EndTiming();
-    timings.StartTiming(label);
-  }
-}
-
-void CompilationUnit::EndTiming() {
-  if (compiler_driver->GetDumpPasses()) {
-    timings.EndTiming();
-    if (enable_debug & (1 << kDebugTimings)) {
-      LOG(INFO) << "TIMINGS " << PrettyMethod(method_idx, *dex_file);
-      LOG(INFO) << Dumpable<TimingLogger>(timings);
-    }
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
deleted file mode 100644
index 5203355..0000000
--- a/compiler/dex/compiler_ir.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_COMPILER_IR_H_
-#define ART_COMPILER_DEX_COMPILER_IR_H_
-
-#include "jni.h"
-#include <string>
-#include <vector>
-
-#include "arch/instruction_set.h"
-#include "base/arena_allocator.h"
-#include "base/scoped_arena_allocator.h"
-#include "base/timing_logger.h"
-#include "invoke_type.h"
-#include "safe_map.h"
-
-namespace art {
-
-class ClassLinker;
-class CompilerDriver;
-class DexFile;
-class Mir2Lir;
-class MIRGraph;
-
-constexpr size_t kOptionStringMaxLength = 2048;
-
-/**
- * Structure abstracting pass option values, which can be of type string or integer.
- */
-struct OptionContent {
-  OptionContent(const OptionContent& option) :
-    type(option.type), container(option.container, option.type) {}
-
-  explicit OptionContent(const char* value) :
-    type(kString), container(value) {}
-
-  explicit OptionContent(int value) :
-    type(kInteger), container(value) {}
-
-  explicit OptionContent(int64_t value) :
-    type(kInteger), container(value) {}
-
-  ~OptionContent() {
-    if (type == kString) {
-      container.StringDelete();
-    }
-  }
-
-  /**
-   * Allows for a transparent display of the option content.
-   */
-  friend std::ostream& operator<<(std::ostream& out, const OptionContent& option) {
-    if (option.type == kString) {
-      out << option.container.s;
-    } else {
-      out << option.container.i;
-    }
-
-    return out;
-  }
-
-  inline const char* GetString() const {
-    return container.s;
-  }
-
-  inline int64_t GetInteger() const {
-    return container.i;
-  }
-
-  /**
-   * @brief Used to compare a string option value to a given @p value.
-   * @details Will return whether the internal string option is equal to
-   * the parameter @p value. It will return false if the type of the
-   * object is not a string.
-   * @param value The string to compare to.
-   * @return Returns whether the internal string option is equal to the
-   * parameter @p value.
-  */
-  inline bool Equals(const char* value) const {
-    DCHECK(value != nullptr);
-    if (type != kString) {
-      return false;
-    }
-    return !strncmp(container.s, value, kOptionStringMaxLength);
-  }
-
-  /**
-   * @brief Used to compare an integer option value to a given @p value.
-   * @details Will return whether the internal integer option is equal to
-   * the parameter @p value. It will return false if the type of the
-   * object is not an integer.
-   * @param value The integer to compare to.
-   * @return Returns whether the internal integer option is equal to the
-   * parameter @p value.
-  */
-  inline bool Equals(int64_t value) const {
-    if (type != kInteger) {
-      return false;
-    }
-    return container.i == value;
-  }
-
-  /**
-   * Describes the type of parameters allowed as option values.
-   */
-  enum OptionType {
-    kString = 0,
-    kInteger
-  };
-
-  OptionType type;
-
- private:
-  /**
-   * Union containing the option value of either type.
-   */
-  union OptionContainer {
-    OptionContainer(const OptionContainer& c, OptionType t) {
-      if (t == kString) {
-        DCHECK(c.s != nullptr);
-        s = strndup(c.s, kOptionStringMaxLength);
-      } else {
-        i = c.i;
-      }
-    }
-
-    explicit OptionContainer(const char* value) {
-      DCHECK(value != nullptr);
-      s = strndup(value, kOptionStringMaxLength);
-    }
-
-    explicit OptionContainer(int64_t value) : i(value) {}
-    ~OptionContainer() {}
-
-    void StringDelete() {
-      if (s != nullptr) {
-        free(s);
-      }
-    }
-
-    char* s;
-    int64_t i;
-  };
-
-  OptionContainer container;
-};
-
-struct CompilationUnit {
-  CompilationUnit(ArenaPool* pool, InstructionSet isa, CompilerDriver* driver, ClassLinker* linker);
-  ~CompilationUnit();
-
-  void StartTimingSplit(const char* label);
-  void NewTimingSplit(const char* label);
-  void EndTiming();
-
-  /*
-   * Fields needed/generated by common frontend and generally used throughout
-   * the compiler.
-  */
-  CompilerDriver* const compiler_driver;
-  ClassLinker* const class_linker;        // Linker to resolve fields and methods.
-  const DexFile* dex_file;                // DexFile containing the method being compiled.
-  jobject class_loader;                   // compiling method's class loader.
-  uint16_t class_def_idx;                 // compiling method's defining class definition index.
-  uint32_t method_idx;                    // compiling method's index into method_ids of DexFile.
-  uint32_t access_flags;                  // compiling method's access flags.
-  InvokeType invoke_type;                 // compiling method's invocation type.
-  const char* shorty;                     // compiling method's shorty.
-  uint32_t disable_opt;                   // opt_control_vector flags.
-  uint32_t enable_debug;                  // debugControlVector flags.
-  bool verbose;
-  const InstructionSet instruction_set;
-  const bool target64;
-
-  // TODO: move memory management to mir_graph, or just switch to using standard containers.
-  ArenaAllocator arena;
-  ArenaStack arena_stack;  // Arenas for ScopedArenaAllocator.
-
-  std::unique_ptr<MIRGraph> mir_graph;   // MIR container.
-  std::unique_ptr<Mir2Lir> cg;           // Target-specific codegen.
-  TimingLogger timings;
-  bool print_pass;                 // Do we want to print a pass or not?
-
-  /**
-   * @brief Holds pass options for current pass being applied to compilation unit.
-   * @details This is updated for every pass to contain the overridden pass options
-   * that were specified by user. The pass itself will check this to see if the
-   * default settings have been changed. The key is simply the option string without
-   * the pass name.
-   */
-  SafeMap<const std::string, const OptionContent> overridden_pass_options;
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_COMPILER_IR_H_
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
deleted file mode 100644
index e9402e3..0000000
--- a/compiler/dex/dataflow_iterator-inl.h
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_DATAFLOW_ITERATOR_INL_H_
-#define ART_COMPILER_DEX_DATAFLOW_ITERATOR_INL_H_
-
-#include "dataflow_iterator.h"
-
-namespace art {
-
-// Single forward pass over the nodes.
-inline BasicBlock* DataflowIterator::ForwardSingleNext() {
-  BasicBlock* res = nullptr;
-
-  // Are we not yet at the end?
-  if (idx_ < end_idx_) {
-    // Get the next index.
-    BasicBlockId bb_id = (*block_id_list_)[idx_];
-    res = mir_graph_->GetBasicBlock(bb_id);
-    idx_++;
-  }
-
-  return res;
-}
-
-// Repeat full forward passes over all nodes until no change occurs during a complete pass.
-inline BasicBlock* DataflowIterator::ForwardRepeatNext() {
-  BasicBlock* res = nullptr;
-
-  // Are we at the end and have we changed something?
-  if ((idx_ >= end_idx_) && changed_ == true) {
-    // Reset the index.
-    idx_ = start_idx_;
-    repeats_++;
-    changed_ = false;
-  }
-
-  // Are we not yet at the end?
-  if (idx_ < end_idx_) {
-    // Get the BasicBlockId.
-    BasicBlockId bb_id = (*block_id_list_)[idx_];
-    res = mir_graph_->GetBasicBlock(bb_id);
-    idx_++;
-  }
-
-  return res;
-}
-
-// Single reverse pass over the nodes.
-inline BasicBlock* DataflowIterator::ReverseSingleNext() {
-  BasicBlock* res = nullptr;
-
-  // Are we not yet at the end?
-  if (idx_ >= 0) {
-    // Get the BasicBlockId.
-    BasicBlockId bb_id = (*block_id_list_)[idx_];
-    res = mir_graph_->GetBasicBlock(bb_id);
-    idx_--;
-  }
-
-  return res;
-}
-
-// Repeat full backwards passes over all nodes until no change occurs during a complete pass.
-inline BasicBlock* DataflowIterator::ReverseRepeatNext() {
-  BasicBlock* res = nullptr;
-
-  // Are we done and we changed something during the last iteration?
-  if ((idx_ < 0) && changed_) {
-    // Reset the index.
-    idx_ = start_idx_;
-    repeats_++;
-    changed_ = false;
-  }
-
-  // Are we not yet done?
-  if (idx_ >= 0) {
-    // Get the BasicBlockId.
-    BasicBlockId bb_id = (*block_id_list_)[idx_];
-    res = mir_graph_->GetBasicBlock(bb_id);
-    idx_--;
-  }
-
-  return res;
-}
-
-// AllNodes uses the existing block list, and should be considered unordered.
-inline BasicBlock* AllNodesIterator::Next(bool had_change) {
-  // Update changed: if had_changed is true, we remember it for the whole iteration.
-  changed_ |= had_change;
-
-  BasicBlock* res = nullptr;
-  while (idx_ != end_idx_) {
-    BasicBlock* bb = mir_graph_->GetBlockList()[idx_++];
-    DCHECK(bb != nullptr);
-    if (!bb->hidden) {
-      res = bb;
-      break;
-    }
-  }
-
-  return res;
-}
-
-inline BasicBlock* TopologicalSortIterator::Next(bool had_change) {
-  // Update changed: if had_changed is true, we remember it for the whole iteration.
-  changed_ |= had_change;
-
-  while (loop_head_stack_->size() != 0u &&
-      (*loop_ends_)[loop_head_stack_->back().first] == idx_) {
-    loop_head_stack_->pop_back();
-  }
-
-  if (idx_ == end_idx_) {
-    return nullptr;
-  }
-
-  // Get next block and return it.
-  BasicBlockId idx = idx_;
-  idx_ += 1;
-  BasicBlock* bb = mir_graph_->GetBasicBlock((*block_id_list_)[idx]);
-  DCHECK(bb != nullptr);
-  if ((*loop_ends_)[idx] != 0u) {
-    loop_head_stack_->push_back(std::make_pair(idx, false));  // Not recalculating.
-  }
-  return bb;
-}
-
-inline BasicBlock* LoopRepeatingTopologicalSortIterator::Next(bool had_change) {
-  if (idx_ != 0) {
-    // Mark last processed block visited.
-    BasicBlock* bb = mir_graph_->GetBasicBlock((*block_id_list_)[idx_ - 1]);
-    bb->visited = true;
-    if (had_change) {
-      // If we had a change we need to revisit the children.
-      ChildBlockIterator iter(bb, mir_graph_);
-      for (BasicBlock* child_bb = iter.Next(); child_bb != nullptr; child_bb = iter.Next()) {
-        child_bb->visited = false;
-      }
-    }
-  }
-
-  while (true) {
-    // Pop loops we have left and check if we need to recalculate one of them.
-    // NOTE: We need to do this even if idx_ == end_idx_.
-    while (loop_head_stack_->size() != 0u &&
-        (*loop_ends_)[loop_head_stack_->back().first] == idx_) {
-      auto top = loop_head_stack_->back();
-      uint16_t loop_head_idx = top.first;
-      bool recalculated = top.second;
-      loop_head_stack_->pop_back();
-      BasicBlock* loop_head = mir_graph_->GetBasicBlock((*block_id_list_)[loop_head_idx]);
-      DCHECK(loop_head != nullptr);
-      if (!recalculated || !loop_head->visited) {
-        // Recalculating this loop.
-        loop_head_stack_->push_back(std::make_pair(loop_head_idx, true));
-        idx_ = loop_head_idx + 1;
-        return loop_head;
-      }
-    }
-
-    if (idx_ == end_idx_) {
-      return nullptr;
-    }
-
-    // Get next block and return it if unvisited.
-    BasicBlockId idx = idx_;
-    idx_ += 1;
-    BasicBlock* bb = mir_graph_->GetBasicBlock((*block_id_list_)[idx]);
-    DCHECK(bb != nullptr);
-    if ((*loop_ends_)[idx] != 0u) {
-      // If bb->visited is false, the loop needs to be processed from scratch.
-      // Otherwise we mark it as recalculating; for a natural loop we will not
-      // need to recalculate any block in the loop anyway, and for unnatural
-      // loops we will recalculate the loop head only if one of its predecessors
-      // actually changes.
-      bool recalculating = bb->visited;
-      loop_head_stack_->push_back(std::make_pair(idx, recalculating));
-    }
-    if (!bb->visited) {
-      return bb;
-    }
-  }
-}
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_DATAFLOW_ITERATOR_INL_H_
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
deleted file mode 100644
index 097c2a4..0000000
--- a/compiler/dex/dataflow_iterator.h
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_DATAFLOW_ITERATOR_H_
-#define ART_COMPILER_DEX_DATAFLOW_ITERATOR_H_
-
-#include "base/logging.h"
-#include "mir_graph.h"
-
-namespace art {
-
-  /*
-   * This class supports iterating over lists of basic blocks in various
-   * interesting orders.  Note that for efficiency, the visit orders have been pre-computed.
-   * The order itself will not change during the iteration.  However, for some uses,
-   * auxiliary data associated with the basic blocks may be changed during the iteration,
-   * necessitating another pass over the list.  If this behavior is required, use the
-   * "Repeating" variant.  For the repeating variant, the caller must tell the iterator
-   * whether a change has been made that necessitates another pass.  Note that calling Next(true)
-   * does not affect the iteration order or short-circuit the current pass - it simply tells
-   * the iterator that once it has finished walking through the block list it should reset and
-   * do another full pass through the list.
-   */
-  /**
-   * @class DataflowIterator
-   * @brief The main iterator class, all other iterators derive of this one to define an iteration order.
-   */
-  class DataflowIterator {
-    public:
-      virtual ~DataflowIterator() {}
-
-      /**
-       * @brief How many times have we repeated the iterator across the BasicBlocks?
-       * @return the number of iteration repetitions.
-       */
-      int32_t GetRepeatCount() { return repeats_; }
-
-      /**
-       * @brief Has the user of the iterator reported a change yet?
-       * @details Does not mean there was or not a change, it is only whether the user passed a true to the Next function call.
-       * @return whether the user of the iterator reported a change yet.
-       */
-      int32_t GetChanged() { return changed_; }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) = 0;
-
-    protected:
-      /**
-       * @param mir_graph the MIRGraph we are interested in.
-       * @param start_idx the first index we want to iterate across.
-       * @param end_idx the last index we want to iterate (not included).
-       */
-      DataflowIterator(MIRGraph* mir_graph, int32_t start_idx, int32_t end_idx)
-          : mir_graph_(mir_graph),
-            start_idx_(start_idx),
-            end_idx_(end_idx),
-            block_id_list_(nullptr),
-            idx_(0),
-            repeats_(0),
-            changed_(false) {}
-
-      /**
-       * @brief Get the next BasicBlock iterating forward.
-       * @return the next BasicBlock iterating forward.
-       */
-      virtual BasicBlock* ForwardSingleNext() ALWAYS_INLINE;
-
-      /**
-       * @brief Get the next BasicBlock iterating backward.
-       * @return the next BasicBlock iterating backward.
-       */
-      virtual BasicBlock* ReverseSingleNext() ALWAYS_INLINE;
-
-      /**
-       * @brief Get the next BasicBlock iterating forward, restart if a BasicBlock was reported changed during the last iteration.
-       * @return the next BasicBlock iterating forward, with chance of repeating the iteration.
-       */
-      virtual BasicBlock* ForwardRepeatNext() ALWAYS_INLINE;
-
-      /**
-       * @brief Get the next BasicBlock iterating backward, restart if a BasicBlock was reported changed during the last iteration.
-       * @return the next BasicBlock iterating backward, with chance of repeating the iteration.
-       */
-      virtual BasicBlock* ReverseRepeatNext() ALWAYS_INLINE;
-
-      MIRGraph* const mir_graph_;                       /**< @brief the MIRGraph */
-      const int32_t start_idx_;                         /**< @brief the start index for the iteration */
-      const int32_t end_idx_;                           /**< @brief the last index for the iteration */
-      const ArenaVector<BasicBlockId>* block_id_list_;  /**< @brief the list of BasicBlocks we want to iterate on */
-      int32_t idx_;                                     /**< @brief Current index for the iterator */
-      int32_t repeats_;                                 /**< @brief Number of repeats over the iteration */
-      bool changed_;                                    /**< @brief Has something changed during the current iteration? */
-  };  // DataflowIterator
-
-  /**
-   * @class PreOrderDfsIterator
-   * @brief Used to perform a Pre-order Depth-First-Search Iteration of a MIRGraph.
-   */
-  class PreOrderDfsIterator : public DataflowIterator {
-    public:
-      /**
-       * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-       * @param mir_graph The MIRGraph considered.
-       */
-      explicit PreOrderDfsIterator(MIRGraph* mir_graph)
-          : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
-        // Extra setup for the PreOrderDfsIterator.
-        idx_ = start_idx_;
-        block_id_list_ = &mir_graph->GetDfsOrder();
-      }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) {
-        // Update changed: if had_changed is true, we remember it for the whole iteration.
-        changed_ |= had_change;
-
-        return ForwardSingleNext();
-      }
-  };
-
-  /**
-   * @class RepeatingPreOrderDfsIterator
-   * @brief Used to perform a Repeating Pre-order Depth-First-Search Iteration of a MIRGraph.
-   * @details If there is a change during an iteration, the iteration starts over at the end of the iteration.
-   */
-  class RepeatingPreOrderDfsIterator : public DataflowIterator {
-    public:
-      /**
-       * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-       * @param mir_graph The MIRGraph considered.
-       */
-      explicit RepeatingPreOrderDfsIterator(MIRGraph* mir_graph)
-          : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
-        // Extra setup for the RepeatingPreOrderDfsIterator.
-        idx_ = start_idx_;
-        block_id_list_ = &mir_graph->GetDfsOrder();
-      }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) {
-        // Update changed: if had_changed is true, we remember it for the whole iteration.
-        changed_ |= had_change;
-
-        return ForwardRepeatNext();
-      }
-  };
-
-  /**
-   * @class RepeatingPostOrderDfsIterator
-   * @brief Used to perform a Repeating Post-order Depth-First-Search Iteration of a MIRGraph.
-   * @details If there is a change during an iteration, the iteration starts over at the end of the iteration.
-   */
-  class RepeatingPostOrderDfsIterator : public DataflowIterator {
-    public:
-      /**
-       * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-       * @param mir_graph The MIRGraph considered.
-       */
-      explicit RepeatingPostOrderDfsIterator(MIRGraph* mir_graph)
-          : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
-        // Extra setup for the RepeatingPostOrderDfsIterator.
-        idx_ = start_idx_;
-        block_id_list_ = &mir_graph->GetDfsPostOrder();
-      }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) {
-        // Update changed: if had_changed is true, we remember it for the whole iteration.
-        changed_ |= had_change;
-
-        return ForwardRepeatNext();
-      }
-  };
-
-  /**
-   * @class ReversePostOrderDfsIterator
-   * @brief Used to perform a Reverse Post-order Depth-First-Search Iteration of a MIRGraph.
-   */
-  class ReversePostOrderDfsIterator : public DataflowIterator {
-    public:
-      /**
-       * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-       * @param mir_graph The MIRGraph considered.
-       */
-      explicit ReversePostOrderDfsIterator(MIRGraph* mir_graph)
-          : DataflowIterator(mir_graph, mir_graph->GetNumReachableBlocks() -1, 0) {
-        // Extra setup for the ReversePostOrderDfsIterator.
-        idx_ = start_idx_;
-        block_id_list_ = &mir_graph->GetDfsPostOrder();
-      }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) {
-        // Update changed: if had_changed is true, we remember it for the whole iteration.
-        changed_ |= had_change;
-
-        return ReverseSingleNext();
-      }
-  };
-
-  /**
-   * @class ReversePostOrderDfsIterator
-   * @brief Used to perform a Repeating Reverse Post-order Depth-First-Search Iteration of a MIRGraph.
-   * @details If there is a change during an iteration, the iteration starts over at the end of the iteration.
-   */
-  class RepeatingReversePostOrderDfsIterator : public DataflowIterator {
-    public:
-      /**
-       * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-       * @param mir_graph The MIRGraph considered.
-       */
-      explicit RepeatingReversePostOrderDfsIterator(MIRGraph* mir_graph)
-          : DataflowIterator(mir_graph, mir_graph->GetNumReachableBlocks() -1, 0) {
-        // Extra setup for the RepeatingReversePostOrderDfsIterator
-        idx_ = start_idx_;
-        block_id_list_ = &mir_graph->GetDfsPostOrder();
-      }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) {
-        // Update changed: if had_changed is true, we remember it for the whole iteration.
-        changed_ |= had_change;
-
-        return ReverseRepeatNext();
-      }
-  };
-
-  /**
-   * @class PostOrderDOMIterator
-   * @brief Used to perform a Post-order Domination Iteration of a MIRGraph.
-   */
-  class PostOrderDOMIterator : public DataflowIterator {
-    public:
-      /**
-       * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-       * @param mir_graph The MIRGraph considered.
-       */
-      explicit PostOrderDOMIterator(MIRGraph* mir_graph)
-          : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
-        // Extra setup for thePostOrderDOMIterator.
-        idx_ = start_idx_;
-        block_id_list_ = &mir_graph->GetDomPostOrder();
-      }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) {
-        // Update changed: if had_changed is true, we remember it for the whole iteration.
-        changed_ |= had_change;
-
-        return ForwardSingleNext();
-      }
-  };
-
-  /**
-   * @class AllNodesIterator
-   * @brief Used to perform an iteration on all the BasicBlocks a MIRGraph.
-   */
-  class AllNodesIterator : public DataflowIterator {
-    public:
-      /**
-       * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-       * @param mir_graph The MIRGraph considered.
-       */
-      explicit AllNodesIterator(MIRGraph* mir_graph)
-          : DataflowIterator(mir_graph, 0, mir_graph->GetBlockList().size()) {
-      }
-
-      /**
-       * @brief Resetting the iterator.
-       */
-      void Reset() {
-        idx_ = 0;
-      }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) ALWAYS_INLINE;
-  };
-
-  /**
-   * @class TopologicalSortIterator
-   * @brief Used to perform a Topological Sort Iteration of a MIRGraph.
-   */
-  class TopologicalSortIterator : public DataflowIterator {
-    public:
-      /**
-       * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-       * @param mir_graph The MIRGraph considered.
-       */
-      explicit TopologicalSortIterator(MIRGraph* mir_graph)
-          : DataflowIterator(mir_graph, 0, mir_graph->GetTopologicalSortOrder().size()),
-            loop_ends_(&mir_graph->GetTopologicalSortOrderLoopEnds()),
-            loop_head_stack_(mir_graph_->GetTopologicalSortOrderLoopHeadStack()) {
-        // Extra setup for TopologicalSortIterator.
-        idx_ = start_idx_;
-        block_id_list_ = &mir_graph->GetTopologicalSortOrder();
-      }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) OVERRIDE;
-
-    private:
-     const ArenaVector<BasicBlockId>* const loop_ends_;
-     ArenaVector<std::pair<uint16_t, bool>>* const loop_head_stack_;
-  };
-
-  /**
-   * @class LoopRepeatingTopologicalSortIterator
-   * @brief Used to perform a Topological Sort Iteration of a MIRGraph, repeating loops as needed.
-   * @details The iterator uses the visited flags to keep track of the blocks that need
-   * recalculation and keeps a stack of loop heads in the MIRGraph. At the end of the loop
-   * it returns back to the loop head if it needs to be recalculated. Due to the use of
-   * the visited flags and the loop head stack in the MIRGraph, it's not possible to use
-   * two iterators at the same time or modify this data during iteration (though inspection
-   * of this data is allowed and sometimes even expected).
-   *
-   * NOTE: This iterator is not suitable for passes that need to propagate changes to
-   * predecessors, such as type inferrence.
-   */
-  class LoopRepeatingTopologicalSortIterator : public DataflowIterator {
-    public:
-     /**
-      * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-      * @param mir_graph The MIRGraph considered.
-      */
-     explicit LoopRepeatingTopologicalSortIterator(MIRGraph* mir_graph)
-         : DataflowIterator(mir_graph, 0, mir_graph->GetTopologicalSortOrder().size()),
-           loop_ends_(&mir_graph->GetTopologicalSortOrderLoopEnds()),
-           loop_head_stack_(mir_graph_->GetTopologicalSortOrderLoopHeadStack()) {
-       // Extra setup for RepeatingTopologicalSortIterator.
-       idx_ = start_idx_;
-       block_id_list_ = &mir_graph->GetTopologicalSortOrder();
-       // Clear visited flags and check that the loop head stack is empty.
-       mir_graph->ClearAllVisitedFlags();
-       DCHECK_EQ(loop_head_stack_->size(), 0u);
-     }
-
-     ~LoopRepeatingTopologicalSortIterator() {
-       DCHECK_EQ(loop_head_stack_->size(), 0u);
-     }
-
-     /**
-      * @brief Get the next BasicBlock depending on iteration order.
-      * @param had_change whether the user of the iteration changed the previous BasicBlock.
-      * @return the next BasicBlock following the iteration order, nullptr if finished.
-      */
-     virtual BasicBlock* Next(bool had_change = false) OVERRIDE;
-
-    private:
-     const ArenaVector<BasicBlockId>* const loop_ends_;
-     ArenaVector<std::pair<uint16_t, bool>>* const loop_head_stack_;
-  };
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_DATAFLOW_ITERATOR_H_
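For context on the iterator family deleted above: the caller drives a fixed-point dataflow pass by reporting through Next(had_change) whether it modified the block it just visited, and the iterator keeps revisiting blocks until nothing changes. The snippet below is a minimal, self-contained sketch of that contract (hypothetical names, plain standard C++, not the removed ART classes); it repeats a whole ordered pass rather than re-running only the enclosing loop body as LoopRepeatingTopologicalSortIterator did.

#include <functional>
#include <vector>

using BlockId = unsigned int;  // Stand-in for BasicBlockId.

// Visit blocks in a precomputed order and repeat full passes until one pass
// reports no change. visit() returns true if it modified the block.
inline void IterateToFixedPoint(const std::vector<BlockId>& order,
                                const std::function<bool(BlockId)>& visit) {
  bool changed = true;
  while (changed) {
    changed = false;
    for (BlockId bb : order) {
      changed = visit(bb) || changed;
    }
  }
}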
diff --git a/compiler/dex/dex_flags.h b/compiler/dex/dex_flags.h
deleted file mode 100644
index e8eb40c..0000000
--- a/compiler/dex/dex_flags.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_DEX_FLAGS_H_
-#define ART_COMPILER_DEX_DEX_FLAGS_H_
-
-namespace art {
-
-// Suppress optimization if corresponding bit set.
-enum OptControlVector {
-  kLoadStoreElimination = 0,
-  kLoadHoisting,
-  kSuppressLoads,
-  kNullCheckElimination,
-  kClassInitCheckElimination,
-  kGlobalValueNumbering,
-  kGvnDeadCodeElimination,
-  kLocalValueNumbering,
-  kPromoteRegs,
-  kTrackLiveTemps,
-  kSafeOptimizations,
-  kBBOpt,
-  kSuspendCheckElimination,
-  kMatch,
-  kPromoteCompilerTemps,
-  kBranchFusing,
-  kSuppressExceptionEdges,
-  kSuppressMethodInlining,
-};
-
-// Force code generation paths for testing.
-enum DebugControlVector {
-  kDebugVerbose,
-  kDebugDumpCFG,
-  kDebugSlowFieldPath,
-  kDebugSlowInvokePath,
-  kDebugSlowStringPath,
-  kDebugSlowTypePath,
-  kDebugSlowestFieldPath,
-  kDebugSlowestStringPath,
-  kDebugExerciseResolveMethod,
-  kDebugVerifyDataflow,
-  kDebugShowMemoryUsage,
-  kDebugShowNops,
-  kDebugCountOpcodes,
-  kDebugDumpCheckStats,
-  kDebugShowSummaryMemoryUsage,
-  kDebugShowFilterStats,
-  kDebugTimings,
-  kDebugCodegenDump
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_DEX_FLAGS_H_
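The enumerators in the file above were bit positions rather than masks: a pass checked whether it was suppressed by testing the corresponding bit of the compilation unit's disable_opt word, the same test GlobalValueNumbering::Skip() performs further down in this change. A minimal, self-contained sketch of that check (hypothetical enumerator names, not ART code):

#include <cstdint>

// Bit positions, in the style of the removed OptControlVector enum.
enum OptBit : uint32_t {
  kExampleOptimizationA = 0,
  kExampleOptimizationB,
};

// True if the optimization at position `bit` is suppressed in `disable_opt`.
inline bool IsOptimizationDisabled(uint32_t disable_opt, OptBit bit) {
  return (disable_opt & (1u << bit)) != 0u;
}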
diff --git a/compiler/dex/dex_types.h b/compiler/dex/dex_types.h
deleted file mode 100644
index f485c1c..0000000
--- a/compiler/dex/dex_types.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_DEX_TYPES_H_
-#define ART_COMPILER_DEX_DEX_TYPES_H_
-
-namespace art {
-
-typedef uint32_t DexOffset;          // Dex offset in code units.
-typedef uint16_t NarrowDexOffset;    // For use in structs, Dex offsets range from 0 .. 0xffff.
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_DEX_TYPES_H_
diff --git a/compiler/dex/global_value_numbering.cc b/compiler/dex/global_value_numbering.cc
deleted file mode 100644
index 94ba4fa..0000000
--- a/compiler/dex/global_value_numbering.cc
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "global_value_numbering.h"
-
-#include "base/bit_vector-inl.h"
-#include "base/stl_util.h"
-#include "local_value_numbering.h"
-
-namespace art {
-
-GlobalValueNumbering::GlobalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator,
-                                           Mode mode)
-    : cu_(cu),
-      mir_graph_(cu->mir_graph.get()),
-      allocator_(allocator),
-      bbs_processed_(0u),
-      max_bbs_to_process_(kMaxBbsToProcessMultiplyFactor * mir_graph_->GetNumReachableBlocks()),
-      last_value_(kNullValue),
-      modifications_allowed_(true),
-      mode_(mode),
-      global_value_map_(std::less<uint64_t>(), allocator->Adapter()),
-      array_location_map_(ArrayLocationComparator(), allocator->Adapter()),
-      array_location_reverse_map_(allocator->Adapter()),
-      ref_set_map_(std::less<ValueNameSet>(), allocator->Adapter()),
-      lvns_(mir_graph_->GetNumBlocks(), nullptr, allocator->Adapter()),
-      work_lvn_(nullptr),
-      merge_lvns_(allocator->Adapter()) {
-}
-
-GlobalValueNumbering::~GlobalValueNumbering() {
-  STLDeleteElements(&lvns_);
-}
-
-LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb,
-                                                             ScopedArenaAllocator* allocator) {
-  if (UNLIKELY(!Good())) {
-    return nullptr;
-  }
-  if (bb->block_type != kDalvikByteCode && bb->block_type != kEntryBlock) {
-    DCHECK(bb->first_mir_insn == nullptr);
-    return nullptr;
-  }
-  if (mode_ == kModeGvn && UNLIKELY(bbs_processed_ == max_bbs_to_process_)) {
-    // If we're still trying to converge, stop now. Otherwise, proceed to apply optimizations.
-    last_value_ = kNoValue;  // Make Good() return false.
-    return nullptr;
-  }
-  if (mode_ == kModeGvnPostProcessing &&
-      mir_graph_->GetTopologicalSortOrderLoopHeadStack()->empty()) {
-    // Modifications outside loops are performed during the main phase.
-    return nullptr;
-  }
-  if (allocator == nullptr) {
-    allocator = allocator_;
-  }
-  DCHECK(work_lvn_.get() == nullptr);
-  work_lvn_.reset(new (allocator) LocalValueNumbering(this, bb->id, allocator));
-  if (bb->block_type == kEntryBlock) {
-    work_lvn_->PrepareEntryBlock();
-    DCHECK(bb->first_mir_insn == nullptr);  // modifications_allowed_ is irrelevant.
-  } else {
-    // To avoid repeated allocation on the ArenaStack, reuse a single vector kept as a member.
-    DCHECK(merge_lvns_.empty());
-    // If we're running the full GVN, the RepeatingTopologicalSortIterator keeps the loop
-    // head stack in the MIRGraph up to date and for a loop head we need to check whether
-    // we're making the initial computation and need to merge only preceding blocks in the
-    // topological order, or we're recalculating a loop head and need to merge all incoming
-    // LVNs. When we're not at a loop head (including having an empty loop head stack) all
-    // predecessors should be preceding blocks and we shall merge all of them anyway.
-    bool use_all_predecessors = true;
-    uint16_t loop_head_idx = 0u;  // Used only if !use_all_predecessors.
-    if (mode_ == kModeGvn && mir_graph_->GetTopologicalSortOrderLoopHeadStack()->size() != 0) {
-      // Full GVN inside a loop, see if we're at the loop head for the first time.
-      modifications_allowed_ = false;
-      auto top = mir_graph_->GetTopologicalSortOrderLoopHeadStack()->back();
-      loop_head_idx = top.first;
-      bool recalculating = top.second;
-      use_all_predecessors = recalculating ||
-          loop_head_idx != mir_graph_->GetTopologicalSortOrderIndexes()[bb->id];
-    } else {
-      modifications_allowed_ = true;
-    }
-    for (BasicBlockId pred_id : bb->predecessors) {
-      DCHECK_NE(pred_id, NullBasicBlockId);
-      if (lvns_[pred_id] != nullptr &&
-          (use_all_predecessors ||
-              mir_graph_->GetTopologicalSortOrderIndexes()[pred_id] < loop_head_idx)) {
-        merge_lvns_.push_back(lvns_[pred_id]);
-      }
-    }
-    // Determine merge type.
-    LocalValueNumbering::MergeType merge_type = LocalValueNumbering::kNormalMerge;
-    if (bb->catch_entry) {
-      merge_type = LocalValueNumbering::kCatchMerge;
-    } else if (bb->last_mir_insn != nullptr &&
-        IsInstructionReturn(bb->last_mir_insn->dalvikInsn.opcode) &&
-        bb->GetFirstNonPhiInsn() == bb->last_mir_insn) {
-      merge_type = LocalValueNumbering::kReturnMerge;
-    }
-    // At least one predecessor must have been processed before this bb.
-    CHECK(!merge_lvns_.empty());
-    if (merge_lvns_.size() == 1u) {
-      work_lvn_->MergeOne(*merge_lvns_[0], merge_type);
-    } else {
-      work_lvn_->Merge(merge_type);
-    }
-  }
-  return work_lvn_.get();
-}
-
-bool GlobalValueNumbering::FinishBasicBlock(BasicBlock* bb) {
-  DCHECK(work_lvn_ != nullptr);
-  DCHECK_EQ(bb->id, work_lvn_->Id());
-  ++bbs_processed_;
-  merge_lvns_.clear();
-
-  bool change = false;
-  if (mode_ == kModeGvn) {
-    change = (lvns_[bb->id] == nullptr) || !lvns_[bb->id]->Equals(*work_lvn_);
-    // In GVN mode, keep the latest LVN even if Equals() indicates no change. This is
-    // to keep the correct values of fields that do not contribute to Equals() as long
-    // as they depend only on predecessor LVNs' fields that do contribute to Equals().
-    // Currently, that's LVN::merge_map_ used by LVN::GetStartingVregValueNumberImpl().
-    std::unique_ptr<const LocalValueNumbering> old_lvn(lvns_[bb->id]);
-    lvns_[bb->id] = work_lvn_.release();
-  } else {
-    DCHECK_EQ(mode_, kModeGvnPostProcessing);  // kModeLvn doesn't use FinishBasicBlock().
-    DCHECK(lvns_[bb->id] != nullptr);
-    DCHECK(lvns_[bb->id]->Equals(*work_lvn_));
-    work_lvn_.reset();
-  }
-  return change;
-}
-
-uint16_t GlobalValueNumbering::GetArrayLocation(uint16_t base, uint16_t index) {
-  auto cmp = array_location_map_.key_comp();
-  ArrayLocation key = { base, index };
-  auto lb = array_location_map_.lower_bound(key);
-  if (lb != array_location_map_.end() && !cmp(key, lb->first)) {
-    return lb->second;
-  }
-  uint16_t location = static_cast<uint16_t>(array_location_reverse_map_.size());
-  DCHECK_EQ(location, array_location_reverse_map_.size());  // No overflow.
-  auto it = array_location_map_.PutBefore(lb, key, location);
-  array_location_reverse_map_.push_back(&*it);
-  return location;
-}
-
-bool GlobalValueNumbering::NullCheckedInAllPredecessors(
-    const ScopedArenaVector<uint16_t>& merge_names) const {
-  // Implicit parameters:
-  //   - *work_lvn_: the LVN for which we're checking predecessors.
-  //   - merge_lvns_: the predecessor LVNs.
-  DCHECK_EQ(merge_lvns_.size(), merge_names.size());
-  for (size_t i = 0, size = merge_lvns_.size(); i != size; ++i) {
-    const LocalValueNumbering* pred_lvn = merge_lvns_[i];
-    uint16_t value_name = merge_names[i];
-    if (!pred_lvn->IsValueNullChecked(value_name)) {
-      // Check if the predecessor has an IF_EQZ/IF_NEZ as the last insn.
-      const BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_lvn->Id());
-      if (!HasNullCheckLastInsn(pred_bb, work_lvn_->Id())) {
-        return false;
-      }
-      // IF_EQZ/IF_NEZ checks some sreg, see if that sreg contains the value_name.
-      int s_reg = pred_bb->last_mir_insn->ssa_rep->uses[0];
-      if (pred_lvn->GetSregValue(s_reg) != value_name) {
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
-bool GlobalValueNumbering::DivZeroCheckedInAllPredecessors(
-    const ScopedArenaVector<uint16_t>& merge_names) const {
-  // Implicit parameters:
-  //   - *work_lvn_: the LVN for which we're checking predecessors.
-  //   - merge_lvns_: the predecessor LVNs.
-  DCHECK_EQ(merge_lvns_.size(), merge_names.size());
-  for (size_t i = 0, size = merge_lvns_.size(); i != size; ++i) {
-    const LocalValueNumbering* pred_lvn = merge_lvns_[i];
-    uint16_t value_name = merge_names[i];
-    if (!pred_lvn->IsValueDivZeroChecked(value_name)) {
-      return false;
-    }
-  }
-  return true;
-}
-
-bool GlobalValueNumbering::IsBlockEnteredOnTrue(uint16_t cond, BasicBlockId bb_id) {
-  DCHECK_NE(cond, kNoValue);
-  BasicBlock* bb = mir_graph_->GetBasicBlock(bb_id);
-  if (bb->predecessors.size() == 1u) {
-    BasicBlockId pred_id = bb->predecessors[0];
-    BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
-    if (pred_bb->BranchesToSuccessorOnlyIfNotZero(bb_id)) {
-      DCHECK(lvns_[pred_id] != nullptr);
-      uint16_t operand = lvns_[pred_id]->GetSregValue(pred_bb->last_mir_insn->ssa_rep->uses[0]);
-      if (operand == cond) {
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-bool GlobalValueNumbering::IsTrueInBlock(uint16_t cond, BasicBlockId bb_id) {
-  // We're not doing proper value propagation, so just see if the condition is used
-  // with if-nez/if-eqz to branch/fall-through to this bb or one of its dominators.
-  DCHECK_NE(cond, kNoValue);
-  if (IsBlockEnteredOnTrue(cond, bb_id)) {
-    return true;
-  }
-  BasicBlock* bb = mir_graph_->GetBasicBlock(bb_id);
-  for (uint32_t dom_id : bb->dominators->Indexes()) {
-    if (IsBlockEnteredOnTrue(cond, dom_id)) {
-      return true;
-    }
-  }
-  return false;
-}
-
-}  // namespace art
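The merge logic in PrepareBasicBlock above boils down to: collect the LVNs of the relevant predecessors, then keep only the facts on which all of them agree. A much-simplified, self-contained sketch of that intersection-style merge follows (std::map stand-ins and hypothetical names, not the removed LocalValueNumbering::Merge):

#include <cstddef>
#include <cstdint>
#include <map>
#include <vector>

using SReg = int32_t;
using ValueName = uint16_t;
using ValueMap = std::map<SReg, ValueName>;  // sreg -> value name.

// Keep only the (sreg, value) pairs on which every predecessor map agrees.
inline ValueMap MergePredecessors(const std::vector<ValueMap>& preds) {
  ValueMap merged;
  if (preds.empty()) {
    return merged;
  }
  for (const auto& entry : preds[0]) {
    bool all_agree = true;
    for (std::size_t i = 1; i != preds.size(); ++i) {
      auto it = preds[i].find(entry.first);
      if (it == preds[i].end() || it->second != entry.second) {
        all_agree = false;
        break;
      }
    }
    if (all_agree) {
      merged.insert(entry);
    }
  }
  return merged;
}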
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
deleted file mode 100644
index c514f75..0000000
--- a/compiler/dex/global_value_numbering.h
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_GLOBAL_VALUE_NUMBERING_H_
-#define ART_COMPILER_DEX_GLOBAL_VALUE_NUMBERING_H_
-
-#include "base/arena_object.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "mir_graph.h"
-#include "compiler_ir.h"
-#include "dex_flags.h"
-
-namespace art {
-
-class LocalValueNumbering;
-class MirFieldInfo;
-
-class GlobalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
- public:
-  static constexpr uint16_t kNoValue = 0xffffu;
-  static constexpr uint16_t kNullValue = 1u;
-
-  enum Mode {
-    kModeGvn,
-    kModeGvnPostProcessing,
-    kModeLvn
-  };
-
-  static bool Skip(CompilationUnit* cu) {
-    return (cu->disable_opt & (1u << kGlobalValueNumbering)) != 0u ||
-        cu->mir_graph->GetMaxNestedLoops() > kMaxAllowedNestedLoops;
-  }
-
-  // Instance and static field id map is held by MIRGraph to avoid multiple recalculations
-  // when doing LVN.
-  template <typename Container>  // Container of MirIFieldLoweringInfo or MirSFieldLoweringInfo.
-  static uint16_t* PrepareGvnFieldIds(ScopedArenaAllocator* allocator,
-                                      const Container& field_infos);
-
-  GlobalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator, Mode mode);
-  ~GlobalValueNumbering();
-
-  CompilationUnit* GetCompilationUnit() const {
-    return cu_;
-  }
-
-  MIRGraph* GetMirGraph() const {
-    return mir_graph_;
-  }
-
-  // Prepare LVN for the basic block.
-  LocalValueNumbering* PrepareBasicBlock(BasicBlock* bb,
-                                         ScopedArenaAllocator* allocator = nullptr);
-
-  // Finish processing the basic block.
-  bool FinishBasicBlock(BasicBlock* bb);
-
-  // Checks that the value names didn't overflow.
-  bool Good() const {
-    return last_value_ < kNoValue;
-  }
-
-  // Allow modifications.
-  void StartPostProcessing();
-
-  bool CanModify() const {
-    return modifications_allowed_ && Good();
-  }
-
-  // Retrieve the LVN with GVN results for a given BasicBlock.
-  const LocalValueNumbering* GetLvn(BasicBlockId bb_id) const;
-
- private:
-  // Allocate a new value name.
-  uint16_t NewValueName();
-
-  // Key is concatenation of opcode, operand1, operand2 and modifier, value is value name.
-  typedef ScopedArenaSafeMap<uint64_t, uint16_t> ValueMap;
-
-  static uint64_t BuildKey(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
-    return (static_cast<uint64_t>(op) << 48 | static_cast<uint64_t>(operand1) << 32 |
-            static_cast<uint64_t>(operand2) << 16 | static_cast<uint64_t>(modifier));
-  }
-
-  // Look up a value in the global value map, adding a new entry if there was none before.
-  uint16_t LookupValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
-    uint16_t res;
-    uint64_t key = BuildKey(op, operand1, operand2, modifier);
-    auto lb = global_value_map_.lower_bound(key);
-    if (lb != global_value_map_.end() && lb->first == key) {
-      res = lb->second;
-    } else {
-      res = NewValueName();
-      global_value_map_.PutBefore(lb, key, res);
-    }
-    return res;
-  }
-
-  // Look up a value in the global value map, don't add a new entry if there was none before.
-  uint16_t FindValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) const {
-    uint16_t res;
-    uint64_t key = BuildKey(op, operand1, operand2, modifier);
-    auto lb = global_value_map_.lower_bound(key);
-    if (lb != global_value_map_.end() && lb->first == key) {
-      res = lb->second;
-    } else {
-      res = kNoValue;
-    }
-    return res;
-  }
-
-  // Get an instance field id.
-  uint16_t GetIFieldId(MIR* mir) {
-    return GetMirGraph()->GetGvnIFieldId(mir);
-  }
-
-  // Get a static field id.
-  uint16_t GetSFieldId(MIR* mir) {
-    return GetMirGraph()->GetGvnSFieldId(mir);
-  }
-
-  // Get an instance field type based on field id.
-  uint16_t GetIFieldType(uint16_t field_id) {
-    return static_cast<uint16_t>(GetMirGraph()->GetIFieldLoweringInfo(field_id).MemAccessType());
-  }
-
-  // Get a static field type based on field id.
-  uint16_t GetSFieldType(uint16_t field_id) {
-    return static_cast<uint16_t>(GetMirGraph()->GetSFieldLoweringInfo(field_id).MemAccessType());
-  }
-
-  struct ArrayLocation {
-    uint16_t base;
-    uint16_t index;
-  };
-
-  struct ArrayLocationComparator {
-    bool operator()(const ArrayLocation& lhs, const ArrayLocation& rhs) const {
-      if (lhs.base != rhs.base) {
-        return lhs.base < rhs.base;
-      }
-      return lhs.index < rhs.index;
-    }
-  };
-
-  typedef ScopedArenaSafeMap<ArrayLocation, uint16_t, ArrayLocationComparator> ArrayLocationMap;
-
-  // Get an array location.
-  uint16_t GetArrayLocation(uint16_t base, uint16_t index);
-
-  // Get the array base from an array location.
-  uint16_t GetArrayLocationBase(uint16_t location) const {
-    return array_location_reverse_map_[location]->first.base;
-  }
-
-  // Get the array index from an array location.
-  uint16_t GetArrayLocationIndex(uint16_t location) const {
-    return array_location_reverse_map_[location]->first.index;
-  }
-
-  // A set of value names.
-  typedef ScopedArenaSet<uint16_t> ValueNameSet;
-
-  // A map from a set of references to the set id.
-  typedef ScopedArenaSafeMap<ValueNameSet, uint16_t> RefSetIdMap;
-
-  uint16_t GetRefSetId(const ValueNameSet& ref_set) {
-    uint16_t res = kNoValue;
-    auto lb = ref_set_map_.lower_bound(ref_set);
-    if (lb != ref_set_map_.end() && !ref_set_map_.key_comp()(ref_set, lb->first)) {
-      res = lb->second;
-    } else {
-      res = NewValueName();
-      ref_set_map_.PutBefore(lb, ref_set, res);
-    }
-    return res;
-  }
-
-  const BasicBlock* GetBasicBlock(uint16_t bb_id) const {
-    return mir_graph_->GetBasicBlock(bb_id);
-  }
-
-  static bool HasNullCheckLastInsn(const BasicBlock* pred_bb, BasicBlockId succ_id) {
-    return pred_bb->BranchesToSuccessorOnlyIfNotZero(succ_id);
-  }
-
-  bool NullCheckedInAllPredecessors(const ScopedArenaVector<uint16_t>& merge_names) const;
-
-  bool DivZeroCheckedInAllPredecessors(const ScopedArenaVector<uint16_t>& merge_names) const;
-
-  bool IsBlockEnteredOnTrue(uint16_t cond, BasicBlockId bb_id);
-  bool IsTrueInBlock(uint16_t cond, BasicBlockId bb_id);
-
-  ScopedArenaAllocator* Allocator() const {
-    return allocator_;
-  }
-
-  CompilationUnit* const cu_;
-  MIRGraph* const mir_graph_;
-  ScopedArenaAllocator* const allocator_;
-
-  // The maximum number of nested loops that we accept for GVN.
-  static constexpr size_t kMaxAllowedNestedLoops = 6u;
-
-  // The number of BBs that we need to process grows exponentially with the number
-  // of nested loops. Don't allow excessive processing for too many nested loops or
-  // otherwise expensive methods.
-  static constexpr uint32_t kMaxBbsToProcessMultiplyFactor = 20u;
-
-  uint32_t bbs_processed_;
-  uint32_t max_bbs_to_process_;  // Doesn't apply after the main GVN has converged.
-
-  // We have 32-bit last_value_ so that we can detect when we run out of value names, see Good().
-  // We usually don't check Good() until the end of LVN unless we're about to modify code.
-  uint32_t last_value_;
-
-  // Marks whether code modifications are allowed. The initial GVN is done without code
-  // modifications to settle the value names. Afterwards, we allow modifications and rerun
-  // LVN once for each BasicBlock.
-  bool modifications_allowed_;
-
-  // Specifies the mode of operation.
-  Mode mode_;
-
-  ValueMap global_value_map_;
-  ArrayLocationMap array_location_map_;
-  ScopedArenaVector<const ArrayLocationMap::value_type*> array_location_reverse_map_;
-  RefSetIdMap ref_set_map_;
-
-  ScopedArenaVector<const LocalValueNumbering*> lvns_;        // Owning.
-  std::unique_ptr<LocalValueNumbering> work_lvn_;
-  ScopedArenaVector<const LocalValueNumbering*> merge_lvns_;  // Not owning.
-
-  friend class LocalValueNumbering;
-  friend class GlobalValueNumberingTest;
-
-  DISALLOW_COPY_AND_ASSIGN(GlobalValueNumbering);
-};
-std::ostream& operator<<(std::ostream& os, const GlobalValueNumbering::Mode& rhs);
-
-inline const LocalValueNumbering* GlobalValueNumbering::GetLvn(BasicBlockId bb_id) const {
-  DCHECK_EQ(mode_, kModeGvnPostProcessing);
-  DCHECK_LT(bb_id, lvns_.size());
-  DCHECK(lvns_[bb_id] != nullptr);
-  return lvns_[bb_id];
-}
-
-inline void GlobalValueNumbering::StartPostProcessing() {
-  DCHECK(Good());
-  DCHECK_EQ(mode_, kModeGvn);
-  mode_ = kModeGvnPostProcessing;
-}
-
-inline uint16_t GlobalValueNumbering::NewValueName() {
-  DCHECK_NE(mode_, kModeGvnPostProcessing);
-  ++last_value_;
-  return last_value_;
-}
-
-template <typename Container>  // Container of MirIFieldLoweringInfo or MirSFieldLoweringInfo.
-uint16_t* GlobalValueNumbering::PrepareGvnFieldIds(ScopedArenaAllocator* allocator,
-                                                   const Container& field_infos) {
-  size_t size = field_infos.size();
-  uint16_t* field_ids = allocator->AllocArray<uint16_t>(size, kArenaAllocMisc);
-  for (size_t i = 0u; i != size; ++i) {
-    size_t idx = i;
-    const MirFieldInfo& cur_info = field_infos[i];
-    if (cur_info.IsResolved()) {
-      for (size_t j = 0; j != i; ++j) {
-        const MirFieldInfo& prev_info = field_infos[j];
-        if (prev_info.IsResolved() &&
-            prev_info.DeclaringDexFile() == cur_info.DeclaringDexFile() &&
-            prev_info.DeclaringFieldIndex() == cur_info.DeclaringFieldIndex()) {
-          DCHECK_EQ(cur_info.MemAccessType(), prev_info.MemAccessType());
-          idx = j;
-          break;
-        }
-      }
-    }
-    field_ids[i] = idx;
-  }
-  return field_ids;
-}
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_GLOBAL_VALUE_NUMBERING_H_
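BuildKey() and LookupValue() in the header above implement a common interning idiom: pack four 16-bit components into one 64-bit map key and use lower_bound() as a combined find-or-insert. A standalone sketch of the same idiom (hypothetical class name, not ART code):

#include <cstdint>
#include <map>

class ValueInterner {
 public:
  // Pack (op, operand1, operand2, modifier) into a single 64-bit key.
  static uint64_t BuildKey(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
    return (static_cast<uint64_t>(op) << 48) | (static_cast<uint64_t>(operand1) << 32) |
           (static_cast<uint64_t>(operand2) << 16) | static_cast<uint64_t>(modifier);
  }

  // Find-or-insert the value name for a key.
  uint16_t LookupValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
    uint64_t key = BuildKey(op, operand1, operand2, modifier);
    auto lb = map_.lower_bound(key);
    if (lb != map_.end() && lb->first == key) {
      return lb->second;
    }
    uint16_t name = next_name_++;
    map_.emplace_hint(lb, key, name);
    return name;
  }

 private:
  std::map<uint64_t, uint16_t> map_;
  uint16_t next_name_ = 1;  // 0xffffu is left free to play the "no value" role, as in the removed header.
};

The lower_bound() result doubles as the insertion hint, so the common lookup-then-insert path costs a single tree traversal.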
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
deleted file mode 100644
index f2c2e22..0000000
--- a/compiler/dex/global_value_numbering_test.cc
+++ /dev/null
@@ -1,2428 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/logging.h"
-#include "dataflow_iterator-inl.h"
-#include "dex/mir_field_info.h"
-#include "global_value_numbering.h"
-#include "local_value_numbering.h"
-#include "gtest/gtest.h"
-
-namespace art {
-
-class GlobalValueNumberingTest : public testing::Test {
- protected:
-  static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
-
-  struct IFieldDef {
-    uint16_t field_idx;
-    uintptr_t declaring_dex_file;
-    uint16_t declaring_field_idx;
-    bool is_volatile;
-    DexMemAccessType type;
-  };
-
-  struct SFieldDef {
-    uint16_t field_idx;
-    uintptr_t declaring_dex_file;
-    uint16_t declaring_field_idx;
-    bool is_volatile;
-    DexMemAccessType type;
-  };
-
-  struct BBDef {
-    static constexpr size_t kMaxSuccessors = 4;
-    static constexpr size_t kMaxPredecessors = 4;
-
-    BBType type;
-    size_t num_successors;
-    BasicBlockId successors[kMaxPredecessors];
-    size_t num_predecessors;
-    BasicBlockId predecessors[kMaxPredecessors];
-  };
-
-  struct MIRDef {
-    static constexpr size_t kMaxSsaDefs = 2;
-    static constexpr size_t kMaxSsaUses = 4;
-
-    BasicBlockId bbid;
-    Instruction::Code opcode;
-    int64_t value;
-    uint32_t field_info;
-    size_t num_uses;
-    int32_t uses[kMaxSsaUses];
-    size_t num_defs;
-    int32_t defs[kMaxSsaDefs];
-  };
-
-#define DEF_SUCC0() \
-    0u, { }
-#define DEF_SUCC1(s1) \
-    1u, { s1 }
-#define DEF_SUCC2(s1, s2) \
-    2u, { s1, s2 }
-#define DEF_SUCC3(s1, s2, s3) \
-    3u, { s1, s2, s3 }
-#define DEF_SUCC4(s1, s2, s3, s4) \
-    4u, { s1, s2, s3, s4 }
-#define DEF_PRED0() \
-    0u, { }
-#define DEF_PRED1(p1) \
-    1u, { p1 }
-#define DEF_PRED2(p1, p2) \
-    2u, { p1, p2 }
-#define DEF_PRED3(p1, p2, p3) \
-    3u, { p1, p2, p3 }
-#define DEF_PRED4(p1, p2, p3, p4) \
-    4u, { p1, p2, p3, p4 }
-#define DEF_BB(type, succ, pred) \
-    { type, succ, pred }
-
-#define DEF_CONST(bb, opcode, reg, value) \
-    { bb, opcode, value, 0u, 0, { }, 1, { reg } }
-#define DEF_CONST_WIDE(bb, opcode, reg, value) \
-    { bb, opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_CONST_STRING(bb, opcode, reg, index) \
-    { bb, opcode, index, 0u, 0, { }, 1, { reg } }
-#define DEF_IGET(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 1, { obj }, 1, { reg } }
-#define DEF_IGET_WIDE(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
-#define DEF_IPUT(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
-#define DEF_IPUT_WIDE(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
-#define DEF_SGET(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 0, { }, 1, { reg } }
-#define DEF_SGET_WIDE(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_SPUT(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 1, { reg }, 0, { } }
-#define DEF_SPUT_WIDE(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
-#define DEF_AGET(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 2, { obj, idx }, 1, { reg } }
-#define DEF_AGET_WIDE(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 2, { obj, idx }, 2, { reg, reg + 1 } }
-#define DEF_APUT(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 3, { reg, obj, idx }, 0, { } }
-#define DEF_APUT_WIDE(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }
-#define DEF_INVOKE1(bb, opcode, reg) \
-    { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
-#define DEF_UNIQUE_REF(bb, opcode, reg) \
-    { bb, opcode, 0u, 0u, 0, { }, 1, { reg } }  // CONST_CLASS, CONST_STRING, NEW_ARRAY, ...
-#define DEF_IFZ(bb, opcode, reg) \
-    { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
-#define DEF_MOVE(bb, opcode, reg, src) \
-    { bb, opcode, 0u, 0u, 1, { src }, 1, { reg } }
-#define DEF_MOVE_WIDE(bb, opcode, reg, src) \
-    { bb, opcode, 0u, 0u, 2, { src, src + 1 }, 2, { reg, reg + 1 } }
-#define DEF_PHI2(bb, reg, src1, src2) \
-    { bb, static_cast<Instruction::Code>(kMirOpPhi), 0, 0u, 2u, { src1, src2 }, 1, { reg } }
-#define DEF_BINOP(bb, opcode, result, src1, src2) \
-    { bb, opcode, 0u, 0u, 2, { src1, src2 }, 1, { result } }
-#define DEF_UNOP(bb, opcode, result, src) DEF_MOVE(bb, opcode, result, src)
-
-  void DoPrepareIFields(const IFieldDef* defs, size_t count) {
-    cu_.mir_graph->ifield_lowering_infos_.clear();
-    cu_.mir_graph->ifield_lowering_infos_.reserve(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const IFieldDef* def = &defs[i];
-      MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
-      if (def->declaring_dex_file != 0u) {
-        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
-        field_info.declaring_field_idx_ = def->declaring_field_idx;
-        field_info.flags_ &= ~(def->is_volatile ? 0u : MirSFieldLoweringInfo::kFlagIsVolatile);
-      }
-      cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
-    }
-  }
-
-  template <size_t count>
-  void PrepareIFields(const IFieldDef (&defs)[count]) {
-    DoPrepareIFields(defs, count);
-  }
-
-  void DoPrepareSFields(const SFieldDef* defs, size_t count) {
-    cu_.mir_graph->sfield_lowering_infos_.clear();
-    cu_.mir_graph->sfield_lowering_infos_.reserve(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const SFieldDef* def = &defs[i];
-      MirSFieldLoweringInfo field_info(def->field_idx, def->type);
-      // Mark even unresolved fields as initialized.
-      field_info.flags_ |= MirSFieldLoweringInfo::kFlagClassIsInitialized;
-      // NOTE: MirSFieldLoweringInfo::kFlagClassIsInDexCache isn't used by GVN.
-      if (def->declaring_dex_file != 0u) {
-        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
-        field_info.declaring_field_idx_ = def->declaring_field_idx;
-        field_info.flags_ &= ~(def->is_volatile ? 0u : MirSFieldLoweringInfo::kFlagIsVolatile);
-      }
-      cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
-    }
-  }
-
-  template <size_t count>
-  void PrepareSFields(const SFieldDef (&defs)[count]) {
-    DoPrepareSFields(defs, count);
-  }
-
-  void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
-    cu_.mir_graph->block_id_map_.clear();
-    cu_.mir_graph->block_list_.clear();
-    ASSERT_LT(3u, count);  // null, entry, exit and at least one bytecode block.
-    ASSERT_EQ(kNullBlock, defs[0].type);
-    ASSERT_EQ(kEntryBlock, defs[1].type);
-    ASSERT_EQ(kExitBlock, defs[2].type);
-    for (size_t i = 0u; i != count; ++i) {
-      const BBDef* def = &defs[i];
-      BasicBlock* bb = cu_.mir_graph->CreateNewBB(def->type);
-      if (def->num_successors <= 2) {
-        bb->successor_block_list_type = kNotUsed;
-        bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
-        bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
-      } else {
-        bb->successor_block_list_type = kPackedSwitch;
-        bb->fall_through = 0u;
-        bb->taken = 0u;
-        bb->successor_blocks.reserve(def->num_successors);
-        for (size_t j = 0u; j != def->num_successors; ++j) {
-          SuccessorBlockInfo* successor_block_info =
-              static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
-                                                               kArenaAllocSuccessors));
-          successor_block_info->block = j;
-          successor_block_info->key = 0u;  // Not used by GVN.
-          bb->successor_blocks.push_back(successor_block_info);
-        }
-      }
-      bb->predecessors.assign(def->predecessors, def->predecessors + def->num_predecessors);
-      if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
-        bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
-            cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
-        bb->data_flow_info->live_in_v = live_in_v_;
-      }
-    }
-    ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
-    cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
-    ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
-    cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_[2];
-    ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
-  }
-
-  template <size_t count>
-  void PrepareBasicBlocks(const BBDef (&defs)[count]) {
-    DoPrepareBasicBlocks(defs, count);
-  }
-
-  void DoPrepareMIRs(const MIRDef* defs, size_t count) {
-    mir_count_ = count;
-    mirs_ = cu_.arena.AllocArray<MIR>(count, kArenaAllocMIR);
-    ssa_reps_.resize(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const MIRDef* def = &defs[i];
-      MIR* mir = &mirs_[i];
-      ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.size());
-      BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
-      bb->AppendMIR(mir);
-      mir->dalvikInsn.opcode = def->opcode;
-      mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
-      mir->dalvikInsn.vB_wide = def->value;
-      if (IsInstructionIGetOrIPut(def->opcode)) {
-        ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.size());
-        mir->meta.ifield_lowering_info = def->field_info;
-        ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->field_info].MemAccessType(),
-                  IGetOrIPutMemAccessType(def->opcode));
-      } else if (IsInstructionSGetOrSPut(def->opcode)) {
-        ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.size());
-        mir->meta.sfield_lowering_info = def->field_info;
-        ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->field_info].MemAccessType(),
-                  SGetOrSPutMemAccessType(def->opcode));
-      } else if (def->opcode == static_cast<Instruction::Code>(kMirOpPhi)) {
-        mir->meta.phi_incoming =
-            allocator_->AllocArray<BasicBlockId>(def->num_uses, kArenaAllocDFInfo);
-        ASSERT_EQ(def->num_uses, bb->predecessors.size());
-        std::copy(bb->predecessors.begin(), bb->predecessors.end(), mir->meta.phi_incoming);
-      }
-      mir->ssa_rep = &ssa_reps_[i];
-      mir->ssa_rep->num_uses = def->num_uses;
-      mir->ssa_rep->uses = const_cast<int32_t*>(def->uses);  // Not modified by LVN.
-      mir->ssa_rep->num_defs = def->num_defs;
-      mir->ssa_rep->defs = const_cast<int32_t*>(def->defs);  // Not modified by LVN.
-      mir->dalvikInsn.opcode = def->opcode;
-      mir->offset = i;  // LVN uses offset only for debug output
-      mir->optimization_flags = 0u;
-    }
-    DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(
-        cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
-    code_item->insns_size_in_code_units_ = 2u * count;
-    cu_.mir_graph->current_code_item_ = code_item;
-  }
-
-  template <size_t count>
-  void PrepareMIRs(const MIRDef (&defs)[count]) {
-    DoPrepareMIRs(defs, count);
-  }
-
-  void DoPrepareVregToSsaMapExit(BasicBlockId bb_id, const int32_t* map, size_t count) {
-    BasicBlock* bb = cu_.mir_graph->GetBasicBlock(bb_id);
-    ASSERT_TRUE(bb != nullptr);
-    ASSERT_TRUE(bb->data_flow_info != nullptr);
-    bb->data_flow_info->vreg_to_ssa_map_exit =
-        cu_.arena.AllocArray<int32_t>(count, kArenaAllocDFInfo);
-    std::copy_n(map, count, bb->data_flow_info->vreg_to_ssa_map_exit);
-  }
-
-  template <size_t count>
-  void PrepareVregToSsaMapExit(BasicBlockId bb_id, const int32_t (&map)[count]) {
-    DoPrepareVregToSsaMapExit(bb_id, map, count);
-  }
-
-  template <size_t count>
-  void MarkAsWideSRegs(const int32_t (&sregs)[count]) {
-    for (int32_t sreg : sregs) {
-      cu_.mir_graph->reg_location_[sreg].wide = true;
-      cu_.mir_graph->reg_location_[sreg + 1].wide = true;
-      cu_.mir_graph->reg_location_[sreg + 1].high_word = true;
-    }
-  }
-
-  void PerformGVN() {
-    DoPerformGVN<LoopRepeatingTopologicalSortIterator>();
-  }
-
-  void PerformPreOrderDfsGVN() {
-    DoPerformGVN<RepeatingPreOrderDfsIterator>();
-  }
-
-  template <typename IteratorType>
-  void DoPerformGVN() {
-    cu_.mir_graph->SSATransformationStart();
-    cu_.mir_graph->ComputeDFSOrders();
-    cu_.mir_graph->ComputeDominators();
-    cu_.mir_graph->ComputeTopologicalSortOrder();
-    cu_.mir_graph->SSATransformationEnd();
-    cu_.mir_graph->temp_.gvn.ifield_ids = GlobalValueNumbering::PrepareGvnFieldIds(
-        allocator_.get(), cu_.mir_graph->ifield_lowering_infos_);
-    cu_.mir_graph->temp_.gvn.sfield_ids = GlobalValueNumbering::PrepareGvnFieldIds(
-        allocator_.get(), cu_.mir_graph->sfield_lowering_infos_);
-    ASSERT_TRUE(gvn_ == nullptr);
-    gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get(),
-                                                           GlobalValueNumbering::kModeGvn));
-    value_names_.resize(mir_count_, 0xffffu);
-    IteratorType iterator(cu_.mir_graph.get());
-    bool change = false;
-    for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
-      LocalValueNumbering* lvn = gvn_->PrepareBasicBlock(bb);
-      if (lvn != nullptr) {
-        for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-          value_names_[mir - mirs_] = lvn->GetValueNumber(mir);
-        }
-      }
-      change = (lvn != nullptr) && gvn_->FinishBasicBlock(bb);
-      ASSERT_TRUE(gvn_->Good());
-    }
-  }
-
-  void PerformGVNCodeModifications() {
-    ASSERT_TRUE(gvn_ != nullptr);
-    ASSERT_TRUE(gvn_->Good());
-    gvn_->StartPostProcessing();
-    TopologicalSortIterator iterator(cu_.mir_graph.get());
-    for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
-      LocalValueNumbering* lvn = gvn_->PrepareBasicBlock(bb);
-      if (lvn != nullptr) {
-        for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-          uint16_t value_name = lvn->GetValueNumber(mir);
-          ASSERT_EQ(value_name, value_names_[mir - mirs_]);
-        }
-      }
-      bool change = (lvn != nullptr) && gvn_->FinishBasicBlock(bb);
-      ASSERT_FALSE(change);
-      ASSERT_TRUE(gvn_->Good());
-    }
-  }
-
-  GlobalValueNumberingTest()
-      : pool_(),
-        cu_(&pool_, kRuntimeISA, nullptr, nullptr),
-        mir_count_(0u),
-        mirs_(nullptr),
-        ssa_reps_(),
-        allocator_(),
-        gvn_(),
-        value_names_(),
-        live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false, kBitMapMisc)) {
-    cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
-    cu_.access_flags = kAccStatic;  // Don't let "this" interfere with this test.
-    allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
-    // By default, the zero-initialized reg_location_[.] with ref == false tells LVN that
-    // 0 constants are integral, not references, and the values are all narrow.
-    // Nothing else is used by LVN/GVN. Tests can override the default values as needed.
-    cu_.mir_graph->reg_location_ =
-        cu_.arena.AllocArray<RegLocation>(kMaxSsaRegs, kArenaAllocRegAlloc);
-    cu_.mir_graph->num_ssa_regs_ = kMaxSsaRegs;
-    // Bind all possible sregs to live vregs for test purposes.
-    live_in_v_->SetInitialBits(kMaxSsaRegs);
-    cu_.mir_graph->ssa_base_vregs_.reserve(kMaxSsaRegs);
-    cu_.mir_graph->ssa_subscripts_.reserve(kMaxSsaRegs);
-    for (unsigned int i = 0; i < kMaxSsaRegs; i++) {
-      cu_.mir_graph->ssa_base_vregs_.push_back(i);
-      cu_.mir_graph->ssa_subscripts_.push_back(0);
-    }
-    // Set shorty for a void-returning method without arguments.
-    cu_.shorty = "V";
-  }
-
-  static constexpr size_t kMaxSsaRegs = 16384u;
-
-  ArenaPool pool_;
-  CompilationUnit cu_;
-  size_t mir_count_;
-  MIR* mirs_;
-  std::vector<SSARepresentation> ssa_reps_;
-  std::unique_ptr<ScopedArenaAllocator> allocator_;
-  std::unique_ptr<GlobalValueNumbering> gvn_;
-  std::vector<uint16_t> value_names_;
-  ArenaBitVector* live_in_v_;
-};
-
-constexpr uint16_t GlobalValueNumberingTest::kNoValue;
-
-class GlobalValueNumberingTestDiamond : public GlobalValueNumberingTest {
- public:
-  GlobalValueNumberingTestDiamond();
-
- private:
-  static const BBDef kDiamondBbs[];
-};
-
-const GlobalValueNumberingTest::BBDef GlobalValueNumberingTestDiamond::kDiamondBbs[] = {
-    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),  // Block #3, top of the diamond.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // Block #4, left side.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // Block #5, right side.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),  // Block #6, bottom.
-};
-
-GlobalValueNumberingTestDiamond::GlobalValueNumberingTestDiamond()
-    : GlobalValueNumberingTest() {
-  PrepareBasicBlocks(kDiamondBbs);
-}
-
-class GlobalValueNumberingTestLoop : public GlobalValueNumberingTest {
- public:
-  GlobalValueNumberingTestLoop();
-
- private:
-  static const BBDef kLoopBbs[];
-};
-
-const GlobalValueNumberingTest::BBDef GlobalValueNumberingTestLoop::kLoopBbs[] = {
-    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)),  // "taken" loops to self.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
-};
-
-GlobalValueNumberingTestLoop::GlobalValueNumberingTestLoop()
-    : GlobalValueNumberingTest() {
-  PrepareBasicBlocks(kLoopBbs);
-}
-
-class GlobalValueNumberingTestCatch : public GlobalValueNumberingTest {
- public:
-  GlobalValueNumberingTestCatch();
-
- private:
-  static const BBDef kCatchBbs[];
-};
-
-const GlobalValueNumberingTest::BBDef GlobalValueNumberingTestCatch::kCatchBbs[] = {
-    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),     // The top.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // The throwing insn.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // Catch handler.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),  // The merged block.
-};
-
-GlobalValueNumberingTestCatch::GlobalValueNumberingTestCatch()
-    : GlobalValueNumberingTest() {
-  PrepareBasicBlocks(kCatchBbs);
-  // Mark catch handler.
-  BasicBlock* catch_handler = cu_.mir_graph->GetBasicBlock(5u);
-  catch_handler->catch_entry = true;
-  // Add successor block info to the check block.
-  BasicBlock* check_bb = cu_.mir_graph->GetBasicBlock(3u);
-  check_bb->successor_block_list_type = kCatch;
-  SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
-      (cu_.arena.Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessors));
-  successor_block_info->block = catch_handler->id;
-  check_bb->successor_blocks.push_back(successor_block_info);
-}
-
-class GlobalValueNumberingTestTwoConsecutiveLoops : public GlobalValueNumberingTest {
- public:
-  GlobalValueNumberingTestTwoConsecutiveLoops();
-
- private:
-  static const BBDef kTwoConsecutiveLoopsBbs[];
-};
-
-const GlobalValueNumberingTest::BBDef
-GlobalValueNumberingTestTwoConsecutiveLoops::kTwoConsecutiveLoopsBbs[] = {
-    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(9)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 6), DEF_PRED2(3, 5)),  // "taken" skips over the loop.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(4)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(7), DEF_PRED1(4)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC2(8, 9), DEF_PRED2(6, 8)),  // "taken" skips over the loop.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(7), DEF_PRED1(7)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(7)),
-};
-
-GlobalValueNumberingTestTwoConsecutiveLoops::GlobalValueNumberingTestTwoConsecutiveLoops()
-    : GlobalValueNumberingTest() {
-  PrepareBasicBlocks(kTwoConsecutiveLoopsBbs);
-}
-
-class GlobalValueNumberingTestTwoNestedLoops : public GlobalValueNumberingTest {
- public:
-  GlobalValueNumberingTestTwoNestedLoops();
-
- private:
-  static const BBDef kTwoNestedLoopsBbs[];
-};
-
-const GlobalValueNumberingTest::BBDef
-GlobalValueNumberingTestTwoNestedLoops::kTwoNestedLoopsBbs[] = {
-    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(8)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 8), DEF_PRED2(3, 7)),  // "taken" skips over the loop.
-    DEF_BB(kDalvikByteCode, DEF_SUCC2(6, 7), DEF_PRED2(4, 6)),  // "taken" skips over the loop.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(5)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(5)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
-};
-
-GlobalValueNumberingTestTwoNestedLoops::GlobalValueNumberingTestTwoNestedLoops()
-    : GlobalValueNumberingTest() {
-  PrepareBasicBlocks(kTwoNestedLoopsBbs);
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, NonAliasingIFields) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-      { 3u, 1u, 3u, false, kDexMemAccessWord },
-      { 4u, 1u, 4u, false, kDexMemAccessShort },
-      { 5u, 1u, 5u, false, kDexMemAccessChar },
-      { 6u, 0u, 0u, false, kDexMemAccessShort },   // Unresolved.
-      { 7u, 1u, 7u, false, kDexMemAccessWord },
-      { 8u, 0u, 0u, false, kDexMemAccessWord },    // Unresolved.
-      { 9u, 1u, 9u, false, kDexMemAccessWord },
-      { 10u, 1u, 10u, false, kDexMemAccessWord },
-      { 11u, 1u, 11u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 100u),
-      DEF_IGET(3, Instruction::IGET, 1u, 100u, 0u),
-      DEF_IGET(6, Instruction::IGET, 2u, 100u, 0u),   // Same as at the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 200u),
-      DEF_IGET(4, Instruction::IGET, 4u, 200u, 1u),
-      DEF_IGET(6, Instruction::IGET, 5u, 200u, 1u),   // Same as at the left side.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 300u),
-      DEF_IGET(3, Instruction::IGET, 7u, 300u, 2u),
-      DEF_CONST(5, Instruction::CONST, 8u, 1000),
-      DEF_IPUT(5, Instruction::IPUT, 8u, 300u, 2u),
-      DEF_IGET(6, Instruction::IGET, 10u, 300u, 2u),  // Differs from the top and the CONST.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 400u),
-      DEF_IGET(3, Instruction::IGET, 12u, 400u, 3u),
-      DEF_CONST(3, Instruction::CONST, 13u, 2000),
-      DEF_IPUT(4, Instruction::IPUT, 13u, 400u, 3u),
-      DEF_IPUT(5, Instruction::IPUT, 13u, 400u, 3u),
-      DEF_IGET(6, Instruction::IGET, 16u, 400u, 3u),  // Differs from the top, equals the CONST.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 500u),
-      DEF_IGET(3, Instruction::IGET_SHORT, 18u, 500u, 4u),
-      DEF_IGET(3, Instruction::IGET_CHAR, 19u, 500u, 5u),
-      DEF_IPUT(4, Instruction::IPUT_SHORT, 20u, 500u, 6u),  // Clobbers field #4, not #5.
-      DEF_IGET(6, Instruction::IGET_SHORT, 21u, 500u, 4u),  // Differs from the top.
-      DEF_IGET(6, Instruction::IGET_CHAR, 22u, 500u, 5u),   // Same as the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 600u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 601u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 602u),
-      DEF_IGET(3, Instruction::IGET, 26u, 600u, 7u),
-      DEF_IGET(3, Instruction::IGET, 27u, 601u, 7u),
-      DEF_IPUT(4, Instruction::IPUT, 28u, 602u, 8u),  // Doesn't clobber field #7 for other refs.
-      DEF_IGET(6, Instruction::IGET, 29u, 600u, 7u),  // Same as the top.
-      DEF_IGET(6, Instruction::IGET, 30u, 601u, 7u),  // Same as the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 700u),
-      DEF_CONST(4, Instruction::CONST, 32u, 3000),
-      DEF_IPUT(4, Instruction::IPUT, 32u, 700u, 9u),
-      DEF_IPUT(4, Instruction::IPUT, 32u, 700u, 10u),
-      DEF_CONST(5, Instruction::CONST, 35u, 3001),
-      DEF_IPUT(5, Instruction::IPUT, 35u, 700u, 9u),
-      DEF_IPUT(5, Instruction::IPUT, 35u, 700u, 10u),
-      DEF_IGET(6, Instruction::IGET, 38u, 700u, 9u),
-      DEF_IGET(6, Instruction::IGET, 39u, 700u, 10u),  // Same value as read from field #9.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 800u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 801u),
-      DEF_CONST(4, Instruction::CONST, 42u, 3000),
-      DEF_IPUT(4, Instruction::IPUT, 42u, 800u, 11u),
-      DEF_IPUT(4, Instruction::IPUT, 42u, 801u, 11u),
-      DEF_CONST(5, Instruction::CONST, 45u, 3001),
-      DEF_IPUT(5, Instruction::IPUT, 45u, 800u, 11u),
-      DEF_IPUT(5, Instruction::IPUT, 45u, 801u, 11u),
-      DEF_IGET(6, Instruction::IGET, 48u, 800u, 11u),
-      DEF_IGET(6, Instruction::IGET, 49u, 801u, 11u),  // Same value as read from ref 46u.
-
-      // Invoke doesn't interfere with non-aliasing refs. There's one test above where a reference
-      // escapes in the left BB (we let a reference escape if we use it to store to an unresolved
-      // field) and the INVOKE in the right BB shouldn't interfere with that either.
-      DEF_INVOKE1(5, Instruction::INVOKE_STATIC, 48u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[1], value_names_[2]);
-
-  EXPECT_EQ(value_names_[4], value_names_[5]);
-
-  EXPECT_NE(value_names_[7], value_names_[10]);
-  EXPECT_NE(value_names_[8], value_names_[10]);
-
-  EXPECT_NE(value_names_[12], value_names_[16]);
-  EXPECT_EQ(value_names_[13], value_names_[16]);
-
-  EXPECT_NE(value_names_[18], value_names_[21]);
-  EXPECT_EQ(value_names_[19], value_names_[22]);
-
-  EXPECT_EQ(value_names_[26], value_names_[29]);
-  EXPECT_EQ(value_names_[27], value_names_[30]);
-
-  EXPECT_EQ(value_names_[38], value_names_[39]);
-
-  EXPECT_EQ(value_names_[48], value_names_[49]);
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, AliasingIFieldsSingleObject) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-      { 3u, 1u, 3u, false, kDexMemAccessWord },
-      { 4u, 1u, 4u, false, kDexMemAccessShort },
-      { 5u, 1u, 5u, false, kDexMemAccessChar },
-      { 6u, 0u, 0u, false, kDexMemAccessShort },  // Unresolved.
-      { 7u, 1u, 7u, false, kDexMemAccessWord },
-      { 8u, 1u, 8u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_IGET(3, Instruction::IGET, 0u, 100u, 0u),
-      DEF_IGET(6, Instruction::IGET, 1u, 100u, 0u),   // Same as at the top.
-
-      DEF_IGET(4, Instruction::IGET, 2u, 100u, 1u),
-      DEF_IGET(6, Instruction::IGET, 3u, 100u, 1u),   // Same as at the left side.
-
-      DEF_IGET(3, Instruction::IGET, 4u, 100u, 2u),
-      DEF_CONST(5, Instruction::CONST, 5u, 1000),
-      DEF_IPUT(5, Instruction::IPUT, 5u, 100u, 2u),
-      DEF_IGET(6, Instruction::IGET, 7u, 100u, 2u),   // Differs from the top and the CONST.
-
-      DEF_IGET(3, Instruction::IGET, 8u, 100u, 3u),
-      DEF_CONST(3, Instruction::CONST, 9u, 2000),
-      DEF_IPUT(4, Instruction::IPUT, 9u, 100u, 3u),
-      DEF_IPUT(5, Instruction::IPUT, 9u, 100u, 3u),
-      DEF_IGET(6, Instruction::IGET, 12u, 100u, 3u),  // Differs from the top, equals the CONST.
-
-      DEF_IGET(3, Instruction::IGET_SHORT, 13u, 100u, 4u),
-      DEF_IGET(3, Instruction::IGET_CHAR, 14u, 100u, 5u),
-      DEF_IPUT(4, Instruction::IPUT_SHORT, 15u, 100u, 6u),  // Clobbers field #4, not #5.
-      DEF_IGET(6, Instruction::IGET_SHORT, 16u, 100u, 4u),  // Differs from the top.
-      DEF_IGET(6, Instruction::IGET_CHAR, 17u, 100u, 5u),   // Same as the top.
-
-      DEF_CONST(4, Instruction::CONST, 18u, 3000),
-      DEF_IPUT(4, Instruction::IPUT, 18u, 100u, 7u),
-      DEF_IPUT(4, Instruction::IPUT, 18u, 100u, 8u),
-      DEF_CONST(5, Instruction::CONST, 21u, 3001),
-      DEF_IPUT(5, Instruction::IPUT, 21u, 100u, 7u),
-      DEF_IPUT(5, Instruction::IPUT, 21u, 100u, 8u),
-      DEF_IGET(6, Instruction::IGET, 24u, 100u, 7u),
-      DEF_IGET(6, Instruction::IGET, 25u, 100u, 8u),  // Same value as read from field #7.
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-
-  EXPECT_EQ(value_names_[2], value_names_[3]);
-
-  EXPECT_NE(value_names_[4], value_names_[7]);
-  EXPECT_NE(value_names_[5], value_names_[7]);
-
-  EXPECT_NE(value_names_[8], value_names_[12]);
-  EXPECT_EQ(value_names_[9], value_names_[12]);
-
-  EXPECT_NE(value_names_[13], value_names_[16]);
-  EXPECT_EQ(value_names_[14], value_names_[17]);
-
-  EXPECT_EQ(value_names_[24], value_names_[25]);
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, AliasingIFieldsTwoObjects) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-      { 3u, 1u, 3u, false, kDexMemAccessWord },
-      { 4u, 1u, 4u, false, kDexMemAccessShort },
-      { 5u, 1u, 5u, false, kDexMemAccessChar },
-      { 6u, 0u, 0u, false, kDexMemAccessShort },   // Unresolved.
-      { 7u, 1u, 7u, false, kDexMemAccessWord },
-      { 8u, 1u, 8u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_IGET(3, Instruction::IGET, 0u, 100u, 0u),
-      DEF_IPUT(4, Instruction::IPUT, 1u, 101u, 0u),   // May alias with the IGET at the top.
-      DEF_IGET(6, Instruction::IGET, 2u, 100u, 0u),   // Differs from the top.
-
-      DEF_IGET(3, Instruction::IGET, 3u, 100u, 1u),
-      DEF_IPUT(5, Instruction::IPUT, 3u, 101u, 1u),   // If aliasing, stores the same value.
-      DEF_IGET(6, Instruction::IGET, 5u, 100u, 1u),   // Same as the top.
-
-      DEF_IGET(3, Instruction::IGET, 6u, 100u, 2u),
-      DEF_CONST(5, Instruction::CONST, 7u, 1000),
-      DEF_IPUT(5, Instruction::IPUT, 7u, 101u, 2u),
-      DEF_IGET(6, Instruction::IGET, 9u, 100u, 2u),   // Differs from the top and the CONST.
-
-      DEF_IGET(3, Instruction::IGET, 10u, 100u, 3u),
-      DEF_CONST(3, Instruction::CONST, 11u, 2000),
-      DEF_IPUT(4, Instruction::IPUT, 11u, 101u, 3u),
-      DEF_IPUT(5, Instruction::IPUT, 11u, 101u, 3u),
-      DEF_IGET(6, Instruction::IGET, 14u, 100u, 3u),  // Differs from the top and the CONST.
-
-      DEF_IGET(3, Instruction::IGET_SHORT, 15u, 100u, 4u),
-      DEF_IGET(3, Instruction::IGET_CHAR, 16u, 100u, 5u),
-      DEF_IPUT(4, Instruction::IPUT_SHORT, 17u, 101u, 6u),  // Clobbers field #4, not #5.
-      DEF_IGET(6, Instruction::IGET_SHORT, 18u, 100u, 4u),  // Differs from the top.
-      DEF_IGET(6, Instruction::IGET_CHAR, 19u, 100u, 5u),   // Same as the top.
-
-      DEF_CONST(4, Instruction::CONST, 20u, 3000),
-      DEF_IPUT(4, Instruction::IPUT, 20u, 100u, 7u),
-      DEF_IPUT(4, Instruction::IPUT, 20u, 101u, 8u),
-      DEF_CONST(5, Instruction::CONST, 23u, 3001),
-      DEF_IPUT(5, Instruction::IPUT, 23u, 100u, 7u),
-      DEF_IPUT(5, Instruction::IPUT, 23u, 101u, 8u),
-      DEF_IGET(6, Instruction::IGET, 26u, 100u, 7u),
-      DEF_IGET(6, Instruction::IGET, 27u, 101u, 8u),  // Same value as read from field #7.
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[2]);
-
-  EXPECT_EQ(value_names_[3], value_names_[5]);
-
-  EXPECT_NE(value_names_[6], value_names_[9]);
-  EXPECT_NE(value_names_[7], value_names_[9]);
-
-  EXPECT_NE(value_names_[10], value_names_[14]);
-  EXPECT_NE(value_names_[11], value_names_[14]);
-
-  EXPECT_NE(value_names_[15], value_names_[18]);
-  EXPECT_EQ(value_names_[16], value_names_[19]);
-
-  EXPECT_EQ(value_names_[26], value_names_[27]);
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, SFields) {
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-      { 3u, 1u, 3u, false, kDexMemAccessWord },
-      { 4u, 1u, 4u, false, kDexMemAccessShort },
-      { 5u, 1u, 5u, false, kDexMemAccessChar },
-      { 6u, 0u, 0u, false, kDexMemAccessShort },   // Unresolved.
-      { 7u, 1u, 7u, false, kDexMemAccessWord },
-      { 8u, 1u, 8u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_SGET(3, Instruction::SGET, 0u, 0u),
-      DEF_SGET(6, Instruction::SGET, 1u, 0u),         // Same as at the top.
-
-      DEF_SGET(4, Instruction::SGET, 2u, 1u),
-      DEF_SGET(6, Instruction::SGET, 3u, 1u),         // Same as at the left side.
-
-      DEF_SGET(3, Instruction::SGET, 4u, 2u),
-      DEF_CONST(5, Instruction::CONST, 5u, 100),
-      DEF_SPUT(5, Instruction::SPUT, 5u, 2u),
-      DEF_SGET(6, Instruction::SGET, 7u, 2u),         // Differs from the top and the CONST.
-
-      DEF_SGET(3, Instruction::SGET, 8u, 3u),
-      DEF_CONST(3, Instruction::CONST, 9u, 200),
-      DEF_SPUT(4, Instruction::SPUT, 9u, 3u),
-      DEF_SPUT(5, Instruction::SPUT, 9u, 3u),
-      DEF_SGET(6, Instruction::SGET, 12u, 3u),        // Differs from the top, equals the CONST.
-
-      DEF_SGET(3, Instruction::SGET_SHORT, 13u, 4u),
-      DEF_SGET(3, Instruction::SGET_CHAR, 14u, 5u),
-      DEF_SPUT(4, Instruction::SPUT_SHORT, 15u, 6u),  // Clobbers field #4, not #5.
-      DEF_SGET(6, Instruction::SGET_SHORT, 16u, 4u),  // Differs from the top.
-      DEF_SGET(6, Instruction::SGET_CHAR, 17u, 5u),   // Same as the top.
-
-      DEF_CONST(4, Instruction::CONST, 18u, 300),
-      DEF_SPUT(4, Instruction::SPUT, 18u, 7u),
-      DEF_SPUT(4, Instruction::SPUT, 18u, 8u),
-      DEF_CONST(5, Instruction::CONST, 21u, 301),
-      DEF_SPUT(5, Instruction::SPUT, 21u, 7u),
-      DEF_SPUT(5, Instruction::SPUT, 21u, 8u),
-      DEF_SGET(6, Instruction::SGET, 24u, 7u),
-      DEF_SGET(6, Instruction::SGET, 25u, 8u),        // Same value as read from field #7.
-  };
-
-  PrepareSFields(sfields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-
-  EXPECT_EQ(value_names_[2], value_names_[3]);
-
-  EXPECT_NE(value_names_[4], value_names_[7]);
-  EXPECT_NE(value_names_[5], value_names_[7]);
-
-  EXPECT_NE(value_names_[8], value_names_[12]);
-  EXPECT_EQ(value_names_[9], value_names_[12]);
-
-  EXPECT_NE(value_names_[13], value_names_[16]);
-  EXPECT_EQ(value_names_[14], value_names_[17]);
-
-  EXPECT_EQ(value_names_[24], value_names_[25]);
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, NonAliasingArrays) {
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 100u),
-      DEF_AGET(3, Instruction::AGET, 1u, 100u, 101u),
-      DEF_AGET(6, Instruction::AGET, 2u, 100u, 101u),   // Same as at the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 200u),
-      DEF_AGET(4, Instruction::AGET, 4u, 200u, 201u),
-      DEF_AGET(6, Instruction::AGET, 5u, 200u, 201u),   // Same as at the left side.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 300u),
-      DEF_AGET(3, Instruction::AGET, 7u, 300u, 301u),
-      DEF_CONST(5, Instruction::CONST, 8u, 1000),
-      DEF_APUT(5, Instruction::APUT, 8u, 300u, 301u),
-      DEF_AGET(6, Instruction::AGET, 10u, 300u, 301u),  // Differs from the top and the CONST.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 400u),
-      DEF_AGET(3, Instruction::AGET, 12u, 400u, 401u),
-      DEF_CONST(3, Instruction::CONST, 13u, 2000),
-      DEF_APUT(4, Instruction::APUT, 13u, 400u, 401u),
-      DEF_APUT(5, Instruction::APUT, 13u, 400u, 401u),
-      DEF_AGET(6, Instruction::AGET, 16u, 400u, 401u),  // Differs from the top, equals the CONST.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 500u),
-      DEF_AGET(3, Instruction::AGET, 18u, 500u, 501u),
-      DEF_APUT(4, Instruction::APUT, 19u, 500u, 502u),  // Clobbers value at index 501u.
-      DEF_AGET(6, Instruction::AGET, 20u, 500u, 501u),  // Differs from the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 600u),
-      DEF_CONST(4, Instruction::CONST, 22u, 3000),
-      DEF_APUT(4, Instruction::APUT, 22u, 600u, 601u),
-      DEF_APUT(4, Instruction::APUT, 22u, 600u, 602u),
-      DEF_CONST(5, Instruction::CONST, 25u, 3001),
-      DEF_APUT(5, Instruction::APUT, 25u, 600u, 601u),
-      DEF_APUT(5, Instruction::APUT, 25u, 600u, 602u),
-      DEF_AGET(6, Instruction::AGET, 28u, 600u, 601u),
-      DEF_AGET(6, Instruction::AGET, 29u, 600u, 602u),  // Same value as read from index 601u.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 700u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 701u),
-      DEF_AGET(3, Instruction::AGET, 32u, 700u, 702u),
-      DEF_APUT(4, Instruction::APUT, 33u, 701u, 702u),  // Doesn't interfere with unrelated array.
-      DEF_AGET(6, Instruction::AGET, 34u, 700u, 702u),  // Same value as at the top.
-  };
-
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[1], value_names_[2]);
-
-  EXPECT_EQ(value_names_[4], value_names_[5]);
-
-  EXPECT_NE(value_names_[7], value_names_[10]);
-  EXPECT_NE(value_names_[8], value_names_[10]);
-
-  EXPECT_NE(value_names_[12], value_names_[16]);
-  EXPECT_EQ(value_names_[13], value_names_[16]);
-
-  EXPECT_NE(value_names_[18], value_names_[20]);
-
-  EXPECT_NE(value_names_[28], value_names_[22]);
-  EXPECT_NE(value_names_[28], value_names_[25]);
-  EXPECT_EQ(value_names_[28], value_names_[29]);
-
-  EXPECT_EQ(value_names_[32], value_names_[34]);
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, AliasingArrays) {
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      // NOTE: We're also testing that these tests really do not interfere with each other.
-
-      DEF_AGET(3, Instruction::AGET_BOOLEAN, 0u, 100u, 101u),
-      DEF_AGET(6, Instruction::AGET_BOOLEAN, 1u, 100u, 101u),  // Same as at the top.
-
-      DEF_AGET(4, Instruction::AGET_OBJECT, 2u, 200u, 201u),
-      DEF_AGET(6, Instruction::AGET_OBJECT, 3u, 200u, 201u),  // Same as at the left side.
-
-      DEF_AGET(3, Instruction::AGET_WIDE, 4u, 300u, 301u),
-      DEF_CONST(5, Instruction::CONST_WIDE, 6u, 1000),
-      DEF_APUT(5, Instruction::APUT_WIDE, 6u, 300u, 301u),
-      DEF_AGET(6, Instruction::AGET_WIDE, 8u, 300u, 301u),  // Differs from the top and the CONST.
-
-      DEF_AGET(3, Instruction::AGET_SHORT, 10u, 400u, 401u),
-      DEF_CONST(3, Instruction::CONST, 11u, 2000),
-      DEF_APUT(4, Instruction::APUT_SHORT, 11u, 400u, 401u),
-      DEF_APUT(5, Instruction::APUT_SHORT, 11u, 400u, 401u),
-      DEF_AGET(6, Instruction::AGET_SHORT, 12u, 400u, 401u),  // Differs from the top, == CONST.
-
-      DEF_AGET(3, Instruction::AGET_CHAR, 13u, 500u, 501u),
-      DEF_APUT(4, Instruction::APUT_CHAR, 14u, 500u, 502u),  // Clobbers value at index 501u.
-      DEF_AGET(6, Instruction::AGET_CHAR, 15u, 500u, 501u),  // Differs from the top.
-
-      DEF_AGET(3, Instruction::AGET_BYTE, 16u, 600u, 602u),
-      DEF_APUT(4, Instruction::APUT_BYTE, 17u, 601u, 602u),  // Clobbers values in array 600u.
-      DEF_AGET(6, Instruction::AGET_BYTE, 18u, 600u, 602u),  // Differs from the top.
-
-      DEF_CONST(4, Instruction::CONST, 19u, 3000),
-      DEF_APUT(4, Instruction::APUT, 19u, 700u, 701u),
-      DEF_APUT(4, Instruction::APUT, 19u, 700u, 702u),
-      DEF_CONST(5, Instruction::CONST, 22u, 3001),
-      DEF_APUT(5, Instruction::APUT, 22u, 700u, 701u),
-      DEF_APUT(5, Instruction::APUT, 22u, 700u, 702u),
-      DEF_AGET(6, Instruction::AGET, 25u, 700u, 701u),
-      DEF_AGET(6, Instruction::AGET, 26u, 700u, 702u),  // Same value as read from index 701u.
-  };
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 4, 6, 8 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-
-  EXPECT_EQ(value_names_[2], value_names_[3]);
-
-  EXPECT_NE(value_names_[4], value_names_[7]);
-  EXPECT_NE(value_names_[5], value_names_[7]);
-
-  EXPECT_NE(value_names_[8], value_names_[12]);
-  EXPECT_EQ(value_names_[9], value_names_[12]);
-
-  EXPECT_NE(value_names_[13], value_names_[15]);
-
-  EXPECT_NE(value_names_[16], value_names_[18]);
-
-  EXPECT_NE(value_names_[25], value_names_[19]);
-  EXPECT_NE(value_names_[25], value_names_[22]);
-  EXPECT_EQ(value_names_[25], value_names_[26]);
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, Phi) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000),
-      DEF_CONST(4, Instruction::CONST, 1u, 2000),
-      DEF_CONST(5, Instruction::CONST, 2u, 3000),
-      DEF_MOVE(4, Instruction::MOVE, 3u, 0u),
-      DEF_MOVE(4, Instruction::MOVE, 4u, 1u),
-      DEF_MOVE(5, Instruction::MOVE, 5u, 0u),
-      DEF_MOVE(5, Instruction::MOVE, 6u, 2u),
-      DEF_PHI2(6, 7u, 3u, 5u),    // Same as CONST 0u (1000).
-      DEF_PHI2(6, 8u, 3u, 0u),    // Same as CONST 0u (1000).
-      DEF_PHI2(6, 9u, 0u, 5u),    // Same as CONST 0u (1000).
-      DEF_PHI2(6, 10u, 4u, 5u),   // Merge 1u (2000) and 0u (1000).
-      DEF_PHI2(6, 11u, 1u, 5u),   // Merge 1u (2000) and 0u (1000).
-      DEF_PHI2(6, 12u, 4u, 0u),   // Merge 1u (2000) and 0u (1000).
-      DEF_PHI2(6, 13u, 1u, 0u),   // Merge 1u (2000) and 0u (1000).
-      DEF_PHI2(6, 14u, 3u, 6u),   // Merge 0u (1000) and 2u (3000).
-      DEF_PHI2(6, 15u, 0u, 6u),   // Merge 0u (1000) and 2u (3000).
-      DEF_PHI2(6, 16u, 3u, 2u),   // Merge 0u (1000) and 2u (3000).
-      DEF_PHI2(6, 17u, 0u, 2u),   // Merge 0u (1000) and 2u (3000).
-      DEF_PHI2(6, 18u, 4u, 6u),   // Merge 1u (2000) and 2u (3000).
-      DEF_PHI2(6, 19u, 1u, 6u),   // Merge 1u (2000) and 2u (3000).
-      DEF_PHI2(6, 20u, 4u, 2u),   // Merge 1u (2000) and 2u (3000).
-      DEF_PHI2(6, 21u, 1u, 2u),   // Merge 1u (2000) and 2u (3000).
-  };
-
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[7]);
-  EXPECT_EQ(value_names_[0], value_names_[8]);
-  EXPECT_EQ(value_names_[0], value_names_[9]);
-  EXPECT_NE(value_names_[10], value_names_[0]);
-  EXPECT_NE(value_names_[10], value_names_[1]);
-  EXPECT_NE(value_names_[10], value_names_[2]);
-  EXPECT_EQ(value_names_[10], value_names_[11]);
-  EXPECT_EQ(value_names_[10], value_names_[12]);
-  EXPECT_EQ(value_names_[10], value_names_[13]);
-  EXPECT_NE(value_names_[14], value_names_[0]);
-  EXPECT_NE(value_names_[14], value_names_[1]);
-  EXPECT_NE(value_names_[14], value_names_[2]);
-  EXPECT_NE(value_names_[14], value_names_[10]);
-  EXPECT_EQ(value_names_[14], value_names_[15]);
-  EXPECT_EQ(value_names_[14], value_names_[16]);
-  EXPECT_EQ(value_names_[14], value_names_[17]);
-  EXPECT_NE(value_names_[18], value_names_[0]);
-  EXPECT_NE(value_names_[18], value_names_[1]);
-  EXPECT_NE(value_names_[18], value_names_[2]);
-  EXPECT_NE(value_names_[18], value_names_[10]);
-  EXPECT_NE(value_names_[18], value_names_[14]);
-  EXPECT_EQ(value_names_[18], value_names_[19]);
-  EXPECT_EQ(value_names_[18], value_names_[20]);
-  EXPECT_EQ(value_names_[18], value_names_[21]);
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, PhiWide) {
-  static const MIRDef mirs[] = {
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000),
-      DEF_CONST_WIDE(4, Instruction::CONST_WIDE, 2u, 2000),
-      DEF_CONST_WIDE(5, Instruction::CONST_WIDE, 4u, 3000),
-      DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 6u, 0u),
-      DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 8u, 2u),
-      DEF_MOVE_WIDE(5, Instruction::MOVE_WIDE, 10u, 0u),
-      DEF_MOVE_WIDE(5, Instruction::MOVE_WIDE, 12u, 4u),
-      DEF_PHI2(6, 14u, 6u, 10u),    // Same as CONST_WIDE 0u (1000).
-      DEF_PHI2(6, 15u, 7u, 11u),    // Same as CONST_WIDE 0u (1000), high word.
-      DEF_PHI2(6, 16u, 6u,  0u),    // Same as CONST_WIDE 0u (1000).
-      DEF_PHI2(6, 17u, 7u,  1u),    // Same as CONST_WIDE 0u (1000), high word.
-      DEF_PHI2(6, 18u, 0u, 10u),    // Same as CONST_WIDE 0u (1000).
-      DEF_PHI2(6, 19u, 1u, 11u),    // Same as CONST_WIDE 0u (1000), high word.
-      DEF_PHI2(6, 20u, 8u, 10u),    // Merge 2u (2000) and 0u (1000).
-      DEF_PHI2(6, 21u, 9u, 11u),    // Merge 2u (2000) and 0u (1000), high word.
-      DEF_PHI2(6, 22u, 2u, 10u),    // Merge 2u (2000) and 0u (1000).
-      DEF_PHI2(6, 23u, 3u, 11u),    // Merge 2u (2000) and 0u (1000), high word.
-      DEF_PHI2(6, 24u, 8u,  0u),    // Merge 2u (2000) and 0u (1000).
-      DEF_PHI2(6, 25u, 9u,  1u),    // Merge 2u (2000) and 0u (1000), high word.
-      DEF_PHI2(6, 26u, 2u,  0u),    // Merge 2u (2000) and 0u (1000).
-      DEF_PHI2(6, 27u, 3u,  1u),    // Merge 2u (2000) and 0u (1000), high word.
-      DEF_PHI2(6, 28u, 6u, 12u),    // Merge 0u (1000) and 4u (3000).
-      DEF_PHI2(6, 29u, 7u, 13u),    // Merge 0u (1000) and 4u (3000), high word.
-      DEF_PHI2(6, 30u, 0u, 12u),    // Merge 0u (1000) and 4u (3000).
-      DEF_PHI2(6, 31u, 1u, 13u),    // Merge 0u (1000) and 4u (3000), high word.
-      DEF_PHI2(6, 32u, 6u,  4u),    // Merge 0u (1000) and 4u (3000).
-      DEF_PHI2(6, 33u, 7u,  5u),    // Merge 0u (1000) and 4u (3000), high word.
-      DEF_PHI2(6, 34u, 0u,  4u),    // Merge 0u (1000) and 4u (3000).
-      DEF_PHI2(6, 35u, 1u,  5u),    // Merge 0u (1000) and 4u (3000), high word.
-      DEF_PHI2(6, 36u, 8u, 12u),    // Merge 2u (2000) and 4u (3000).
-      DEF_PHI2(6, 37u, 9u, 13u),    // Merge 2u (2000) and 4u (3000), high word.
-      DEF_PHI2(6, 38u, 2u, 12u),    // Merge 2u (2000) and 4u (3000).
-      DEF_PHI2(6, 39u, 3u, 13u),    // Merge 2u (2000) and 4u (3000), high word.
-      DEF_PHI2(6, 40u, 8u,  4u),    // Merge 2u (2000) and 4u (3000).
-      DEF_PHI2(6, 41u, 9u,  5u),    // Merge 2u (2000) and 4u (3000), high word.
-      DEF_PHI2(6, 42u, 2u,  4u),    // Merge 2u (2000) and 4u (3000).
-      DEF_PHI2(6, 43u, 3u,  5u),    // Merge 2u (2000) and 4u (3000), high word.
-  };
-
-  PrepareMIRs(mirs);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    if ((mirs_[i].ssa_rep->defs[0] % 2) == 0) {
-      const int32_t wide_sregs[] = { mirs_[i].ssa_rep->defs[0] };
-      MarkAsWideSRegs(wide_sregs);
-    }
-  }
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[7]);
-  EXPECT_EQ(value_names_[0], value_names_[9]);
-  EXPECT_EQ(value_names_[0], value_names_[11]);
-  EXPECT_NE(value_names_[13], value_names_[0]);
-  EXPECT_NE(value_names_[13], value_names_[1]);
-  EXPECT_NE(value_names_[13], value_names_[2]);
-  EXPECT_EQ(value_names_[13], value_names_[15]);
-  EXPECT_EQ(value_names_[13], value_names_[17]);
-  EXPECT_EQ(value_names_[13], value_names_[19]);
-  EXPECT_NE(value_names_[21], value_names_[0]);
-  EXPECT_NE(value_names_[21], value_names_[1]);
-  EXPECT_NE(value_names_[21], value_names_[2]);
-  EXPECT_NE(value_names_[21], value_names_[13]);
-  EXPECT_EQ(value_names_[21], value_names_[23]);
-  EXPECT_EQ(value_names_[21], value_names_[25]);
-  EXPECT_EQ(value_names_[21], value_names_[27]);
-  EXPECT_NE(value_names_[29], value_names_[0]);
-  EXPECT_NE(value_names_[29], value_names_[1]);
-  EXPECT_NE(value_names_[29], value_names_[2]);
-  EXPECT_NE(value_names_[29], value_names_[13]);
-  EXPECT_NE(value_names_[29], value_names_[21]);
-  EXPECT_EQ(value_names_[29], value_names_[31]);
-  EXPECT_EQ(value_names_[29], value_names_[33]);
-  EXPECT_EQ(value_names_[29], value_names_[35]);
-  // High words should get kNoValue.
-  EXPECT_EQ(value_names_[8], kNoValue);
-  EXPECT_EQ(value_names_[10], kNoValue);
-  EXPECT_EQ(value_names_[12], kNoValue);
-  EXPECT_EQ(value_names_[14], kNoValue);
-  EXPECT_EQ(value_names_[16], kNoValue);
-  EXPECT_EQ(value_names_[18], kNoValue);
-  EXPECT_EQ(value_names_[20], kNoValue);
-  EXPECT_EQ(value_names_[22], kNoValue);
-  EXPECT_EQ(value_names_[24], kNoValue);
-  EXPECT_EQ(value_names_[26], kNoValue);
-  EXPECT_EQ(value_names_[28], kNoValue);
-  EXPECT_EQ(value_names_[30], kNoValue);
-  EXPECT_EQ(value_names_[32], kNoValue);
-  EXPECT_EQ(value_names_[34], kNoValue);
-  EXPECT_EQ(value_names_[36], kNoValue);
-}
-
-TEST_F(GlobalValueNumberingTestLoop, NonAliasingIFields) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-      { 3u, 1u, 3u, false, kDexMemAccessWord },
-      { 4u, 1u, 4u, false, kDexMemAccessWord },
-      { 5u, 1u, 5u, false, kDexMemAccessShort },
-      { 6u, 1u, 6u, false, kDexMemAccessChar },
-      { 7u, 0u, 0u, false, kDexMemAccessShort },   // Unresolved.
-      { 8u, 1u, 8u, false, kDexMemAccessWord },
-      { 9u, 0u, 0u, false, kDexMemAccessWord },    // Unresolved.
-      { 10u, 1u, 10u, false, kDexMemAccessWord },
-      { 11u, 1u, 11u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 100u),
-      DEF_IGET(3, Instruction::IGET, 1u, 100u, 0u),
-      DEF_IGET(4, Instruction::IGET, 2u, 100u, 0u),   // Same as at the top.
-      DEF_IGET(5, Instruction::IGET, 3u, 100u, 0u),   // Same as at the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 200u),
-      DEF_IGET(3, Instruction::IGET, 5u, 200u, 1u),
-      DEF_IGET(4, Instruction::IGET, 6u, 200u, 1u),   // Differs from top...
-      DEF_IPUT(4, Instruction::IPUT, 7u, 200u, 1u),   // Because of this IPUT.
-      DEF_IGET(5, Instruction::IGET, 8u, 200u, 1u),   // Differs from top and the loop IGET.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 300u),
-      DEF_IGET(3, Instruction::IGET, 10u, 300u, 2u),
-      DEF_IPUT(4, Instruction::IPUT, 11u, 300u, 2u),  // Because of this IPUT...
-      DEF_IGET(4, Instruction::IGET, 12u, 300u, 2u),  // Differs from top.
-      DEF_IGET(5, Instruction::IGET, 13u, 300u, 2u),  // Differs from top but same as the loop IGET.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 400u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 401u),
-      DEF_CONST(3, Instruction::CONST, 16u, 3000),
-      DEF_IPUT(3, Instruction::IPUT, 16u, 400u, 3u),
-      DEF_IPUT(3, Instruction::IPUT, 16u, 400u, 4u),
-      DEF_IPUT(3, Instruction::IPUT, 16u, 401u, 3u),
-      DEF_IGET(4, Instruction::IGET, 20u, 400u, 3u),  // Differs from 16u and 23u.
-      DEF_IGET(4, Instruction::IGET, 21u, 400u, 4u),  // Same as 20u.
-      DEF_IGET(4, Instruction::IGET, 22u, 401u, 3u),  // Same as 20u.
-      DEF_CONST(4, Instruction::CONST, 23u, 4000),
-      DEF_IPUT(4, Instruction::IPUT, 23u, 400u, 3u),
-      DEF_IPUT(4, Instruction::IPUT, 23u, 400u, 4u),
-      DEF_IPUT(4, Instruction::IPUT, 23u, 401u, 3u),
-      DEF_IGET(5, Instruction::IGET, 27u, 400u, 3u),  // Differs from 16u and 20u...
-      DEF_IGET(5, Instruction::IGET, 28u, 400u, 4u),  // and same as the CONST 23u
-      DEF_IGET(5, Instruction::IGET, 29u, 401u, 3u),  // and same as the CONST 23u.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 500u),
-      DEF_IGET(3, Instruction::IGET_SHORT, 31u, 500u, 5u),
-      DEF_IGET(3, Instruction::IGET_CHAR, 32u, 500u, 6u),
-      DEF_IPUT(4, Instruction::IPUT_SHORT, 33u, 500u, 7u),  // Clobbers field #5, not #6.
-      DEF_IGET(5, Instruction::IGET_SHORT, 34u, 500u, 5u),  // Differs from the top.
-      DEF_IGET(5, Instruction::IGET_CHAR, 35u, 500u, 6u),   // Same as the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 600u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 601u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 602u),
-      DEF_IGET(3, Instruction::IGET, 39u, 600u, 8u),
-      DEF_IGET(3, Instruction::IGET, 40u, 601u, 8u),
-      DEF_IPUT(4, Instruction::IPUT, 41u, 602u, 9u),  // Doesn't clobber field #8 for other refs.
-      DEF_IGET(5, Instruction::IGET, 42u, 600u, 8u),  // Same as the top.
-      DEF_IGET(5, Instruction::IGET, 43u, 601u, 8u),  // Same as the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 700u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 701u),
-      DEF_CONST(3, Instruction::CONST, 46u, 3000),
-      DEF_IPUT(3, Instruction::IPUT, 46u, 700u, 10u),
-      DEF_IPUT(3, Instruction::IPUT, 46u, 700u, 11u),
-      DEF_IPUT(3, Instruction::IPUT, 46u, 701u, 10u),
-      DEF_IGET(4, Instruction::IGET, 50u, 700u, 10u),  // Differs from the CONSTs 46u and 53u.
-      DEF_IGET(4, Instruction::IGET, 51u, 700u, 11u),  // Same as 50u.
-      DEF_IGET(4, Instruction::IGET, 52u, 701u, 10u),  // Same as 50u.
-      DEF_CONST(4, Instruction::CONST, 53u, 3001),
-      DEF_IPUT(4, Instruction::IPUT, 53u, 700u, 10u),
-      DEF_IPUT(4, Instruction::IPUT, 53u, 700u, 11u),
-      DEF_IPUT(4, Instruction::IPUT, 53u, 701u, 10u),
-      DEF_IGET(5, Instruction::IGET, 57u, 700u, 10u),  // Same as the CONST 53u.
-      DEF_IGET(5, Instruction::IGET, 58u, 700u, 11u),  // Same as the CONST 53u.
-      DEF_IGET(5, Instruction::IGET, 59u, 701u, 10u),  // Same as the CONST 53u.
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[1], value_names_[2]);
-  EXPECT_EQ(value_names_[1], value_names_[3]);
-
-  EXPECT_NE(value_names_[5], value_names_[6]);
-  EXPECT_NE(value_names_[5], value_names_[7]);
-  EXPECT_NE(value_names_[6], value_names_[7]);
-
-  EXPECT_NE(value_names_[10], value_names_[12]);
-  EXPECT_EQ(value_names_[12], value_names_[13]);
-
-  EXPECT_NE(value_names_[20], value_names_[16]);
-  EXPECT_NE(value_names_[20], value_names_[23]);
-  EXPECT_EQ(value_names_[20], value_names_[21]);
-  EXPECT_EQ(value_names_[20], value_names_[22]);
-  EXPECT_NE(value_names_[27], value_names_[16]);
-  EXPECT_NE(value_names_[27], value_names_[20]);
-  EXPECT_EQ(value_names_[27], value_names_[28]);
-  EXPECT_EQ(value_names_[27], value_names_[29]);
-
-  EXPECT_NE(value_names_[31], value_names_[34]);
-  EXPECT_EQ(value_names_[32], value_names_[35]);
-
-  EXPECT_EQ(value_names_[39], value_names_[42]);
-  EXPECT_EQ(value_names_[40], value_names_[43]);
-
-  EXPECT_NE(value_names_[50], value_names_[46]);
-  EXPECT_NE(value_names_[50], value_names_[53]);
-  EXPECT_EQ(value_names_[50], value_names_[51]);
-  EXPECT_EQ(value_names_[50], value_names_[52]);
-  EXPECT_EQ(value_names_[57], value_names_[53]);
-  EXPECT_EQ(value_names_[58], value_names_[53]);
-  EXPECT_EQ(value_names_[59], value_names_[53]);
-}
-
-TEST_F(GlobalValueNumberingTestLoop, AliasingIFieldsSingleObject) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-      { 3u, 1u, 3u, false, kDexMemAccessWord },
-      { 4u, 1u, 4u, false, kDexMemAccessWord },
-      { 5u, 1u, 5u, false, kDexMemAccessShort },
-      { 6u, 1u, 6u, false, kDexMemAccessChar },
-      { 7u, 0u, 0u, false, kDexMemAccessShort },   // Unresolved.
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_IGET(3, Instruction::IGET, 0u, 100u, 0u),
-      DEF_IGET(4, Instruction::IGET, 1u, 100u, 0u),   // Same as at the top.
-      DEF_IGET(5, Instruction::IGET, 2u, 100u, 0u),   // Same as at the top.
-
-      DEF_IGET(3, Instruction::IGET, 3u, 100u, 1u),
-      DEF_IGET(4, Instruction::IGET, 4u, 100u, 1u),   // Differs from top...
-      DEF_IPUT(4, Instruction::IPUT, 5u, 100u, 1u),   // Because of this IPUT.
-      DEF_IGET(5, Instruction::IGET, 6u, 100u, 1u),   // Differs from top and the loop IGET.
-
-      DEF_IGET(3, Instruction::IGET, 7u, 100u, 2u),
-      DEF_IPUT(4, Instruction::IPUT, 8u, 100u, 2u),   // Because of this IPUT...
-      DEF_IGET(4, Instruction::IGET, 9u, 100u, 2u),   // Differs from top.
-      DEF_IGET(5, Instruction::IGET, 10u, 100u, 2u),  // Differs from top but same as the loop IGET.
-
-      DEF_CONST(3, Instruction::CONST, 11u, 3000),
-      DEF_IPUT(3, Instruction::IPUT, 11u, 100u, 3u),
-      DEF_IPUT(3, Instruction::IPUT, 11u, 100u, 4u),
-      DEF_IGET(4, Instruction::IGET, 14u, 100u, 3u),  // Differs from 11u and 16u.
-      DEF_IGET(4, Instruction::IGET, 15u, 100u, 4u),  // Same as 14u.
-      DEF_CONST(4, Instruction::CONST, 16u, 4000),
-      DEF_IPUT(4, Instruction::IPUT, 16u, 100u, 3u),
-      DEF_IPUT(4, Instruction::IPUT, 16u, 100u, 4u),
-      DEF_IGET(5, Instruction::IGET, 19u, 100u, 3u),  // Differs from 11u and 14u...
-      DEF_IGET(5, Instruction::IGET, 20u, 100u, 4u),  // and same as the CONST 16u.
-
-      DEF_IGET(3, Instruction::IGET_SHORT, 21u, 100u, 5u),
-      DEF_IGET(3, Instruction::IGET_CHAR, 22u, 100u, 6u),
-      DEF_IPUT(4, Instruction::IPUT_SHORT, 23u, 100u, 7u),  // Clobbers field #5, not #6.
-      DEF_IGET(5, Instruction::IGET_SHORT, 24u, 100u, 5u),  // Differs from the top.
-      DEF_IGET(5, Instruction::IGET_CHAR, 25u, 100u, 6u),   // Same as the top.
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  EXPECT_NE(value_names_[3], value_names_[4]);
-  EXPECT_NE(value_names_[3], value_names_[6]);
-  EXPECT_NE(value_names_[4], value_names_[6]);
-
-  EXPECT_NE(value_names_[7], value_names_[9]);
-  EXPECT_EQ(value_names_[9], value_names_[10]);
-
-  EXPECT_NE(value_names_[14], value_names_[11]);
-  EXPECT_NE(value_names_[14], value_names_[16]);
-  EXPECT_EQ(value_names_[14], value_names_[15]);
-  EXPECT_NE(value_names_[19], value_names_[11]);
-  EXPECT_NE(value_names_[19], value_names_[14]);
-  EXPECT_EQ(value_names_[19], value_names_[16]);
-  EXPECT_EQ(value_names_[19], value_names_[20]);
-
-  EXPECT_NE(value_names_[21], value_names_[24]);
-  EXPECT_EQ(value_names_[22], value_names_[25]);
-}
-
-TEST_F(GlobalValueNumberingTestLoop, AliasingIFieldsTwoObjects) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-      { 3u, 1u, 3u, false, kDexMemAccessShort },
-      { 4u, 1u, 4u, false, kDexMemAccessChar },
-      { 5u, 0u, 0u, false, kDexMemAccessShort },   // Unresolved.
-      { 6u, 1u, 6u, false, kDexMemAccessWord },
-      { 7u, 1u, 7u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_IGET(3, Instruction::IGET, 0u, 100u, 0u),
-      DEF_IPUT(4, Instruction::IPUT, 1u, 101u, 0u),   // May alias with the IGET at the top.
-      DEF_IGET(5, Instruction::IGET, 2u, 100u, 0u),   // Differs from the top.
-
-      DEF_IGET(3, Instruction::IGET, 3u, 100u, 1u),
-      DEF_IPUT(4, Instruction::IPUT, 3u, 101u, 1u),   // If aliasing, stores the same value.
-      DEF_IGET(5, Instruction::IGET, 5u, 100u, 1u),   // Same as the top.
-
-      DEF_IGET(3, Instruction::IGET, 6u, 100u, 2u),
-      DEF_CONST(4, Instruction::CONST, 7u, 1000),
-      DEF_IPUT(4, Instruction::IPUT, 7u, 101u, 2u),
-      DEF_IGET(5, Instruction::IGET, 9u, 100u, 2u),   // Differs from the top and the CONST.
-
-      DEF_IGET(3, Instruction::IGET_SHORT, 10u, 100u, 3u),
-      DEF_IGET(3, Instruction::IGET_CHAR, 11u, 100u, 4u),
-      DEF_IPUT(4, Instruction::IPUT_SHORT, 12u, 101u, 5u),  // Clobbers field #3, not #4.
-      DEF_IGET(5, Instruction::IGET_SHORT, 13u, 100u, 3u),  // Differs from the top.
-      DEF_IGET(5, Instruction::IGET_CHAR, 14u, 100u, 4u),   // Same as the top.
-
-      DEF_CONST(3, Instruction::CONST, 15u, 3000),
-      DEF_IPUT(3, Instruction::IPUT, 15u, 100u, 6u),
-      DEF_IPUT(3, Instruction::IPUT, 15u, 100u, 7u),
-      DEF_IPUT(3, Instruction::IPUT, 15u, 101u, 6u),
-      DEF_IGET(4, Instruction::IGET, 19u, 100u, 6u),  // Differs from CONSTs 15u and 22u.
-      DEF_IGET(4, Instruction::IGET, 20u, 100u, 7u),  // Same value as 19u.
-      DEF_IGET(4, Instruction::IGET, 21u, 101u, 6u),  // Same value as read from field #7.
-      DEF_CONST(4, Instruction::CONST, 22u, 3001),
-      DEF_IPUT(4, Instruction::IPUT, 22u, 100u, 6u),
-      DEF_IPUT(4, Instruction::IPUT, 22u, 100u, 7u),
-      DEF_IPUT(4, Instruction::IPUT, 22u, 101u, 6u),
-      DEF_IGET(5, Instruction::IGET, 26u, 100u, 6u),  // Same as CONST 22u.
-      DEF_IGET(5, Instruction::IGET, 27u, 100u, 7u),  // Same as CONST 22u.
-      DEF_IGET(5, Instruction::IGET, 28u, 101u, 6u),  // Same as CONST 22u.
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[2]);
-
-  EXPECT_EQ(value_names_[3], value_names_[5]);
-
-  EXPECT_NE(value_names_[6], value_names_[9]);
-  EXPECT_NE(value_names_[7], value_names_[9]);
-
-  EXPECT_NE(value_names_[10], value_names_[13]);
-  EXPECT_EQ(value_names_[11], value_names_[14]);
-
-  EXPECT_NE(value_names_[19], value_names_[15]);
-  EXPECT_NE(value_names_[19], value_names_[22]);
-  EXPECT_EQ(value_names_[22], value_names_[26]);
-  EXPECT_EQ(value_names_[22], value_names_[27]);
-  EXPECT_EQ(value_names_[22], value_names_[28]);
-}
-
-TEST_F(GlobalValueNumberingTestLoop, IFieldToBaseDependency) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      // For the IGET that loads sreg 3u using base 2u, the following IPUT creates a dependency
-      // from the field value to the base. However, this dependency does not result in an
-      // infinite loop since the merge of the field value for base 0u gets assigned a value name
-      // based only on the base 0u, not on the actual value, and breaks the dependency cycle.
-      DEF_IGET(3, Instruction::IGET, 0u, 100u, 0u),
-      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
-      DEF_IGET(4, Instruction::IGET, 2u, 0u, 0u),
-      DEF_IGET(4, Instruction::IGET, 3u, 2u, 0u),
-      DEF_IPUT(4, Instruction::IPUT, 3u, 0u, 0u),
-      DEF_IGET(5, Instruction::IGET, 5u, 0u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[1], value_names_[2]);
-  EXPECT_EQ(value_names_[3], value_names_[5]);
-}
-
-TEST_F(GlobalValueNumberingTestLoop, SFields) {
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_SGET(3, Instruction::SGET, 0u, 0u),
-      DEF_SGET(4, Instruction::SGET, 1u, 0u),         // Same as at the top.
-      DEF_SGET(5, Instruction::SGET, 2u, 0u),         // Same as at the top.
-
-      DEF_SGET(3, Instruction::SGET, 3u, 1u),
-      DEF_SGET(4, Instruction::SGET, 4u, 1u),         // Differs from top...
-      DEF_SPUT(4, Instruction::SPUT, 5u, 1u),         // Because of this SPUT.
-      DEF_SGET(5, Instruction::SGET, 6u, 1u),         // Differs from top and the loop SGET.
-
-      DEF_SGET(3, Instruction::SGET, 7u, 2u),
-      DEF_SPUT(4, Instruction::SPUT, 8u, 2u),         // Because of this SPUT...
-      DEF_SGET(4, Instruction::SGET, 9u, 2u),         // Differs from top.
-      DEF_SGET(5, Instruction::SGET, 10u, 2u),        // Differs from top but same as the loop SGET.
-  };
-
-  PrepareSFields(sfields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  EXPECT_NE(value_names_[3], value_names_[4]);
-  EXPECT_NE(value_names_[3], value_names_[6]);
-  EXPECT_NE(value_names_[4], value_names_[6]);
-
-  EXPECT_NE(value_names_[7], value_names_[9]);
-  EXPECT_EQ(value_names_[9], value_names_[10]);
-}
-
-TEST_F(GlobalValueNumberingTestLoop, NonAliasingArrays) {
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 100u),
-      DEF_AGET(3, Instruction::AGET, 1u, 100u, 101u),
-      DEF_AGET(4, Instruction::AGET, 2u, 100u, 101u),   // Same as at the top.
-      DEF_AGET(5, Instruction::AGET, 3u, 100u, 101u),   // Same as at the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 200u),
-      DEF_AGET(3, Instruction::AGET, 5u, 200u, 201u),
-      DEF_AGET(4, Instruction::AGET, 6u, 200u, 201u),  // Differs from top...
-      DEF_APUT(4, Instruction::APUT, 7u, 200u, 201u),  // Because of this APUT.
-      DEF_AGET(5, Instruction::AGET, 8u, 200u, 201u),  // Differs from top and the loop AGET.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 300u),
-      DEF_AGET(3, Instruction::AGET, 10u, 300u, 301u),
-      DEF_APUT(4, Instruction::APUT, 11u, 300u, 301u),  // Because of this APUT...
-      DEF_AGET(4, Instruction::AGET, 12u, 300u, 301u),  // Differs from top.
-      DEF_AGET(5, Instruction::AGET, 13u, 300u, 301u),  // Differs from top but == the loop AGET.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 400u),
-      DEF_CONST(3, Instruction::CONST, 15u, 3000),
-      DEF_APUT(3, Instruction::APUT, 15u, 400u, 401u),
-      DEF_APUT(3, Instruction::APUT, 15u, 400u, 402u),
-      DEF_AGET(4, Instruction::AGET, 18u, 400u, 401u),  // Differs from 15u and 20u.
-      DEF_AGET(4, Instruction::AGET, 19u, 400u, 402u),  // Same as 18u.
-      DEF_CONST(4, Instruction::CONST, 20u, 4000),
-      DEF_APUT(4, Instruction::APUT, 20u, 400u, 401u),
-      DEF_APUT(4, Instruction::APUT, 20u, 400u, 402u),
-      DEF_AGET(5, Instruction::AGET, 23u, 400u, 401u),  // Differs from 15u and 18u...
-      DEF_AGET(5, Instruction::AGET, 24u, 400u, 402u),  // and same as the CONST 20u.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 500u),
-      DEF_AGET(3, Instruction::AGET, 26u, 500u, 501u),
-      DEF_APUT(4, Instruction::APUT, 27u, 500u, 502u),  // Clobbers element at index 501u.
-      DEF_AGET(5, Instruction::AGET, 28u, 500u, 501u),  // Differs from the top.
-  };
-
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[1], value_names_[2]);
-  EXPECT_EQ(value_names_[1], value_names_[3]);
-
-  EXPECT_NE(value_names_[5], value_names_[6]);
-  EXPECT_NE(value_names_[5], value_names_[8]);
-  EXPECT_NE(value_names_[6], value_names_[8]);
-
-  EXPECT_NE(value_names_[10], value_names_[12]);
-  EXPECT_EQ(value_names_[12], value_names_[13]);
-
-  EXPECT_NE(value_names_[18], value_names_[15]);
-  EXPECT_NE(value_names_[18], value_names_[20]);
-  EXPECT_EQ(value_names_[18], value_names_[19]);
-  EXPECT_NE(value_names_[23], value_names_[15]);
-  EXPECT_NE(value_names_[23], value_names_[18]);
-  EXPECT_EQ(value_names_[23], value_names_[20]);
-  EXPECT_EQ(value_names_[23], value_names_[24]);
-
-  EXPECT_NE(value_names_[26], value_names_[28]);
-}
-
-TEST_F(GlobalValueNumberingTestLoop, AliasingArrays) {
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_AGET(3, Instruction::AGET_WIDE, 0u, 100u, 101u),
-      DEF_AGET(4, Instruction::AGET_WIDE, 2u, 100u, 101u),   // Same as at the top.
-      DEF_AGET(5, Instruction::AGET_WIDE, 4u, 100u, 101u),   // Same as at the top.
-
-      DEF_AGET(3, Instruction::AGET_BYTE, 6u, 200u, 201u),
-      DEF_AGET(4, Instruction::AGET_BYTE, 7u, 200u, 201u),  // Differs from top...
-      DEF_APUT(4, Instruction::APUT_BYTE, 8u, 200u, 201u),  // Because of this APUT.
-      DEF_AGET(5, Instruction::AGET_BYTE, 9u, 200u, 201u),  // Differs from top and the loop AGET.
-
-      DEF_AGET(3, Instruction::AGET, 10u, 300u, 301u),
-      DEF_APUT(4, Instruction::APUT, 11u, 300u, 301u),  // Because of this APUT...
-      DEF_AGET(4, Instruction::AGET, 12u, 300u, 301u),   // Differs from top.
-      DEF_AGET(5, Instruction::AGET, 13u, 300u, 301u),  // Differs from top but == the loop AGET.
-
-      DEF_CONST(3, Instruction::CONST, 14u, 3000),
-      DEF_APUT(3, Instruction::APUT_CHAR, 14u, 400u, 401u),
-      DEF_APUT(3, Instruction::APUT_CHAR, 14u, 400u, 402u),
-      DEF_AGET(4, Instruction::AGET_CHAR, 15u, 400u, 401u),  // Differs from 11u and 16u.
-      DEF_AGET(4, Instruction::AGET_CHAR, 16u, 400u, 402u),  // Same as 14u.
-      DEF_CONST(4, Instruction::CONST, 17u, 4000),
-      DEF_APUT(4, Instruction::APUT_CHAR, 17u, 400u, 401u),
-      DEF_APUT(4, Instruction::APUT_CHAR, 17u, 400u, 402u),
-      DEF_AGET(5, Instruction::AGET_CHAR, 19u, 400u, 401u),  // Differs from 11u and 14u...
-      DEF_AGET(5, Instruction::AGET_CHAR, 20u, 400u, 402u),  // and same as the CONST 16u.
-
-      DEF_AGET(3, Instruction::AGET_SHORT, 21u, 500u, 501u),
-      DEF_APUT(4, Instruction::APUT_SHORT, 22u, 500u, 502u),  // Clobbers element at index 501u.
-      DEF_AGET(5, Instruction::AGET_SHORT, 23u, 500u, 501u),  // Differs from the top.
-
-      DEF_AGET(3, Instruction::AGET_OBJECT, 24u, 600u, 601u),
-      DEF_APUT(4, Instruction::APUT_OBJECT, 25u, 601u, 602u),  // Clobbers 600u/601u.
-      DEF_AGET(5, Instruction::AGET_OBJECT, 26u, 600u, 601u),  // Differs from the top.
-
-      DEF_AGET(3, Instruction::AGET_BOOLEAN, 27u, 700u, 701u),
-      DEF_APUT(4, Instruction::APUT_BOOLEAN, 27u, 701u, 702u),  // Storing the same value.
-      DEF_AGET(5, Instruction::AGET_BOOLEAN, 29u, 700u, 701u),  // Same as the top.
-  };
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 0, 2, 4 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  EXPECT_NE(value_names_[3], value_names_[4]);
-  EXPECT_NE(value_names_[3], value_names_[6]);
-  EXPECT_NE(value_names_[4], value_names_[6]);
-
-  EXPECT_NE(value_names_[7], value_names_[9]);
-  EXPECT_EQ(value_names_[9], value_names_[10]);
-
-  EXPECT_NE(value_names_[14], value_names_[11]);
-  EXPECT_NE(value_names_[14], value_names_[16]);
-  EXPECT_EQ(value_names_[14], value_names_[15]);
-  EXPECT_NE(value_names_[19], value_names_[11]);
-  EXPECT_NE(value_names_[19], value_names_[14]);
-  EXPECT_EQ(value_names_[19], value_names_[16]);
-  EXPECT_EQ(value_names_[19], value_names_[20]);
-
-  EXPECT_NE(value_names_[21], value_names_[23]);
-
-  EXPECT_NE(value_names_[24], value_names_[26]);
-
-  EXPECT_EQ(value_names_[27], value_names_[29]);
-}
-
-TEST_F(GlobalValueNumberingTestLoop, Phi) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000),
-      DEF_PHI2(4, 1u, 0u, 6u),                     // Merge CONST 0u (1000) with the same.
-      DEF_PHI2(4, 2u, 0u, 7u),                     // Merge CONST 0u (1000) with the Phi itself.
-      DEF_PHI2(4, 3u, 0u, 8u),                     // Merge CONST 0u (1000) and CONST 4u (2000).
-      DEF_PHI2(4, 4u, 0u, 9u),                     // Merge CONST 0u (1000) and Phi 3u.
-      DEF_CONST(4, Instruction::CONST, 5u, 2000),
-      DEF_MOVE(4, Instruction::MOVE, 6u, 0u),
-      DEF_MOVE(4, Instruction::MOVE, 7u, 2u),
-      DEF_MOVE(4, Instruction::MOVE, 8u, 5u),
-      DEF_MOVE(4, Instruction::MOVE, 9u, 3u),
-  };
-
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[1], value_names_[0]);
-  EXPECT_EQ(value_names_[2], value_names_[0]);
-
-  EXPECT_NE(value_names_[3], value_names_[0]);
-  EXPECT_NE(value_names_[3], value_names_[5]);
-  EXPECT_NE(value_names_[4], value_names_[0]);
-  EXPECT_NE(value_names_[4], value_names_[5]);
-  EXPECT_NE(value_names_[4], value_names_[3]);
-}
-
-TEST_F(GlobalValueNumberingTestLoop, IFieldLoopVariable) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 0),
-      DEF_IPUT(3, Instruction::IPUT, 0u, 100u, 0u),
-      DEF_IGET(4, Instruction::IGET, 2u, 100u, 0u),
-      DEF_BINOP(4, Instruction::ADD_INT, 3u, 2u, 101u),
-      DEF_IPUT(4, Instruction::IPUT, 3u, 100u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[2], value_names_[0]);
-  EXPECT_NE(value_names_[3], value_names_[0]);
-  EXPECT_NE(value_names_[3], value_names_[2]);
-
-  // Set up vreg_to_ssa_map_exit for prologue and loop and set post-processing mode
-  // as needed for GetStartingVregValueNumber().
-  const int32_t prologue_vreg_to_ssa_map_exit[] = { 0 };
-  const int32_t loop_vreg_to_ssa_map_exit[] = { 3 };
-  PrepareVregToSsaMapExit(3, prologue_vreg_to_ssa_map_exit);
-  PrepareVregToSsaMapExit(4, loop_vreg_to_ssa_map_exit);
-  gvn_->StartPostProcessing();
-
-  // Check that vreg 0 has the same value number as the result of IGET 2u.
-  const LocalValueNumbering* loop = gvn_->GetLvn(4);
-  EXPECT_EQ(value_names_[2], loop->GetStartingVregValueNumber(0));
-}
-
-TEST_F(GlobalValueNumberingTestCatch, IFields) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 200u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 201u),
-      DEF_IGET(3, Instruction::IGET, 2u, 100u, 0u),
-      DEF_IGET(3, Instruction::IGET, 3u, 200u, 0u),
-      DEF_IGET(3, Instruction::IGET, 4u, 201u, 0u),
-      DEF_INVOKE1(4, Instruction::INVOKE_STATIC, 201u),     // Clobbering catch, 201u escapes.
-      DEF_IGET(4, Instruction::IGET, 6u, 100u, 0u),         // Differs from IGET 2u.
-      DEF_IPUT(4, Instruction::IPUT, 6u, 100u, 1u),
-      DEF_IPUT(4, Instruction::IPUT, 6u, 101u, 0u),
-      DEF_IPUT(4, Instruction::IPUT, 6u, 200u, 0u),
-      DEF_IGET(5, Instruction::IGET, 10u, 100u, 0u),        // Differs from IGETs 2u and 6u.
-      DEF_IGET(5, Instruction::IGET, 11u, 200u, 0u),        // Same as the top.
-      DEF_IGET(5, Instruction::IGET, 12u, 201u, 0u),        // Differs from the top, 201u escaped.
-      DEF_IPUT(5, Instruction::IPUT, 10u, 100u, 1u),
-      DEF_IPUT(5, Instruction::IPUT, 10u, 101u, 0u),
-      DEF_IPUT(5, Instruction::IPUT, 10u, 200u, 0u),
-      DEF_IGET(6, Instruction::IGET, 16u, 100u, 0u),        // Differs from IGETs 2u, 6u and 10u.
-      DEF_IGET(6, Instruction::IGET, 17u, 100u, 1u),        // Same as IGET 16u.
-      DEF_IGET(6, Instruction::IGET, 18u, 101u, 0u),        // Same as IGET 16u.
-      DEF_IGET(6, Instruction::IGET, 19u, 200u, 0u),        // Same as IGET 16u.
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[2], value_names_[6]);
-  EXPECT_NE(value_names_[2], value_names_[10]);
-  EXPECT_NE(value_names_[6], value_names_[10]);
-  EXPECT_EQ(value_names_[3], value_names_[11]);
-  EXPECT_NE(value_names_[4], value_names_[12]);
-
-  EXPECT_NE(value_names_[2], value_names_[16]);
-  EXPECT_NE(value_names_[6], value_names_[16]);
-  EXPECT_NE(value_names_[10], value_names_[16]);
-  EXPECT_EQ(value_names_[16], value_names_[17]);
-  EXPECT_EQ(value_names_[16], value_names_[18]);
-  EXPECT_EQ(value_names_[16], value_names_[19]);
-}
-
-TEST_F(GlobalValueNumberingTestCatch, SFields) {
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_SGET(3, Instruction::SGET, 0u, 0u),
-      DEF_INVOKE1(4, Instruction::INVOKE_STATIC, 100u),     // Clobbering catch.
-      DEF_SGET(4, Instruction::SGET, 2u, 0u),               // Differs from SGET 0u.
-      DEF_SPUT(4, Instruction::SPUT, 2u, 1u),
-      DEF_SGET(5, Instruction::SGET, 4u, 0u),               // Differs from SGETs 0u and 2u.
-      DEF_SPUT(5, Instruction::SPUT, 4u, 1u),
-      DEF_SGET(6, Instruction::SGET, 6u, 0u),               // Differs from SGETs 0u, 2u and 4u.
-      DEF_SGET(6, Instruction::SGET, 7u, 1u),               // Same as field #1.
-  };
-
-  PrepareSFields(sfields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[2]);
-  EXPECT_NE(value_names_[0], value_names_[4]);
-  EXPECT_NE(value_names_[2], value_names_[4]);
-  EXPECT_NE(value_names_[0], value_names_[6]);
-  EXPECT_NE(value_names_[2], value_names_[6]);
-  EXPECT_NE(value_names_[4], value_names_[6]);
-  EXPECT_EQ(value_names_[6], value_names_[7]);
-}
-
-TEST_F(GlobalValueNumberingTestCatch, Arrays) {
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 200u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 201u),
-      DEF_AGET(3, Instruction::AGET, 2u, 100u, 101u),
-      DEF_AGET(3, Instruction::AGET, 3u, 200u, 202u),
-      DEF_AGET(3, Instruction::AGET, 4u, 200u, 203u),
-      DEF_AGET(3, Instruction::AGET, 5u, 201u, 202u),
-      DEF_AGET(3, Instruction::AGET, 6u, 201u, 203u),
-      DEF_INVOKE1(4, Instruction::INVOKE_STATIC, 201u),     // Clobbering catch, 201u escapes.
-      DEF_AGET(4, Instruction::AGET, 8u, 100u, 101u),       // Differs from AGET 2u.
-      DEF_APUT(4, Instruction::APUT, 8u, 100u, 102u),
-      DEF_APUT(4, Instruction::APUT, 8u, 200u, 202u),
-      DEF_APUT(4, Instruction::APUT, 8u, 200u, 203u),
-      DEF_APUT(4, Instruction::APUT, 8u, 201u, 202u),
-      DEF_APUT(4, Instruction::APUT, 8u, 201u, 203u),
-      DEF_AGET(5, Instruction::AGET, 14u, 100u, 101u),      // Differs from AGETs 2u and 8u.
-      DEF_AGET(5, Instruction::AGET, 15u, 200u, 202u),      // Same as AGET 3u.
-      DEF_AGET(5, Instruction::AGET, 16u, 200u, 203u),      // Same as AGET 4u.
-      DEF_AGET(5, Instruction::AGET, 17u, 201u, 202u),      // Differs from AGET 5u.
-      DEF_AGET(5, Instruction::AGET, 18u, 201u, 203u),      // Differs from AGET 6u.
-      DEF_APUT(5, Instruction::APUT, 14u, 100u, 102u),
-      DEF_APUT(5, Instruction::APUT, 14u, 200u, 202u),
-      DEF_APUT(5, Instruction::APUT, 14u, 200u, 203u),
-      DEF_APUT(5, Instruction::APUT, 14u, 201u, 202u),
-      DEF_APUT(5, Instruction::APUT, 14u, 201u, 203u),
-      DEF_AGET(6, Instruction::AGET, 24u, 100u, 101u),      // Differs from AGETs 2u, 8u and 14u.
-      DEF_AGET(6, Instruction::AGET, 25u, 100u, 101u),      // Same as AGET 24u.
-      DEF_AGET(6, Instruction::AGET, 26u, 200u, 202u),      // Same as AGET 24u.
-      DEF_AGET(6, Instruction::AGET, 27u, 200u, 203u),      // Same as AGET 24u.
-      DEF_AGET(6, Instruction::AGET, 28u, 201u, 202u),      // Same as AGET 24u.
-      DEF_AGET(6, Instruction::AGET, 29u, 201u, 203u),      // Same as AGET 24u.
-  };
-
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[2], value_names_[8]);
-  EXPECT_NE(value_names_[2], value_names_[14]);
-  EXPECT_NE(value_names_[8], value_names_[14]);
-  EXPECT_EQ(value_names_[3], value_names_[15]);
-  EXPECT_EQ(value_names_[4], value_names_[16]);
-  EXPECT_NE(value_names_[5], value_names_[17]);
-  EXPECT_NE(value_names_[6], value_names_[18]);
-  EXPECT_NE(value_names_[2], value_names_[24]);
-  EXPECT_NE(value_names_[8], value_names_[24]);
-  EXPECT_NE(value_names_[14], value_names_[24]);
-  EXPECT_EQ(value_names_[24], value_names_[25]);
-  EXPECT_EQ(value_names_[24], value_names_[26]);
-  EXPECT_EQ(value_names_[24], value_names_[27]);
-  EXPECT_EQ(value_names_[24], value_names_[28]);
-  EXPECT_EQ(value_names_[24], value_names_[29]);
-}
-
-TEST_F(GlobalValueNumberingTestCatch, Phi) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000),
-      DEF_CONST(3, Instruction::CONST, 1u, 2000),
-      DEF_MOVE(3, Instruction::MOVE, 2u, 1u),
-      DEF_INVOKE1(4, Instruction::INVOKE_STATIC, 100u),     // Clobbering catch.
-      DEF_CONST(5, Instruction::CONST, 4u, 1000),
-      DEF_CONST(5, Instruction::CONST, 5u, 3000),
-      DEF_MOVE(5, Instruction::MOVE, 6u, 5u),
-      DEF_PHI2(6, 7u, 0u, 4u),
-      DEF_PHI2(6, 8u, 0u, 5u),
-      DEF_PHI2(6, 9u, 0u, 6u),
-      DEF_PHI2(6, 10u, 1u, 4u),
-      DEF_PHI2(6, 11u, 1u, 5u),
-      DEF_PHI2(6, 12u, 1u, 6u),
-      DEF_PHI2(6, 13u, 2u, 4u),
-      DEF_PHI2(6, 14u, 2u, 5u),
-      DEF_PHI2(6, 15u, 2u, 6u),
-  };
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  ASSERT_EQ(value_names_[4], value_names_[0]);  // Both CONSTs are 1000.
-  EXPECT_EQ(value_names_[7], value_names_[0]);  // Merging CONST 0u and CONST 4u, both 1000.
-  EXPECT_NE(value_names_[8], value_names_[0]);
-  EXPECT_NE(value_names_[8], value_names_[5]);
-  EXPECT_EQ(value_names_[9], value_names_[8]);
-  EXPECT_NE(value_names_[10], value_names_[1]);
-  EXPECT_NE(value_names_[10], value_names_[4]);
-  EXPECT_NE(value_names_[10], value_names_[8]);
-  EXPECT_NE(value_names_[11], value_names_[1]);
-  EXPECT_NE(value_names_[11], value_names_[5]);
-  EXPECT_NE(value_names_[11], value_names_[8]);
-  EXPECT_NE(value_names_[11], value_names_[10]);
-  EXPECT_EQ(value_names_[12], value_names_[11]);
-  EXPECT_EQ(value_names_[13], value_names_[10]);
-  EXPECT_EQ(value_names_[14], value_names_[11]);
-  EXPECT_EQ(value_names_[15], value_names_[11]);
-}
-
-TEST_F(GlobalValueNumberingTest, NullCheckIFields) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },  // Object.
-      { 1u, 1u, 1u, false, kDexMemAccessObject },  // Object.
-  };
-  static const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),  // 4 is fall-through, 5 is taken.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(3)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(3, 4)),
-  };
-  static const MIRDef mirs[] = {
-      DEF_IGET(3, Instruction::IGET_OBJECT, 0u, 100u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 1u, 100u, 1u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 2u, 101u, 0u),
-      DEF_IFZ(3, Instruction::IF_NEZ, 0u),            // Null-check for field #0 for taken.
-      DEF_UNIQUE_REF(4, Instruction::NEW_ARRAY, 4u),
-      DEF_IPUT(4, Instruction::IPUT_OBJECT, 4u, 100u, 0u),
-      DEF_IPUT(4, Instruction::IPUT_OBJECT, 4u, 100u, 1u),
-      DEF_IPUT(4, Instruction::IPUT_OBJECT, 4u, 101u, 0u),
-      DEF_IGET(5, Instruction::IGET_OBJECT, 8u, 100u, 0u),   // 100u/#0, IF_NEZ/NEW_ARRAY.
-      DEF_IGET(5, Instruction::IGET_OBJECT, 9u, 100u, 1u),   // 100u/#1, -/NEW_ARRAY.
-      DEF_IGET(5, Instruction::IGET_OBJECT, 10u, 101u, 0u),  // 101u/#0, -/NEW_ARRAY.
-      DEF_CONST(5, Instruction::CONST, 11u, 0),
-      DEF_AGET(5, Instruction::AGET, 12u, 8u, 11u),   // Null-check eliminated.
-      DEF_AGET(5, Instruction::AGET, 13u, 9u, 11u),   // Null-check kept.
-      DEF_AGET(5, Instruction::AGET, 14u, 10u, 11u),  // Null-check kept.
-  };
-  static const bool expected_ignore_null_check[] = {
-      false, true, false, false,                      // BB #3; unimportant.
-      false, true, true, true,                        // BB #4; unimportant.
-      true, true, true, false, true, false, false,    // BB #5; only the last three are important.
-  };
-
-  PrepareIFields(ifields);
-  PrepareBasicBlocks(bbs);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  PerformGVNCodeModifications();
-  ASSERT_EQ(arraysize(expected_ignore_null_check), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_null_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
-  }
-}
-
-TEST_F(GlobalValueNumberingTest, NullCheckSFields) {
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },
-      { 1u, 1u, 1u, false, kDexMemAccessObject },
-  };
-  static const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),  // 4 is fall-through, 5 is taken.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(3)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(3, 4)),
-  };
-  static const MIRDef mirs[] = {
-      DEF_SGET(3, Instruction::SGET_OBJECT, 0u, 0u),
-      DEF_SGET(3, Instruction::SGET_OBJECT, 1u, 1u),
-      DEF_IFZ(3, Instruction::IF_NEZ, 0u),            // Null-check for field #0 for taken.
-      DEF_UNIQUE_REF(4, Instruction::NEW_ARRAY, 3u),
-      DEF_SPUT(4, Instruction::SPUT_OBJECT, 3u, 0u),
-      DEF_SPUT(4, Instruction::SPUT_OBJECT, 3u, 1u),
-      DEF_SGET(5, Instruction::SGET_OBJECT, 6u, 0u),  // Field #0 is null-checked, IF_NEZ/NEW_ARRAY.
-      DEF_SGET(5, Instruction::SGET_OBJECT, 7u, 1u),  // Field #1 is not null-checked, -/NEW_ARRAY.
-      DEF_CONST(5, Instruction::CONST, 8u, 0),
-      DEF_AGET(5, Instruction::AGET, 9u, 6u, 8u),     // Null-check eliminated.
-      DEF_AGET(5, Instruction::AGET, 10u, 7u, 8u),    // Null-check kept.
-  };
-  static const bool expected_ignore_null_check[] = {
-      false, false, false, false, false, false, false, false, false, true, false
-  };
-
-  PrepareSFields(sfields);
-  PrepareBasicBlocks(bbs);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  PerformGVNCodeModifications();
-  ASSERT_EQ(arraysize(expected_ignore_null_check), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_null_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
-  }
-}
-
-TEST_F(GlobalValueNumberingTest, NullCheckArrays) {
-  static const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),  // 4 is fall-through, 5 is taken.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(3)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(3, 4)),
-  };
-  static const MIRDef mirs[] = {
-      DEF_AGET(3, Instruction::AGET_OBJECT, 0u, 100u, 102u),
-      DEF_AGET(3, Instruction::AGET_OBJECT, 1u, 100u, 103u),
-      DEF_AGET(3, Instruction::AGET_OBJECT, 2u, 101u, 102u),
-      DEF_IFZ(3, Instruction::IF_NEZ, 0u),            // Null-check for field #0 for taken.
-      DEF_UNIQUE_REF(4, Instruction::NEW_ARRAY, 4u),
-      DEF_APUT(4, Instruction::APUT_OBJECT, 4u, 100u, 102u),
-      DEF_APUT(4, Instruction::APUT_OBJECT, 4u, 100u, 103u),
-      DEF_APUT(4, Instruction::APUT_OBJECT, 4u, 101u, 102u),
-      DEF_AGET(5, Instruction::AGET_OBJECT, 8u, 100u, 102u),   // Null-checked, IF_NEZ/NEW_ARRAY.
-      DEF_AGET(5, Instruction::AGET_OBJECT, 9u, 100u, 103u),   // Not null-checked, -/NEW_ARRAY.
-      DEF_AGET(5, Instruction::AGET_OBJECT, 10u, 101u, 102u),  // Not null-checked, -/NEW_ARRAY.
-      DEF_CONST(5, Instruction::CONST, 11u, 0),
-      DEF_AGET(5, Instruction::AGET, 12u, 8u, 11u),    // Null-check eliminated.
-      DEF_AGET(5, Instruction::AGET, 13u, 9u, 11u),    // Null-check kept.
-      DEF_AGET(5, Instruction::AGET, 14u, 10u, 11u),   // Null-check kept.
-  };
-  static const bool expected_ignore_null_check[] = {
-      false, true, false, false,                      // BB #3; unimportant.
-      false, true, true, true,                        // BB #4; unimportant.
-      true, true, true, false, true, false, false,    // BB #5; only the last three are important.
-  };
-
-  PrepareBasicBlocks(bbs);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  PerformGVNCodeModifications();
-  ASSERT_EQ(arraysize(expected_ignore_null_check), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_null_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
-  }
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, RangeCheckArrays) {
-  // NOTE: We don't merge range checks when we merge value names for Phis or memory locations.
-  static const MIRDef mirs[] = {
-      DEF_AGET(4, Instruction::AGET, 0u, 100u, 101u),
-      DEF_AGET(5, Instruction::AGET, 1u, 100u, 101u),
-      DEF_APUT(6, Instruction::APUT, 2u, 100u, 101u),
-
-      DEF_AGET(4, Instruction::AGET, 3u, 200u, 201u),
-      DEF_AGET(5, Instruction::AGET, 4u, 200u, 202u),
-      DEF_APUT(6, Instruction::APUT, 5u, 200u, 201u),
-
-      DEF_AGET(4, Instruction::AGET, 6u, 300u, 302u),
-      DEF_AGET(5, Instruction::AGET, 7u, 301u, 302u),
-      DEF_APUT(6, Instruction::APUT, 8u, 300u, 302u),
-  };
-  static const bool expected_ignore_null_check[] = {
-      false, false, true,
-      false, false, true,
-      false, false, false,
-  };
-  static const bool expected_ignore_range_check[] = {
-      false, false, true,
-      false, false, false,
-      false, false, false,
-  };
-
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  PerformGVNCodeModifications();
-  ASSERT_EQ(arraysize(expected_ignore_null_check), mir_count_);
-  ASSERT_EQ(arraysize(expected_ignore_range_check), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_null_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
-    EXPECT_EQ(expected_ignore_range_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0) << i;
-  }
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, MergeSameValueInDifferentMemoryLocations) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 100u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 200u),
-      DEF_CONST(4, Instruction::CONST, 2u, 1000),
-      DEF_IPUT(4, Instruction::IPUT, 2u, 100u, 0u),
-      DEF_IPUT(4, Instruction::IPUT, 2u, 100u, 1u),
-      DEF_IPUT(4, Instruction::IPUT, 2u, 101u, 0u),
-      DEF_APUT(4, Instruction::APUT, 2u, 200u, 202u),
-      DEF_APUT(4, Instruction::APUT, 2u, 200u, 203u),
-      DEF_APUT(4, Instruction::APUT, 2u, 201u, 202u),
-      DEF_APUT(4, Instruction::APUT, 2u, 201u, 203u),
-      DEF_SPUT(4, Instruction::SPUT, 2u, 0u),
-      DEF_SPUT(4, Instruction::SPUT, 2u, 1u),
-      DEF_CONST(5, Instruction::CONST, 12u, 2000),
-      DEF_IPUT(5, Instruction::IPUT, 12u, 100u, 0u),
-      DEF_IPUT(5, Instruction::IPUT, 12u, 100u, 1u),
-      DEF_IPUT(5, Instruction::IPUT, 12u, 101u, 0u),
-      DEF_APUT(5, Instruction::APUT, 12u, 200u, 202u),
-      DEF_APUT(5, Instruction::APUT, 12u, 200u, 203u),
-      DEF_APUT(5, Instruction::APUT, 12u, 201u, 202u),
-      DEF_APUT(5, Instruction::APUT, 12u, 201u, 203u),
-      DEF_SPUT(5, Instruction::SPUT, 12u, 0u),
-      DEF_SPUT(5, Instruction::SPUT, 12u, 1u),
-      DEF_PHI2(6, 22u, 2u, 12u),
-      DEF_IGET(6, Instruction::IGET, 23u, 100u, 0u),
-      DEF_IGET(6, Instruction::IGET, 24u, 100u, 1u),
-      DEF_IGET(6, Instruction::IGET, 25u, 101u, 0u),
-      DEF_AGET(6, Instruction::AGET, 26u, 200u, 202u),
-      DEF_AGET(6, Instruction::AGET, 27u, 200u, 203u),
-      DEF_AGET(6, Instruction::AGET, 28u, 201u, 202u),
-      DEF_AGET(6, Instruction::AGET, 29u, 201u, 203u),
-      DEF_SGET(6, Instruction::SGET, 30u, 0u),
-      DEF_SGET(6, Instruction::SGET, 31u, 1u),
-  };
-  PrepareIFields(ifields);
-  PrepareSFields(sfields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[2], value_names_[12]);
-  EXPECT_NE(value_names_[2], value_names_[22]);
-  EXPECT_NE(value_names_[12], value_names_[22]);
-  for (size_t i = 23; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(value_names_[22], value_names_[i]) << i;
-  }
-}
-
-TEST_F(GlobalValueNumberingTest, InfiniteLocationLoop) {
-  // This is a pattern that led to an infinite loop during the GVN development. This has been
-  // fixed by rewriting the merging of AliasingValues to merge only locations read from or
-  // written to in each incoming LVN rather than merging all locations read from or written to
-  // in any incoming LVN. The loop also showed up only when the GVN used the DFS ordering
-  // instead of the "topological" ordering. However, since the "topological" ordering is not
-  // really topological when there are cycles, and an optimizing Java compiler (or a tool like
-  // proguard) could theoretically create any sort of flow graph, this could have shown up in
-  // real code.
-  //
-  // While we were merging all the locations:
-  // On the first evaluation the Phi receives the same value name as the CONST 0u. After the
-  // second evaluation, when BB #9 has been processed, the Phi receives its own value name.
-  // However, the index from the first evaluation keeps disappearing and reappearing in the
-  // LVN's aliasing_array_value_map_'s load_value_map for BBs #9, #4, #5, #7 because of the
-  // DFS ordering of LVN evaluation.
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },
-  };
-  static const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(4)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 2), DEF_PRED2(3, 9)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(6, 7), DEF_PRED1(4)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(9), DEF_PRED1(5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(8, 9), DEF_PRED1(5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(9), DEF_PRED1(7)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED3(6, 7, 8)),
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 0),
-      DEF_PHI2(4, 1u, 0u, 10u),
-      DEF_INVOKE1(6, Instruction::INVOKE_STATIC, 100u),
-      DEF_IGET(6, Instruction::IGET_OBJECT, 3u, 100u, 0u),
-      DEF_CONST(6, Instruction::CONST, 4u, 1000),
-      DEF_APUT(6, Instruction::APUT, 4u, 3u, 1u),            // Index is Phi 1u.
-      DEF_INVOKE1(8, Instruction::INVOKE_STATIC, 100u),
-      DEF_IGET(8, Instruction::IGET_OBJECT, 7u, 100u, 0u),
-      DEF_CONST(8, Instruction::CONST, 8u, 2000),
-      DEF_APUT(8, Instruction::APUT, 9u, 7u, 1u),            // Index is Phi 1u.
-      DEF_CONST(9, Instruction::CONST, 10u, 3000),
-  };
-  PrepareIFields(ifields);
-  PrepareBasicBlocks(bbs);
-  PrepareMIRs(mirs);
-  // Using DFS order for this test. The GVN result should not depend on the ordering used
-  // once the GVN actually converges, but creating a test for this convergence issue with
-  // the topological ordering would be very challenging.
-  PerformPreOrderDfsGVN();
-}
-
-TEST_F(GlobalValueNumberingTestTwoConsecutiveLoops, IFieldAndPhi) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },
-  };
-  static const MIRDef mirs[] = {
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 0u, 100u),
-      DEF_IPUT(3, Instruction::IPUT_OBJECT, 0u, 200u, 0u),
-      DEF_PHI2(4, 2u, 0u, 3u),
-      DEF_MOVE(5, Instruction::MOVE_OBJECT, 3u, 300u),
-      DEF_IPUT(5, Instruction::IPUT_OBJECT, 3u, 200u, 0u),
-      DEF_MOVE(6, Instruction::MOVE_OBJECT, 5u, 2u),
-      DEF_IGET(6, Instruction::IGET_OBJECT, 6u, 200u, 0u),
-      DEF_MOVE(7, Instruction::MOVE_OBJECT, 7u, 5u),
-      DEF_IGET(7, Instruction::IGET_OBJECT, 8u, 200u, 0u),
-      DEF_MOVE(8, Instruction::MOVE_OBJECT, 9u, 5u),
-      DEF_IGET(8, Instruction::IGET_OBJECT, 10u, 200u, 0u),
-      DEF_MOVE(9, Instruction::MOVE_OBJECT, 11u, 5u),
-      DEF_IGET(9, Instruction::IGET_OBJECT, 12u, 200u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[3]);
-  EXPECT_NE(value_names_[0], value_names_[2]);
-  EXPECT_NE(value_names_[3], value_names_[2]);
-  EXPECT_EQ(value_names_[2], value_names_[5]);
-  EXPECT_EQ(value_names_[5], value_names_[6]);
-  EXPECT_EQ(value_names_[5], value_names_[7]);
-  EXPECT_EQ(value_names_[5], value_names_[8]);
-  EXPECT_EQ(value_names_[5], value_names_[9]);
-  EXPECT_EQ(value_names_[5], value_names_[10]);
-  EXPECT_EQ(value_names_[5], value_names_[11]);
-  EXPECT_EQ(value_names_[5], value_names_[12]);
-}
-
-TEST_F(GlobalValueNumberingTestTwoConsecutiveLoops, NullCheck) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },
-  };
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },
-  };
-  static const MIRDef mirs[] = {
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 0u, 100u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 1u, 200u, 0u),
-      DEF_SGET(3, Instruction::SGET_OBJECT, 2u, 0u),
-      DEF_AGET(3, Instruction::AGET_OBJECT, 3u, 300u, 201u),
-      DEF_PHI2(4, 4u, 0u, 8u),
-      DEF_IGET(5, Instruction::IGET_OBJECT, 5u, 200u, 0u),
-      DEF_SGET(5, Instruction::SGET_OBJECT, 6u, 0u),
-      DEF_AGET(5, Instruction::AGET_OBJECT, 7u, 300u, 201u),
-      DEF_MOVE(5, Instruction::MOVE_OBJECT, 8u, 400u),
-      DEF_IPUT(5, Instruction::IPUT_OBJECT, 4u, 200u, 0u),          // PUT the Phi 4u.
-      DEF_SPUT(5, Instruction::SPUT_OBJECT, 4u, 0u),                // PUT the Phi 4u.
-      DEF_APUT(5, Instruction::APUT_OBJECT, 4u, 300u, 201u),        // PUT the Phi 4u.
-      DEF_MOVE(6, Instruction::MOVE_OBJECT, 12u, 4u),
-      DEF_IGET(6, Instruction::IGET_OBJECT, 13u, 200u, 0u),
-      DEF_SGET(6, Instruction::SGET_OBJECT, 14u, 0u),
-      DEF_AGET(6, Instruction::AGET_OBJECT, 15u, 300u, 201u),
-      DEF_AGET(6, Instruction::AGET_OBJECT, 16u, 12u, 600u),
-      DEF_AGET(6, Instruction::AGET_OBJECT, 17u, 13u, 600u),
-      DEF_AGET(6, Instruction::AGET_OBJECT, 18u, 14u, 600u),
-      DEF_AGET(6, Instruction::AGET_OBJECT, 19u, 15u, 600u),
-      DEF_MOVE(8, Instruction::MOVE_OBJECT, 20u, 12u),
-      DEF_IGET(8, Instruction::IGET_OBJECT, 21u, 200u, 0u),
-      DEF_SGET(8, Instruction::SGET_OBJECT, 22u, 0u),
-      DEF_AGET(8, Instruction::AGET_OBJECT, 23u, 300u, 201u),
-      DEF_AGET(8, Instruction::AGET_OBJECT, 24u, 12u, 600u),
-      DEF_AGET(8, Instruction::AGET_OBJECT, 25u, 13u, 600u),
-      DEF_AGET(8, Instruction::AGET_OBJECT, 26u, 14u, 600u),
-      DEF_AGET(8, Instruction::AGET_OBJECT, 27u, 15u, 600u),
-      DEF_MOVE(9, Instruction::MOVE_OBJECT, 28u, 12u),
-      DEF_IGET(9, Instruction::IGET_OBJECT, 29u, 200u, 0u),
-      DEF_SGET(9, Instruction::SGET_OBJECT, 30u, 0u),
-      DEF_AGET(9, Instruction::AGET_OBJECT, 31u, 300u, 201u),
-      DEF_AGET(9, Instruction::AGET_OBJECT, 32u, 12u, 600u),
-      DEF_AGET(9, Instruction::AGET_OBJECT, 33u, 13u, 600u),
-      DEF_AGET(9, Instruction::AGET_OBJECT, 34u, 14u, 600u),
-      DEF_AGET(9, Instruction::AGET_OBJECT, 35u, 15u, 600u),
-  };
-  static const bool expected_ignore_null_check[] = {
-      false, false, false, false,                                   // BB #3.
-      false, true, false, true, false, true, false, true,           // BBs #4 and #5.
-      false, true, false, true, false, false, false, false,         // BB #6.
-      false, true, false, true, true, true, true, true,             // BB #8.
-      false, true, false, true, true, true, true, true,             // BB #9.
-  };
-  static const bool expected_ignore_range_check[] = {
-      false, false, false, false,                                   // BB #3.
-      false, false, false, true, false, false, false, true,         // BBs #4 and #5.
-      false, false, false, true, false, false, false, false,        // BB #6.
-      false, false, false, true, true, true, true, true,            // BB #8.
-      false, false, false, true, true, true, true, true,            // BB #9.
-  };
-
-  PrepareIFields(ifields);
-  PrepareSFields(sfields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[4]);
-  EXPECT_NE(value_names_[1], value_names_[5]);
-  EXPECT_NE(value_names_[2], value_names_[6]);
-  EXPECT_NE(value_names_[3], value_names_[7]);
-  EXPECT_NE(value_names_[4], value_names_[8]);
-  EXPECT_EQ(value_names_[4], value_names_[12]);
-  EXPECT_EQ(value_names_[5], value_names_[13]);
-  EXPECT_EQ(value_names_[6], value_names_[14]);
-  EXPECT_EQ(value_names_[7], value_names_[15]);
-  EXPECT_EQ(value_names_[12], value_names_[20]);
-  EXPECT_EQ(value_names_[13], value_names_[21]);
-  EXPECT_EQ(value_names_[14], value_names_[22]);
-  EXPECT_EQ(value_names_[15], value_names_[23]);
-  EXPECT_EQ(value_names_[12], value_names_[28]);
-  EXPECT_EQ(value_names_[13], value_names_[29]);
-  EXPECT_EQ(value_names_[14], value_names_[30]);
-  EXPECT_EQ(value_names_[15], value_names_[31]);
-  PerformGVNCodeModifications();
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_null_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
-    EXPECT_EQ(expected_ignore_range_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0) << i;
-  }
-}
-
-TEST_F(GlobalValueNumberingTestTwoNestedLoops, IFieldAndPhi) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },
-  };
-  static const MIRDef mirs[] = {
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 0u, 100u),
-      DEF_IPUT(3, Instruction::IPUT_OBJECT, 0u, 200u, 0u),
-      DEF_PHI2(4, 2u, 0u, 11u),
-      DEF_MOVE(4, Instruction::MOVE_OBJECT, 3u, 2u),
-      DEF_IGET(4, Instruction::IGET_OBJECT, 4u, 200u, 0u),
-      DEF_MOVE(5, Instruction::MOVE_OBJECT, 5u, 3u),
-      DEF_IGET(5, Instruction::IGET_OBJECT, 6u, 200u, 0u),
-      DEF_MOVE(6, Instruction::MOVE_OBJECT, 7u, 3u),
-      DEF_IGET(6, Instruction::IGET_OBJECT, 8u, 200u, 0u),
-      DEF_MOVE(7, Instruction::MOVE_OBJECT, 9u, 3u),
-      DEF_IGET(7, Instruction::IGET_OBJECT, 10u, 200u, 0u),
-      DEF_MOVE(7, Instruction::MOVE_OBJECT, 11u, 300u),
-      DEF_IPUT(7, Instruction::IPUT_OBJECT, 11u, 200u, 0u),
-      DEF_MOVE(8, Instruction::MOVE_OBJECT, 13u, 3u),
-      DEF_IGET(8, Instruction::IGET_OBJECT, 14u, 200u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[11]);
-  EXPECT_NE(value_names_[0], value_names_[2]);
-  EXPECT_NE(value_names_[11], value_names_[2]);
-  EXPECT_EQ(value_names_[2], value_names_[3]);
-  EXPECT_EQ(value_names_[3], value_names_[4]);
-  EXPECT_EQ(value_names_[3], value_names_[5]);
-  EXPECT_EQ(value_names_[3], value_names_[6]);
-  EXPECT_EQ(value_names_[3], value_names_[7]);
-  EXPECT_EQ(value_names_[3], value_names_[8]);
-  EXPECT_EQ(value_names_[3], value_names_[9]);
-  EXPECT_EQ(value_names_[3], value_names_[10]);
-  EXPECT_EQ(value_names_[3], value_names_[13]);
-  EXPECT_EQ(value_names_[3], value_names_[14]);
-}
-
-TEST_F(GlobalValueNumberingTest, NormalPathToCatchEntry) {
-  // When there's an empty catch block, all the exception paths lead to the next block in
-  // the normal path and we can also have normal "taken" or "fall-through" branches to that
-  // path. Check that LocalValueNumbering::PruneNonAliasingRefsForCatch() can handle it.
-  static const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(3)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(3, 4)),
-  };
-  static const MIRDef mirs[] = {
-      DEF_INVOKE1(4, Instruction::INVOKE_STATIC, 100u),
-  };
-  PrepareBasicBlocks(bbs);
-  BasicBlock* catch_handler = cu_.mir_graph->GetBasicBlock(5u);
-  catch_handler->catch_entry = true;
-  // Add successor block info to the check block.
-  BasicBlock* check_bb = cu_.mir_graph->GetBasicBlock(3u);
-  check_bb->successor_block_list_type = kCatch;
-  SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
-      (cu_.arena.Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessors));
-  successor_block_info->block = catch_handler->id;
-  check_bb->successor_blocks.push_back(successor_block_info);
-  BasicBlock* merge_block = cu_.mir_graph->GetBasicBlock(4u);
-  std::swap(merge_block->taken, merge_block->fall_through);
-  PrepareMIRs(mirs);
-  PerformGVN();
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, DivZeroCheckDiamond) {
-  static const MIRDef mirs[] = {
-      DEF_BINOP(3u, Instruction::DIV_INT, 1u, 20u, 21u),
-      DEF_BINOP(3u, Instruction::DIV_INT, 2u, 24u, 21u),
-      DEF_BINOP(3u, Instruction::DIV_INT, 3u, 20u, 23u),
-      DEF_BINOP(4u, Instruction::DIV_INT, 4u, 24u, 22u),
-      DEF_BINOP(4u, Instruction::DIV_INT, 9u, 24u, 25u),
-      DEF_BINOP(5u, Instruction::DIV_INT, 5u, 24u, 21u),
-      DEF_BINOP(5u, Instruction::DIV_INT, 10u, 24u, 26u),
-      DEF_PHI2(6u, 27u, 25u, 26u),
-      DEF_BINOP(6u, Instruction::DIV_INT, 12u, 20u, 27u),
-      DEF_BINOP(6u, Instruction::DIV_INT, 6u, 24u, 21u),
-      DEF_BINOP(6u, Instruction::DIV_INT, 7u, 20u, 23u),
-      DEF_BINOP(6u, Instruction::DIV_INT, 8u, 20u, 22u),
-  };
-
-  static const bool expected_ignore_div_zero_check[] = {
-      false,  // New divisor seen.
-      true,   // Eliminated since it has the same divisor as the first one.
-      false,  // New divisor seen.
-      false,  // New divisor seen.
-      false,  // New divisor seen.
-      true,   // Eliminated in dominating block.
-      false,  // New divisor seen.
-      false,  // Phi node.
-      true,   // Eliminated on both sides of diamond and merged via phi.
-      true,   // Eliminated in dominating block.
-      true,   // Eliminated in dominating block.
-      false,  // Only eliminated on one path of diamond.
-  };
-
-  PrepareMIRs(mirs);
-  PerformGVN();
-  PerformGVNCodeModifications();
-  ASSERT_EQ(arraysize(expected_ignore_div_zero_check), mir_count_);
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    int expected = expected_ignore_div_zero_check[i] ? MIR_IGNORE_DIV_ZERO_CHECK : 0u;
-    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, CheckCastDiamond) {
-  static const MIRDef mirs[] = {
-      DEF_UNOP(3u, Instruction::INSTANCE_OF, 0u, 100u),
-      DEF_UNOP(3u, Instruction::INSTANCE_OF, 1u, 200u),
-      DEF_IFZ(3u, Instruction::IF_NEZ, 0u),
-      DEF_INVOKE1(4u, Instruction::CHECK_CAST, 100u),
-      DEF_INVOKE1(5u, Instruction::CHECK_CAST, 100u),
-      DEF_INVOKE1(5u, Instruction::CHECK_CAST, 200u),
-      DEF_INVOKE1(5u, Instruction::CHECK_CAST, 100u),
-      DEF_INVOKE1(6u, Instruction::CHECK_CAST, 100u),
-  };
-
-  static const bool expected_ignore_check_cast[] = {
-      false,  // instance-of
-      false,  // instance-of
-      false,  // if-nez
-      false,  // Not eliminated, fall-through branch.
-      true,   // Eliminated.
-      false,  // Not eliminated, different value.
-      false,  // Not eliminated, different type.
-      false,  // Not eliminated, bottom block.
-  };
-
-  PrepareMIRs(mirs);
-  mirs_[0].dalvikInsn.vC = 1234;  // type for instance-of
-  mirs_[1].dalvikInsn.vC = 1234;  // type for instance-of
-  mirs_[3].dalvikInsn.vB = 1234;  // type for check-cast
-  mirs_[4].dalvikInsn.vB = 1234;  // type for check-cast
-  mirs_[5].dalvikInsn.vB = 1234;  // type for check-cast
-  mirs_[6].dalvikInsn.vB = 4321;  // type for check-cast
-  mirs_[7].dalvikInsn.vB = 1234;  // type for check-cast
-  PerformGVN();
-  PerformGVNCodeModifications();
-  ASSERT_EQ(arraysize(expected_ignore_check_cast), mir_count_);
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    int expected = expected_ignore_check_cast[i] ? MIR_IGNORE_CHECK_CAST : 0u;
-    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(GlobalValueNumberingTest, CheckCastDominators) {
-  const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(7)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),  // Block #3, top of the diamond.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(7), DEF_PRED1(3)),     // Block #4, left side.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // Block #5, right side.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(7), DEF_PRED1(5)),     // Block #6, right side.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 6)),  // Block #7, bottom.
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNOP(3u, Instruction::INSTANCE_OF, 0u, 100u),
-      DEF_UNOP(3u, Instruction::INSTANCE_OF, 1u, 200u),
-      DEF_IFZ(3u, Instruction::IF_NEZ, 0u),
-      DEF_INVOKE1(4u, Instruction::CHECK_CAST, 100u),
-      DEF_INVOKE1(6u, Instruction::CHECK_CAST, 100u),
-      DEF_INVOKE1(6u, Instruction::CHECK_CAST, 200u),
-      DEF_INVOKE1(6u, Instruction::CHECK_CAST, 100u),
-      DEF_INVOKE1(7u, Instruction::CHECK_CAST, 100u),
-  };
-
-  static const bool expected_ignore_check_cast[] = {
-      false,  // instance-of
-      false,  // instance-of
-      false,  // if-nez
-      false,  // Not eliminated, fall-through branch.
-      true,   // Eliminated.
-      false,  // Not eliminated, different value.
-      false,  // Not eliminated, different type.
-      false,  // Not eliminated, bottom block.
-  };
-
-  PrepareBasicBlocks(bbs);
-  PrepareMIRs(mirs);
-  mirs_[0].dalvikInsn.vC = 1234;  // type for instance-of
-  mirs_[1].dalvikInsn.vC = 1234;  // type for instance-of
-  mirs_[3].dalvikInsn.vB = 1234;  // type for check-cast
-  mirs_[4].dalvikInsn.vB = 1234;  // type for check-cast
-  mirs_[5].dalvikInsn.vB = 1234;  // type for check-cast
-  mirs_[6].dalvikInsn.vB = 4321;  // type for check-cast
-  mirs_[7].dalvikInsn.vB = 1234;  // type for check-cast
-  PerformGVN();
-  PerformGVNCodeModifications();
-  ASSERT_EQ(arraysize(expected_ignore_check_cast), mir_count_);
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    int expected = expected_ignore_check_cast[i] ? MIR_IGNORE_CHECK_CAST : 0u;
-    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
deleted file mode 100644
index 445859c..0000000
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ /dev/null
@@ -1,1473 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <sstream>
-
-#include "gvn_dead_code_elimination.h"
-
-#include "base/arena_bit_vector.h"
-#include "base/bit_vector-inl.h"
-#include "base/macros.h"
-#include "base/allocator.h"
-#include "compiler_enums.h"
-#include "dataflow_iterator-inl.h"
-#include "dex_instruction.h"
-#include "dex/mir_graph.h"
-#include "local_value_numbering.h"
-
-namespace art {
-
-constexpr uint16_t GvnDeadCodeElimination::kNoValue;
-constexpr uint16_t GvnDeadCodeElimination::kNPos;
-
-inline uint16_t GvnDeadCodeElimination::MIRData::PrevChange(int v_reg) const {
-  DCHECK(has_def);
-  DCHECK(v_reg == vreg_def || v_reg == vreg_def + 1);
-  return (v_reg == vreg_def) ? prev_value.change : prev_value_high.change;
-}
-
-inline void GvnDeadCodeElimination::MIRData::SetPrevChange(int v_reg, uint16_t change) {
-  DCHECK(has_def);
-  DCHECK(v_reg == vreg_def || v_reg == vreg_def + 1);
-  if (v_reg == vreg_def) {
-    prev_value.change = change;
-  } else {
-    prev_value_high.change = change;
-  }
-}
-
-inline void GvnDeadCodeElimination::MIRData::RemovePrevChange(int v_reg, MIRData* prev_data) {
-  DCHECK_NE(PrevChange(v_reg), kNPos);
-  DCHECK(v_reg == prev_data->vreg_def || v_reg == prev_data->vreg_def + 1);
-  if (vreg_def == v_reg) {
-    if (prev_data->vreg_def == v_reg) {
-      prev_value = prev_data->prev_value;
-      low_def_over_high_word = prev_data->low_def_over_high_word;
-    } else {
-      prev_value = prev_data->prev_value_high;
-      low_def_over_high_word = !prev_data->high_def_over_low_word;
-    }
-  } else {
-    if (prev_data->vreg_def == v_reg) {
-      prev_value_high = prev_data->prev_value;
-      high_def_over_low_word = !prev_data->low_def_over_high_word;
-    } else {
-      prev_value_high = prev_data->prev_value_high;
-      high_def_over_low_word = prev_data->high_def_over_low_word;
-    }
-  }
-}
-
-GvnDeadCodeElimination::VRegChains::VRegChains(uint32_t num_vregs, ScopedArenaAllocator* alloc)
-    : num_vregs_(num_vregs),
-      vreg_data_(alloc->AllocArray<VRegValue>(num_vregs, kArenaAllocMisc)),
-      vreg_high_words_(false, Allocator::GetNoopAllocator(),
-                       BitVector::BitsToWords(num_vregs),
-                       alloc->AllocArray<uint32_t>(BitVector::BitsToWords(num_vregs))),
-      mir_data_(alloc->Adapter()) {
-  mir_data_.reserve(100);
-}
-
-inline void GvnDeadCodeElimination::VRegChains::Reset() {
-  DCHECK(mir_data_.empty());
-  std::fill_n(vreg_data_, num_vregs_, VRegValue());
-  vreg_high_words_.ClearAllBits();
-}
-
-void GvnDeadCodeElimination::VRegChains::AddMIRWithDef(MIR* mir, int v_reg, bool wide,
-                                                       uint16_t new_value) {
-  uint16_t pos = mir_data_.size();
-  mir_data_.emplace_back(mir);
-  MIRData* data = &mir_data_.back();
-  data->has_def = true;
-  data->wide_def = wide;
-  data->vreg_def = v_reg;
-
-  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
-  data->prev_value = vreg_data_[v_reg];
-  data->low_def_over_high_word =
-      (vreg_data_[v_reg].change != kNPos)
-      ? GetMIRData(vreg_data_[v_reg].change)->vreg_def + 1 == v_reg
-      : vreg_high_words_.IsBitSet(v_reg);
-  vreg_data_[v_reg].value = new_value;
-  vreg_data_[v_reg].change = pos;
-  vreg_high_words_.ClearBit(v_reg);
-
-  if (wide) {
-    DCHECK_LT(static_cast<size_t>(v_reg + 1), num_vregs_);
-    data->prev_value_high = vreg_data_[v_reg + 1];
-    data->high_def_over_low_word =
-        (vreg_data_[v_reg + 1].change != kNPos)
-        ? GetMIRData(vreg_data_[v_reg + 1].change)->vreg_def == v_reg + 1
-        : !vreg_high_words_.IsBitSet(v_reg + 1);
-    vreg_data_[v_reg + 1].value = new_value;
-    vreg_data_[v_reg + 1].change = pos;
-    vreg_high_words_.SetBit(v_reg + 1);
-  }
-}
-
-inline void GvnDeadCodeElimination::VRegChains::AddMIRWithoutDef(MIR* mir) {
-  mir_data_.emplace_back(mir);
-}
-
-void GvnDeadCodeElimination::VRegChains::RemoveLastMIRData() {
-  MIRData* data = LastMIRData();
-  if (data->has_def) {
-    DCHECK_EQ(vreg_data_[data->vreg_def].change, NumMIRs() - 1u);
-    vreg_data_[data->vreg_def] = data->prev_value;
-    DCHECK(!vreg_high_words_.IsBitSet(data->vreg_def));
-    if (data->low_def_over_high_word) {
-      vreg_high_words_.SetBit(data->vreg_def);
-    }
-    if (data->wide_def) {
-      DCHECK_EQ(vreg_data_[data->vreg_def + 1].change, NumMIRs() - 1u);
-      vreg_data_[data->vreg_def + 1] = data->prev_value_high;
-      DCHECK(vreg_high_words_.IsBitSet(data->vreg_def + 1));
-      if (data->high_def_over_low_word) {
-        vreg_high_words_.ClearBit(data->vreg_def + 1);
-      }
-    }
-  }
-  mir_data_.pop_back();
-}
-
-void GvnDeadCodeElimination::VRegChains::RemoveTrailingNops() {
-  // There's at least one NOP to drop. There may be more.
-  MIRData* last_data = LastMIRData();
-  DCHECK(!last_data->must_keep && !last_data->has_def);
-  do {
-    DCHECK_EQ(static_cast<int>(last_data->mir->dalvikInsn.opcode), static_cast<int>(kMirOpNop));
-    mir_data_.pop_back();
-    if (mir_data_.empty()) {
-      break;
-    }
-    last_data = LastMIRData();
-  } while (!last_data->must_keep && !last_data->has_def);
-}
-
-inline size_t GvnDeadCodeElimination::VRegChains::NumMIRs() const {
-  return mir_data_.size();
-}
-
-inline GvnDeadCodeElimination::MIRData* GvnDeadCodeElimination::VRegChains::GetMIRData(size_t pos) {
-  DCHECK_LT(pos, mir_data_.size());
-  return &mir_data_[pos];
-}
-
-inline GvnDeadCodeElimination::MIRData* GvnDeadCodeElimination::VRegChains::LastMIRData() {
-  DCHECK(!mir_data_.empty());
-  return &mir_data_.back();
-}
-
-uint32_t GvnDeadCodeElimination::VRegChains::NumVRegs() const {
-  return num_vregs_;
-}
-
-void GvnDeadCodeElimination::VRegChains::InsertInitialValueHigh(int v_reg, uint16_t value) {
-  DCHECK_NE(value, kNoValue);
-  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
-  uint16_t change = vreg_data_[v_reg].change;
-  if (change == kNPos) {
-    vreg_data_[v_reg].value = value;
-    vreg_high_words_.SetBit(v_reg);
-  } else {
-    while (true) {
-      MIRData* data = &mir_data_[change];
-      DCHECK(data->vreg_def == v_reg || data->vreg_def + 1 == v_reg);
-      if (data->vreg_def == v_reg) {  // Low word, use prev_value.
-        if (data->prev_value.change == kNPos) {
-          DCHECK_EQ(data->prev_value.value, kNoValue);
-          data->prev_value.value = value;
-          data->low_def_over_high_word = true;
-          break;
-        }
-        change = data->prev_value.change;
-      } else {  // High word, use prev_value_high.
-        if (data->prev_value_high.change == kNPos) {
-          DCHECK_EQ(data->prev_value_high.value, kNoValue);
-          data->prev_value_high.value = value;
-          break;
-        }
-        change = data->prev_value_high.change;
-      }
-    }
-  }
-}
-
-void GvnDeadCodeElimination::VRegChains::UpdateInitialVRegValue(int v_reg, bool wide,
-                                                                const LocalValueNumbering* lvn) {
-  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
-  if (!wide) {
-    if (vreg_data_[v_reg].value == kNoValue) {
-      uint16_t old_value = lvn->GetStartingVregValueNumber(v_reg);
-      if (old_value == kNoValue) {
-        // Maybe there was a wide value in v_reg before. Do not check for a wide value in v_reg-1;
-        // that will be done only if we see a definition of v_reg-1, as it is unnecessary otherwise.
-        old_value = lvn->GetStartingVregValueNumberWide(v_reg);
-        if (old_value != kNoValue) {
-          InsertInitialValueHigh(v_reg + 1, old_value);
-        }
-      }
-      vreg_data_[v_reg].value = old_value;
-      DCHECK(!vreg_high_words_.IsBitSet(v_reg));  // Keep marked as low word.
-    }
-  } else {
-    DCHECK_LT(static_cast<size_t>(v_reg + 1), num_vregs_);
-    bool check_high = true;
-    if (vreg_data_[v_reg].value == kNoValue) {
-      uint16_t old_value = lvn->GetStartingVregValueNumberWide(v_reg);
-      if (old_value != kNoValue) {
-        InsertInitialValueHigh(v_reg + 1, old_value);
-        check_high = false;  // High word has been processed.
-      } else {
-        // Maybe there was a narrow value before. Do not check for a wide value in v_reg-1;
-        // that will be done only if we see a definition of v_reg-1, as it is unnecessary otherwise.
-        old_value = lvn->GetStartingVregValueNumber(v_reg);
-      }
-      vreg_data_[v_reg].value = old_value;
-      DCHECK(!vreg_high_words_.IsBitSet(v_reg));  // Keep marked as low word.
-    }
-    if (check_high && vreg_data_[v_reg + 1].value == kNoValue) {
-      uint16_t old_value = lvn->GetStartingVregValueNumber(v_reg + 1);
-      if (old_value == kNoValue && static_cast<size_t>(v_reg + 2) < num_vregs_) {
-        // Maybe there was a wide value before.
-        old_value = lvn->GetStartingVregValueNumberWide(v_reg + 1);
-        if (old_value != kNoValue) {
-          InsertInitialValueHigh(v_reg + 2, old_value);
-        }
-      }
-      vreg_data_[v_reg + 1].value = old_value;
-      DCHECK(!vreg_high_words_.IsBitSet(v_reg + 1));  // Keep marked as low word.
-    }
-  }
-}
-
-inline uint16_t GvnDeadCodeElimination::VRegChains::LastChange(int v_reg) {
-  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
-  return vreg_data_[v_reg].change;
-}
-
-inline uint16_t GvnDeadCodeElimination::VRegChains::CurrentValue(int v_reg) {
-  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
-  return vreg_data_[v_reg].value;
-}
-
-uint16_t GvnDeadCodeElimination::VRegChains::FindKillHead(int v_reg, uint16_t cutoff) {
-  uint16_t current_value = this->CurrentValue(v_reg);
-  DCHECK_NE(current_value, kNoValue);
-  uint16_t change = LastChange(v_reg);
-  DCHECK_LT(change, mir_data_.size());
-  DCHECK_GE(change, cutoff);
-  bool match_high_word = (mir_data_[change].vreg_def != v_reg);
-  do {
-    MIRData* data = &mir_data_[change];
-    DCHECK(data->vreg_def == v_reg || data->vreg_def + 1 == v_reg);
-    if (data->vreg_def == v_reg) {  // Low word, use prev_value.
-      if (data->prev_value.value == current_value &&
-          match_high_word == data->low_def_over_high_word) {
-        break;
-      }
-      change = data->prev_value.change;
-    } else {  // High word, use prev_value_high.
-      if (data->prev_value_high.value == current_value &&
-          match_high_word != data->high_def_over_low_word) {
-        break;
-      }
-      change = data->prev_value_high.change;
-    }
-    if (change < cutoff) {
-      change = kNPos;
-    }
-  } while (change != kNPos);
-  return change;
-}
-
-uint16_t GvnDeadCodeElimination::VRegChains::FindFirstChangeAfter(int v_reg,
-                                                                  uint16_t change) const {
-  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
-  DCHECK_LT(change, mir_data_.size());
-  uint16_t result = kNPos;
-  uint16_t search_change = vreg_data_[v_reg].change;
-  while (search_change != kNPos && search_change > change) {
-    result = search_change;
-    search_change = mir_data_[search_change].PrevChange(v_reg);
-  }
-  return result;
-}
-
-void GvnDeadCodeElimination::VRegChains::ReplaceChange(uint16_t old_change, uint16_t new_change) {
-  const MIRData* old_data = GetMIRData(old_change);
-  DCHECK(old_data->has_def);
-  int count = old_data->wide_def ? 2 : 1;
-  for (int v_reg = old_data->vreg_def, end = old_data->vreg_def + count; v_reg != end; ++v_reg) {
-    uint16_t next_change = FindFirstChangeAfter(v_reg, old_change);
-    if (next_change == kNPos) {
-      DCHECK_EQ(vreg_data_[v_reg].change, old_change);
-      vreg_data_[v_reg].change = new_change;
-      DCHECK_EQ(vreg_high_words_.IsBitSet(v_reg), v_reg == old_data->vreg_def + 1);
-      // No change in vreg_high_words_.
-    } else {
-      DCHECK_EQ(mir_data_[next_change].PrevChange(v_reg), old_change);
-      mir_data_[next_change].SetPrevChange(v_reg, new_change);
-    }
-  }
-}
-
-void GvnDeadCodeElimination::VRegChains::RemoveChange(uint16_t change) {
-  MIRData* data = &mir_data_[change];
-  DCHECK(data->has_def);
-  int count = data->wide_def ? 2 : 1;
-  for (int v_reg = data->vreg_def, end = data->vreg_def + count; v_reg != end; ++v_reg) {
-    uint16_t next_change = FindFirstChangeAfter(v_reg, change);
-    if (next_change == kNPos) {
-      DCHECK_EQ(vreg_data_[v_reg].change, change);
-      vreg_data_[v_reg] = (data->vreg_def == v_reg) ? data->prev_value : data->prev_value_high;
-      DCHECK_EQ(vreg_high_words_.IsBitSet(v_reg), v_reg == data->vreg_def + 1);
-      if (data->vreg_def == v_reg && data->low_def_over_high_word) {
-        vreg_high_words_.SetBit(v_reg);
-      } else if (data->vreg_def != v_reg && data->high_def_over_low_word) {
-        vreg_high_words_.ClearBit(v_reg);
-      }
-    } else {
-      DCHECK_EQ(mir_data_[next_change].PrevChange(v_reg), change);
-      mir_data_[next_change].RemovePrevChange(v_reg, data);
-    }
-  }
-}
-
-inline bool GvnDeadCodeElimination::VRegChains::IsTopChange(uint16_t change) const {
-  DCHECK_LT(change, mir_data_.size());
-  const MIRData* data = &mir_data_[change];
-  DCHECK(data->has_def);
-  DCHECK_LT(data->wide_def ? data->vreg_def + 1u : data->vreg_def, num_vregs_);
-  return vreg_data_[data->vreg_def].change == change &&
-      (!data->wide_def || vreg_data_[data->vreg_def + 1u].change == change);
-}
-
-bool GvnDeadCodeElimination::VRegChains::IsSRegUsed(uint16_t first_change, uint16_t last_change,
-                                                    int s_reg) const {
-  DCHECK_LE(first_change, last_change);
-  DCHECK_LE(last_change, mir_data_.size());
-  for (size_t c = first_change; c != last_change; ++c) {
-    SSARepresentation* ssa_rep = mir_data_[c].mir->ssa_rep;
-    for (int i = 0; i != ssa_rep->num_uses; ++i) {
-      if (ssa_rep->uses[i] == s_reg) {
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-bool GvnDeadCodeElimination::VRegChains::IsVRegUsed(uint16_t first_change, uint16_t last_change,
-                                                    int v_reg, MIRGraph* mir_graph) const {
-  DCHECK_LE(first_change, last_change);
-  DCHECK_LE(last_change, mir_data_.size());
-  for (size_t c = first_change; c != last_change; ++c) {
-    SSARepresentation* ssa_rep = mir_data_[c].mir->ssa_rep;
-    for (int i = 0; i != ssa_rep->num_uses; ++i) {
-      if (mir_graph->SRegToVReg(ssa_rep->uses[i]) == v_reg) {
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-void GvnDeadCodeElimination::VRegChains::RenameSRegUses(uint16_t first_change, uint16_t last_change,
-                                                        int old_s_reg, int new_s_reg, bool wide) {
-  for (size_t c = first_change; c != last_change; ++c) {
-    SSARepresentation* ssa_rep = mir_data_[c].mir->ssa_rep;
-    for (int i = 0; i != ssa_rep->num_uses; ++i) {
-      if (ssa_rep->uses[i] == old_s_reg) {
-        ssa_rep->uses[i] = new_s_reg;
-        if (wide) {
-          ++i;
-          DCHECK_LT(i, ssa_rep->num_uses);
-          ssa_rep->uses[i] = new_s_reg + 1;
-        }
-      }
-    }
-  }
-}
-
-void GvnDeadCodeElimination::VRegChains::RenameVRegUses(uint16_t first_change, uint16_t last_change,
-                                                    int old_s_reg, int old_v_reg,
-                                                    int new_s_reg, int new_v_reg) {
-  for (size_t c = first_change; c != last_change; ++c) {
-    MIR* mir = mir_data_[c].mir;
-    if (IsInstructionBinOp2Addr(mir->dalvikInsn.opcode) &&
-        mir->ssa_rep->uses[0] == old_s_reg && old_v_reg != new_v_reg) {
-      // Rewrite binop_2ADDR with plain binop before doing the register rename.
-      ChangeBinOp2AddrToPlainBinOp(mir);
-    }
-    uint64_t df_attr = MIRGraph::GetDataFlowAttributes(mir);
-    size_t use = 0u;
-#define REPLACE_VREG(REG) \
-    if ((df_attr & DF_U##REG) != 0) {                                         \
-      if (mir->ssa_rep->uses[use] == old_s_reg) {                             \
-        DCHECK_EQ(mir->dalvikInsn.v##REG, static_cast<uint32_t>(old_v_reg));  \
-        mir->dalvikInsn.v##REG = new_v_reg;                                   \
-        mir->ssa_rep->uses[use] = new_s_reg;                                  \
-        if ((df_attr & DF_##REG##_WIDE) != 0) {                               \
-          DCHECK_EQ(mir->ssa_rep->uses[use + 1], old_s_reg + 1);              \
-          mir->ssa_rep->uses[use + 1] = new_s_reg + 1;                        \
-        }                                                                     \
-      }                                                                       \
-      use += ((df_attr & DF_##REG##_WIDE) != 0) ? 2 : 1;                      \
-    }
-    REPLACE_VREG(A)
-    REPLACE_VREG(B)
-    REPLACE_VREG(C)
-#undef REPLACE_VREG
-    // We may encounter an out-of-order Phi which we need to ignore; otherwise we should
-    // only be asked to rename registers specified by DF_UA, DF_UB and DF_UC.
-    DCHECK_EQ(use,
-              static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi
-              ? 0u
-              : static_cast<size_t>(mir->ssa_rep->num_uses));
-  }
-}
-
-GvnDeadCodeElimination::GvnDeadCodeElimination(const GlobalValueNumbering* gvn,
-                                         ScopedArenaAllocator* alloc)
-    : gvn_(gvn),
-      mir_graph_(gvn_->GetMirGraph()),
-      vreg_chains_(mir_graph_->GetNumOfCodeAndTempVRs(), alloc),
-      bb_(nullptr),
-      lvn_(nullptr),
-      no_uses_all_since_(0u),
-      unused_vregs_(new (alloc) ArenaBitVector(alloc, vreg_chains_.NumVRegs(), false)),
-      vregs_to_kill_(new (alloc) ArenaBitVector(alloc, vreg_chains_.NumVRegs(), false)),
-      kill_heads_(alloc->AllocArray<uint16_t>(vreg_chains_.NumVRegs(), kArenaAllocMisc)),
-      changes_to_kill_(alloc->Adapter()),
-      dependent_vregs_(new (alloc) ArenaBitVector(alloc, vreg_chains_.NumVRegs(), false)) {
-  changes_to_kill_.reserve(16u);
-}
-
-void GvnDeadCodeElimination::Apply(BasicBlock* bb) {
-  bb_ = bb;
-  lvn_ = gvn_->GetLvn(bb->id);
-
-  RecordPass();
-  BackwardPass();
-
-  DCHECK_EQ(no_uses_all_since_, 0u);
-  lvn_ = nullptr;
-  bb_ = nullptr;
-}
-
-void GvnDeadCodeElimination::RecordPass() {
-  // Record MIRs with vreg definition data, eliminate single instructions.
-  vreg_chains_.Reset();
-  DCHECK_EQ(no_uses_all_since_, 0u);
-  for (MIR* mir = bb_->first_mir_insn; mir != nullptr; mir = mir->next) {
-    if (RecordMIR(mir)) {
-      RecordPassTryToKillOverwrittenMoveOrMoveSrc();
-      RecordPassTryToKillLastMIR();
-    }
-  }
-}
-
-void GvnDeadCodeElimination::BackwardPass() {
-  // Now process MIRs in reverse order, trying to eliminate them.
-  unused_vregs_->ClearAllBits();  // Implicitly depend on all vregs at the end of BB.
-  while (vreg_chains_.NumMIRs() != 0u) {
-    if (BackwardPassTryToKillLastMIR()) {
-      continue;
-    }
-    BackwardPassProcessLastMIR();
-  }
-}
-
-void GvnDeadCodeElimination::KillMIR(MIRData* data) {
-  DCHECK(!data->must_keep);
-  DCHECK(!data->uses_all_vregs);
-  DCHECK(data->has_def);
-  DCHECK(data->mir->ssa_rep->num_defs == 1 || data->mir->ssa_rep->num_defs == 2);
-
-  KillMIR(data->mir);
-  data->has_def = false;
-  data->is_move = false;
-  data->is_move_src = false;
-}
-
-void GvnDeadCodeElimination::KillMIR(MIR* mir) {
-  mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-  mir->ssa_rep->num_uses = 0;
-  mir->ssa_rep->num_defs = 0;
-}
-
-void GvnDeadCodeElimination::ChangeBinOp2AddrToPlainBinOp(MIR* mir) {
-  mir->dalvikInsn.vC = mir->dalvikInsn.vB;
-  mir->dalvikInsn.vB = mir->dalvikInsn.vA;
-  mir->dalvikInsn.opcode = static_cast<Instruction::Code>(
-      mir->dalvikInsn.opcode - Instruction::ADD_INT_2ADDR + Instruction::ADD_INT);
-}
-
-MIR* GvnDeadCodeElimination::CreatePhi(int s_reg) {
-  int v_reg = mir_graph_->SRegToVReg(s_reg);
-  MIR* phi = mir_graph_->NewMIR();
-  phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
-  phi->dalvikInsn.vA = v_reg;
-  phi->offset = bb_->start_offset;
-  phi->m_unit_index = 0;  // Arbitrarily assign all Phi nodes to outermost method.
-
-  phi->ssa_rep = static_cast<struct SSARepresentation *>(mir_graph_->GetArena()->Alloc(
-      sizeof(SSARepresentation), kArenaAllocDFInfo));
-
-  mir_graph_->AllocateSSADefData(phi, 1);
-  phi->ssa_rep->defs[0] = s_reg;
-
-  size_t num_uses = bb_->predecessors.size();
-  mir_graph_->AllocateSSAUseData(phi, num_uses);
-  size_t idx = 0u;
-  for (BasicBlockId pred_id : bb_->predecessors) {
-    BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
-    DCHECK(pred_bb != nullptr);
-    phi->ssa_rep->uses[idx] = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
-    DCHECK_NE(phi->ssa_rep->uses[idx], INVALID_SREG);
-    idx++;
-  }
-
-  phi->meta.phi_incoming = static_cast<BasicBlockId*>(mir_graph_->GetArena()->Alloc(
-      sizeof(BasicBlockId) * num_uses, kArenaAllocDFInfo));
-  std::copy(bb_->predecessors.begin(), bb_->predecessors.end(), phi->meta.phi_incoming);
-  bb_->PrependMIR(phi);
-  return phi;
-}
-
-MIR* GvnDeadCodeElimination::RenameSRegDefOrCreatePhi(uint16_t def_change, uint16_t last_change,
-                                                      MIR* mir_to_kill) {
-  DCHECK(mir_to_kill->ssa_rep->num_defs == 1 || mir_to_kill->ssa_rep->num_defs == 2);
-  bool wide = (mir_to_kill->ssa_rep->num_defs != 1);
-  int new_s_reg = mir_to_kill->ssa_rep->defs[0];
-
-  // Just before we kill mir_to_kill, we need to replace the previous SSA reg assigned to the
-  // same dalvik reg to keep consistency with subsequent instructions. However, if there's no
-  // defining MIR for that dalvik reg, the preserved values must come from its predecessors
-  // and we need to create a new Phi (a degenerate Phi if there's only a single predecessor).
-  if (def_change == kNPos) {
-    if (wide) {
-      DCHECK_EQ(new_s_reg + 1, mir_to_kill->ssa_rep->defs[1]);
-      DCHECK_EQ(mir_graph_->SRegToVReg(new_s_reg) + 1, mir_graph_->SRegToVReg(new_s_reg + 1));
-      CreatePhi(new_s_reg + 1);  // High word Phi.
-    }
-    MIR* phi = CreatePhi(new_s_reg);
-    // If this is a degenerate Phi with all inputs being the same SSA reg, we need to
-    // rename its uses.
-    DCHECK_NE(phi->ssa_rep->num_uses, 0u);
-    int old_s_reg = phi->ssa_rep->uses[0];
-    bool all_same = true;
-    for (size_t i = 1u, num = phi->ssa_rep->num_uses; i != num; ++i) {
-      if (phi->ssa_rep->uses[i] != old_s_reg) {
-        all_same = false;
-        break;
-      }
-    }
-    if (all_same) {
-      vreg_chains_.RenameSRegUses(0u, last_change, old_s_reg, new_s_reg, wide);
-    }
-    return phi;
-  } else {
-    DCHECK_LT(def_change, last_change);
-    DCHECK_LE(last_change, vreg_chains_.NumMIRs());
-    MIRData* def_data = vreg_chains_.GetMIRData(def_change);
-    DCHECK(def_data->has_def);
-    int old_s_reg = def_data->mir->ssa_rep->defs[0];
-    DCHECK_NE(old_s_reg, new_s_reg);
-    DCHECK_EQ(mir_graph_->SRegToVReg(old_s_reg), mir_graph_->SRegToVReg(new_s_reg));
-    def_data->mir->ssa_rep->defs[0] = new_s_reg;
-    if (wide) {
-      if (static_cast<int>(def_data->mir->dalvikInsn.opcode) == kMirOpPhi) {
-        // Currently the high word Phi is always located after the low word Phi.
-        MIR* phi_high = def_data->mir->next;
-        DCHECK(phi_high != nullptr && static_cast<int>(phi_high->dalvikInsn.opcode) == kMirOpPhi);
-        DCHECK_EQ(phi_high->ssa_rep->defs[0], old_s_reg + 1);
-        phi_high->ssa_rep->defs[0] = new_s_reg + 1;
-      } else {
-        DCHECK_EQ(def_data->mir->ssa_rep->defs[1], old_s_reg + 1);
-        def_data->mir->ssa_rep->defs[1] = new_s_reg + 1;
-      }
-    }
-    vreg_chains_.RenameSRegUses(def_change + 1u, last_change, old_s_reg, new_s_reg, wide);
-    return nullptr;
-  }
-}
-
-
-void GvnDeadCodeElimination::BackwardPassProcessLastMIR() {
-  MIRData* data = vreg_chains_.LastMIRData();
-  if (data->uses_all_vregs) {
-    DCHECK(data->must_keep);
-    unused_vregs_->ClearAllBits();
-    DCHECK_EQ(no_uses_all_since_, vreg_chains_.NumMIRs());
-    --no_uses_all_since_;
-    while (no_uses_all_since_ != 0u &&
-        !vreg_chains_.GetMIRData(no_uses_all_since_ - 1u)->uses_all_vregs) {
-      --no_uses_all_since_;
-    }
-  } else {
-    if (data->has_def) {
-      unused_vregs_->SetBit(data->vreg_def);
-      if (data->wide_def) {
-        unused_vregs_->SetBit(data->vreg_def + 1);
-      }
-    }
-    for (int i = 0, num_uses = data->mir->ssa_rep->num_uses; i != num_uses; ++i) {
-      int v_reg = mir_graph_->SRegToVReg(data->mir->ssa_rep->uses[i]);
-      unused_vregs_->ClearBit(v_reg);
-    }
-  }
-  vreg_chains_.RemoveLastMIRData();
-}
-
-void GvnDeadCodeElimination::RecordPassKillMoveByRenamingSrcDef(uint16_t src_change,
-                                                                uint16_t move_change) {
-  DCHECK_LT(src_change, move_change);
-  MIRData* src_data = vreg_chains_.GetMIRData(src_change);
-  MIRData* move_data = vreg_chains_.GetMIRData(move_change);
-  DCHECK(src_data->is_move_src);
-  DCHECK_EQ(src_data->wide_def, move_data->wide_def);
-  DCHECK(move_data->prev_value.change == kNPos || move_data->prev_value.change <= src_change);
-  DCHECK(!move_data->wide_def || move_data->prev_value_high.change == kNPos ||
-         move_data->prev_value_high.change <= src_change);
-
-  int old_s_reg = src_data->mir->ssa_rep->defs[0];
-  // NOTE: old_s_reg may differ from move_data->mir->ssa_rep->uses[0]; value names must match.
-  int new_s_reg = move_data->mir->ssa_rep->defs[0];
-  DCHECK_NE(old_s_reg, new_s_reg);
-
-  if (IsInstructionBinOp2Addr(src_data->mir->dalvikInsn.opcode) &&
-      src_data->vreg_def != move_data->vreg_def) {
-    // Rewrite binop_2ADDR with plain binop before doing the register rename.
-    ChangeBinOp2AddrToPlainBinOp(src_data->mir);
-  }
-  // Remove src_change from the vreg chain(s).
-  vreg_chains_.RemoveChange(src_change);
-  // Replace the move_change with the src_change, copying all necessary data.
-  src_data->is_move_src = move_data->is_move_src;
-  src_data->low_def_over_high_word = move_data->low_def_over_high_word;
-  src_data->high_def_over_low_word = move_data->high_def_over_low_word;
-  src_data->vreg_def = move_data->vreg_def;
-  src_data->prev_value = move_data->prev_value;
-  src_data->prev_value_high = move_data->prev_value_high;
-  src_data->mir->dalvikInsn.vA = move_data->vreg_def;
-  src_data->mir->ssa_rep->defs[0] = new_s_reg;
-  if (move_data->wide_def) {
-    DCHECK_EQ(src_data->mir->ssa_rep->defs[1], old_s_reg + 1);
-    src_data->mir->ssa_rep->defs[1] = new_s_reg + 1;
-  }
-  vreg_chains_.ReplaceChange(move_change, src_change);
-
-  // Rename uses and kill the move.
-  vreg_chains_.RenameVRegUses(src_change + 1u, vreg_chains_.NumMIRs(),
-                              old_s_reg, mir_graph_->SRegToVReg(old_s_reg),
-                              new_s_reg, mir_graph_->SRegToVReg(new_s_reg));
-  KillMIR(move_data);
-}
-
-void GvnDeadCodeElimination::RecordPassTryToKillOverwrittenMoveOrMoveSrc(uint16_t check_change) {
-  MIRData* data = vreg_chains_.GetMIRData(check_change);
-  DCHECK(data->is_move || data->is_move_src);
-  int32_t dest_s_reg = data->mir->ssa_rep->defs[0];
-
-  if (data->is_move) {
-    // Check if source vreg has changed since the MOVE.
-    int32_t src_s_reg = data->mir->ssa_rep->uses[0];
-    uint32_t src_v_reg = mir_graph_->SRegToVReg(src_s_reg);
-    uint16_t src_change = vreg_chains_.FindFirstChangeAfter(src_v_reg, check_change);
-    bool wide = data->wide_def;
-    if (wide) {
-      uint16_t src_change_high = vreg_chains_.FindFirstChangeAfter(src_v_reg + 1, check_change);
-      if (src_change_high != kNPos && (src_change == kNPos || src_change_high < src_change)) {
-        src_change = src_change_high;
-      }
-    }
-    if (src_change == kNPos ||
-        !vreg_chains_.IsSRegUsed(src_change + 1u, vreg_chains_.NumMIRs(), dest_s_reg)) {
-      // We can simply change all uses of dest to src.
-      size_t rename_end = (src_change != kNPos) ? src_change + 1u : vreg_chains_.NumMIRs();
-      vreg_chains_.RenameVRegUses(check_change + 1u, rename_end,
-                                  dest_s_reg, mir_graph_->SRegToVReg(dest_s_reg),
-                                  src_s_reg,  mir_graph_->SRegToVReg(src_s_reg));
-
-      // Now, remove the MOVE from the vreg chain(s) and kill it.
-      vreg_chains_.RemoveChange(check_change);
-      KillMIR(data);
-      return;
-    }
-  }
-
-  if (data->is_move_src) {
-    // Try to find a MOVE to a vreg that wasn't changed since check_change.
-    uint16_t value_name =
-        data->wide_def ? lvn_->GetSregValueWide(dest_s_reg) : lvn_->GetSregValue(dest_s_reg);
-    uint32_t dest_v_reg = mir_graph_->SRegToVReg(dest_s_reg);
-    for (size_t c = check_change + 1u, size = vreg_chains_.NumMIRs(); c != size; ++c) {
-      MIRData* d = vreg_chains_.GetMIRData(c);
-      if (d->is_move && d->wide_def == data->wide_def &&
-          (d->prev_value.change == kNPos || d->prev_value.change <= check_change) &&
-          (!d->wide_def ||
-           d->prev_value_high.change == kNPos || d->prev_value_high.change <= check_change)) {
-        // Compare value names to find move to move.
-        int32_t src_s_reg = d->mir->ssa_rep->uses[0];
-        uint16_t src_name =
-            (d->wide_def ? lvn_->GetSregValueWide(src_s_reg) : lvn_->GetSregValue(src_s_reg));
-        if (value_name == src_name) {
-          // Check if the move's destination vreg is unused between check_change and the move.
-          uint32_t new_dest_v_reg = mir_graph_->SRegToVReg(d->mir->ssa_rep->defs[0]);
-          if (!vreg_chains_.IsVRegUsed(check_change + 1u, c, new_dest_v_reg, mir_graph_) &&
-              (!d->wide_def ||
-               !vreg_chains_.IsVRegUsed(check_change + 1u, c, new_dest_v_reg + 1, mir_graph_))) {
-            // If the move's destination vreg changed, check if the vreg we're trying
-            // to rename is unused after that change.
-            uint16_t dest_change = vreg_chains_.FindFirstChangeAfter(new_dest_v_reg, c);
-            if (d->wide_def) {
-              uint16_t dest_change_high = vreg_chains_.FindFirstChangeAfter(new_dest_v_reg + 1, c);
-              if (dest_change_high != kNPos &&
-                  (dest_change == kNPos || dest_change_high < dest_change)) {
-                dest_change = dest_change_high;
-              }
-            }
-            if (dest_change == kNPos ||
-                !vreg_chains_.IsVRegUsed(dest_change + 1u, size, dest_v_reg, mir_graph_)) {
-              RecordPassKillMoveByRenamingSrcDef(check_change, c);
-              return;
-            }
-          }
-        }
-      }
-    }
-  }
-}
-
-void GvnDeadCodeElimination::RecordPassTryToKillOverwrittenMoveOrMoveSrc() {
-  // Check if we're overwriting the result of a move or the definition of a source of a move.
-  // For MOVE_WIDE, we may be overwriting partially; if that's the case, check that the other
-  // word wasn't previously overwritten - we would have tried to rename back then.
-  MIRData* data = vreg_chains_.LastMIRData();
-  if (!data->has_def) {
-    return;
-  }
-  // NOTE: Instructions such as new-array implicitly use all vregs (if they throw) but they can
-  // define a move source which can be renamed. Therefore we allow the checked change to be the
-  // change before no_uses_all_since_. This has no effect on moves as they never use all vregs.
-  if (data->prev_value.change != kNPos && data->prev_value.change + 1u >= no_uses_all_since_) {
-    MIRData* check_data = vreg_chains_.GetMIRData(data->prev_value.change);
-    bool try_to_kill = false;
-    if (!check_data->is_move && !check_data->is_move_src) {
-      DCHECK(!try_to_kill);
-    } else if (!check_data->wide_def) {
-      // Narrow move; always fully overwritten by the last MIR.
-      try_to_kill = true;
-    } else if (data->low_def_over_high_word) {
-      // Overwriting only the high word; is the low word still valid?
-      DCHECK_EQ(check_data->vreg_def + 1u, data->vreg_def);
-      if (vreg_chains_.LastChange(check_data->vreg_def) == data->prev_value.change) {
-        try_to_kill = true;
-      }
-    } else if (!data->wide_def) {
-      // Overwriting only the low word; is the high word still valid?
-      if (vreg_chains_.LastChange(data->vreg_def + 1) == data->prev_value.change) {
-        try_to_kill = true;
-      }
-    } else {
-      // Overwriting both words; was the high word still from the same move?
-      if (data->prev_value_high.change == data->prev_value.change) {
-        try_to_kill = true;
-      }
-    }
-    if (try_to_kill) {
-      RecordPassTryToKillOverwrittenMoveOrMoveSrc(data->prev_value.change);
-    }
-  }
-  if (data->wide_def && data->high_def_over_low_word &&
-      data->prev_value_high.change != kNPos &&
-      data->prev_value_high.change + 1u >= no_uses_all_since_) {
-    MIRData* check_data = vreg_chains_.GetMIRData(data->prev_value_high.change);
-    bool try_to_kill = false;
-    if (!check_data->is_move && !check_data->is_move_src) {
-      DCHECK(!try_to_kill);
-    } else if (!check_data->wide_def) {
-      // Narrow move; always fully overwritten by the last MIR.
-      try_to_kill = true;
-    } else if (vreg_chains_.LastChange(check_data->vreg_def + 1) ==
-        data->prev_value_high.change) {
-      // High word is still valid.
-      try_to_kill = true;
-    }
-    if (try_to_kill) {
-      RecordPassTryToKillOverwrittenMoveOrMoveSrc(data->prev_value_high.change);
-    }
-  }
-}
-
-void GvnDeadCodeElimination::RecordPassTryToKillLastMIR() {
-  MIRData* last_data = vreg_chains_.LastMIRData();
-  if (last_data->must_keep) {
-    return;
-  }
-  if (UNLIKELY(!last_data->has_def)) {
-    // Must be an eliminated MOVE. Drop its data and data of all eliminated MIRs before it.
-    vreg_chains_.RemoveTrailingNops();
-    return;
-  }
-
-  // Try to kill a sequence of consecutive definitions of the same vreg. Allow mixing
-  // wide and non-wide defs; consider high word dead if low word has been overwritten.
-  uint16_t current_value = vreg_chains_.CurrentValue(last_data->vreg_def);
-  uint16_t change = vreg_chains_.NumMIRs() - 1u;
-  MIRData* data = last_data;
-  while (data->prev_value.value != current_value) {
-    --change;
-    if (data->prev_value.change == kNPos || data->prev_value.change != change) {
-      return;
-    }
-    data = vreg_chains_.GetMIRData(data->prev_value.change);
-    if (data->must_keep || !data->has_def || data->vreg_def != last_data->vreg_def) {
-      return;
-    }
-  }
-
-  bool wide = last_data->wide_def;
-  if (wide) {
-    // Check that the low word is valid.
-    if (data->low_def_over_high_word) {
-      return;
-    }
-    // Check that the high word is valid.
-    MIRData* high_data = data;
-    if (!high_data->wide_def) {
-      uint16_t high_change = vreg_chains_.FindFirstChangeAfter(data->vreg_def + 1, change);
-      DCHECK_NE(high_change, kNPos);
-      high_data = vreg_chains_.GetMIRData(high_change);
-      DCHECK_EQ(high_data->vreg_def, data->vreg_def);
-    }
-    if (high_data->prev_value_high.value != current_value || high_data->high_def_over_low_word) {
-      return;
-    }
-  }
-
-  MIR* phi = RenameSRegDefOrCreatePhi(data->prev_value.change, change, last_data->mir);
-  for (size_t i = 0, count = vreg_chains_.NumMIRs() - change; i != count; ++i) {
-    KillMIR(vreg_chains_.LastMIRData()->mir);
-    vreg_chains_.RemoveLastMIRData();
-  }
-  if (phi != nullptr) {
-    // Though the Phi has been added to the beginning, we can put the MIRData at the end.
-    vreg_chains_.AddMIRWithDef(phi, phi->dalvikInsn.vA, wide, current_value);
-    // Reset the previous value to avoid eventually eliminating the Phi itself (unless unused).
-    last_data = vreg_chains_.LastMIRData();
-    last_data->prev_value.value = kNoValue;
-    last_data->prev_value_high.value = kNoValue;
-  }
-}
-
-uint16_t GvnDeadCodeElimination::FindChangesToKill(uint16_t first_change, uint16_t last_change) {
-  // Process dependencies for changes in range [first_change, last_change) and record all
-  // changes that we need to kill. Return kNPos if there's a dependent change that must be
-  // kept unconditionally; otherwise return the end of the range processed before encountering
-  // a change that defines a dalvik reg that we need to keep (last_change on full success).
-  changes_to_kill_.clear();
-  dependent_vregs_->ClearAllBits();
-  for (size_t change = first_change; change != last_change; ++change) {
-    MIRData* data = vreg_chains_.GetMIRData(change);
-    DCHECK(!data->uses_all_vregs);
-    bool must_not_depend = data->must_keep;
-    bool depends = false;
-    // Check if the MIR defines a vreg we're trying to eliminate.
-    if (data->has_def && vregs_to_kill_->IsBitSet(data->vreg_def)) {
-      if (change < kill_heads_[data->vreg_def]) {
-        must_not_depend = true;
-      } else {
-        depends = true;
-      }
-    }
-    if (data->has_def && data->wide_def && vregs_to_kill_->IsBitSet(data->vreg_def + 1)) {
-      if (change < kill_heads_[data->vreg_def + 1]) {
-        must_not_depend = true;
-      } else {
-        depends = true;
-      }
-    }
-    if (!depends) {
-      // Check for dependency through SSA reg uses.
-      SSARepresentation* ssa_rep = data->mir->ssa_rep;
-      for (int i = 0; i != ssa_rep->num_uses; ++i) {
-        if (dependent_vregs_->IsBitSet(mir_graph_->SRegToVReg(ssa_rep->uses[i]))) {
-          depends = true;
-          break;
-        }
-      }
-    }
-    // Now check if we can eliminate the insn if we need to.
-    if (depends && must_not_depend) {
-      return kNPos;
-    }
-    if (depends && data->has_def &&
-        vreg_chains_.IsTopChange(change) && !vregs_to_kill_->IsBitSet(data->vreg_def) &&
-        !unused_vregs_->IsBitSet(data->vreg_def) &&
-        (!data->wide_def || !unused_vregs_->IsBitSet(data->vreg_def + 1))) {
-      // This is a top change but neither unnecessary nor one of the top kill changes.
-      return change;
-    }
-    // Finally, update the data.
-    if (depends) {
-      changes_to_kill_.push_back(change);
-      if (data->has_def) {
-        dependent_vregs_->SetBit(data->vreg_def);
-        if (data->wide_def) {
-          dependent_vregs_->SetBit(data->vreg_def + 1);
-        }
-      }
-    } else {
-      if (data->has_def) {
-        dependent_vregs_->ClearBit(data->vreg_def);
-        if (data->wide_def) {
-          dependent_vregs_->ClearBit(data->vreg_def + 1);
-        }
-      }
-    }
-  }
-  return last_change;
-}
-
-void GvnDeadCodeElimination::BackwardPassTryToKillRevertVRegs() {
-}
-
-bool GvnDeadCodeElimination::BackwardPassTryToKillLastMIR() {
-  MIRData* last_data = vreg_chains_.LastMIRData();
-  if (last_data->must_keep) {
-    return false;
-  }
-  DCHECK(!last_data->uses_all_vregs);
-  if (!last_data->has_def) {
-    // Previously eliminated.
-    DCHECK_EQ(static_cast<int>(last_data->mir->dalvikInsn.opcode), static_cast<int>(kMirOpNop));
-    vreg_chains_.RemoveTrailingNops();
-    return true;
-  }
-  if (unused_vregs_->IsBitSet(last_data->vreg_def) ||
-      (last_data->wide_def && unused_vregs_->IsBitSet(last_data->vreg_def + 1))) {
-    if (last_data->wide_def) {
-      // For wide defs, one of the vregs may still be considered needed; fix that.
-      unused_vregs_->SetBit(last_data->vreg_def);
-      unused_vregs_->SetBit(last_data->vreg_def + 1);
-    }
-    KillMIR(last_data->mir);
-    vreg_chains_.RemoveLastMIRData();
-    return true;
-  }
-
-  vregs_to_kill_->ClearAllBits();
-  size_t num_mirs = vreg_chains_.NumMIRs();
-  DCHECK_NE(num_mirs, 0u);
-  uint16_t kill_change = num_mirs - 1u;
-  uint16_t start = num_mirs;
-  size_t num_killed_top_changes = 0u;
-  while (num_killed_top_changes != kMaxNumTopChangesToKill &&
-      kill_change != kNPos && kill_change != num_mirs) {
-    ++num_killed_top_changes;
-
-    DCHECK(vreg_chains_.IsTopChange(kill_change));
-    MIRData* data = vreg_chains_.GetMIRData(kill_change);
-    int count = data->wide_def ? 2 : 1;
-    for (int v_reg = data->vreg_def, end = data->vreg_def + count; v_reg != end; ++v_reg) {
-      uint16_t kill_head = vreg_chains_.FindKillHead(v_reg, no_uses_all_since_);
-      if (kill_head == kNPos) {
-        return false;
-      }
-      kill_heads_[v_reg] = kill_head;
-      vregs_to_kill_->SetBit(v_reg);
-      start = std::min(start, kill_head);
-    }
-    DCHECK_LT(start, vreg_chains_.NumMIRs());
-
-    kill_change = FindChangesToKill(start, num_mirs);
-  }
-
-  if (kill_change != num_mirs) {
-    return false;
-  }
-
-  // Kill all MIRs marked as dependent.
-  for (uint32_t v_reg : vregs_to_kill_->Indexes()) {
-    // Rename s_regs or create Phi only once for each MIR (only for low word).
-    MIRData* data = vreg_chains_.GetMIRData(vreg_chains_.LastChange(v_reg));
-    DCHECK(data->has_def);
-    if (data->vreg_def == v_reg) {
-      MIRData* kill_head_data = vreg_chains_.GetMIRData(kill_heads_[v_reg]);
-      RenameSRegDefOrCreatePhi(kill_head_data->PrevChange(v_reg), num_mirs, data->mir);
-    } else {
-      DCHECK_EQ(data->vreg_def + 1u, v_reg);
-      DCHECK_EQ(vreg_chains_.GetMIRData(kill_heads_[v_reg - 1u])->PrevChange(v_reg - 1u),
-                vreg_chains_.GetMIRData(kill_heads_[v_reg])->PrevChange(v_reg));
-    }
-  }
-  for (auto it = changes_to_kill_.rbegin(), end = changes_to_kill_.rend(); it != end; ++it) {
-    MIRData* data = vreg_chains_.GetMIRData(*it);
-    DCHECK(!data->must_keep);
-    DCHECK(data->has_def);
-    vreg_chains_.RemoveChange(*it);
-    KillMIR(data);
-  }
-
-  // Each dependent register not in vregs_to_kill_ is either already marked unused or
-  // it's one word of a wide register where the other word has been overwritten.
-  unused_vregs_->UnionIfNotIn(dependent_vregs_, vregs_to_kill_);
-
-  vreg_chains_.RemoveTrailingNops();
-  return true;
-}
-
-bool GvnDeadCodeElimination::RecordMIR(MIR* mir) {
-  bool must_keep = false;
-  bool uses_all_vregs = false;
-  bool is_move = false;
-  uint16_t opcode = mir->dalvikInsn.opcode;
-  switch (opcode) {
-    case kMirOpPhi: {
-      // Determine if this Phi is merging wide regs.
-      RegLocation raw_dest = gvn_->GetMirGraph()->GetRawDest(mir);
-      if (raw_dest.high_word) {
-        // This is the high part of a wide reg. Ignore the Phi.
-        return false;
-      }
-      bool wide = raw_dest.wide;
-      // Record the value.
-      DCHECK_EQ(mir->ssa_rep->num_defs, 1);
-      int s_reg = mir->ssa_rep->defs[0];
-      uint16_t new_value = wide ? lvn_->GetSregValueWide(s_reg) : lvn_->GetSregValue(s_reg);
-
-      int v_reg = mir_graph_->SRegToVReg(s_reg);
-      DCHECK_EQ(vreg_chains_.CurrentValue(v_reg), kNoValue);  // No previous def for v_reg.
-      if (wide) {
-        DCHECK_EQ(vreg_chains_.CurrentValue(v_reg + 1), kNoValue);
-      }
-      vreg_chains_.AddMIRWithDef(mir, v_reg, wide, new_value);
-      return true;  // Avoid the common processing.
-    }
-
-    case kMirOpNop:
-    case Instruction::NOP:
-      // Don't record NOPs.
-      return false;
-
-    case kMirOpCheck:
-      must_keep = true;
-      uses_all_vregs = true;
-      break;
-
-    case Instruction::RETURN_VOID:
-    case Instruction::RETURN:
-    case Instruction::RETURN_OBJECT:
-    case Instruction::RETURN_WIDE:
-    case Instruction::GOTO:
-    case Instruction::GOTO_16:
-    case Instruction::GOTO_32:
-    case Instruction::PACKED_SWITCH:
-    case Instruction::SPARSE_SWITCH:
-    case Instruction::IF_EQ:
-    case Instruction::IF_NE:
-    case Instruction::IF_LT:
-    case Instruction::IF_GE:
-    case Instruction::IF_GT:
-    case Instruction::IF_LE:
-    case Instruction::IF_EQZ:
-    case Instruction::IF_NEZ:
-    case Instruction::IF_LTZ:
-    case Instruction::IF_GEZ:
-    case Instruction::IF_GTZ:
-    case Instruction::IF_LEZ:
-    case kMirOpFusedCmplFloat:
-    case kMirOpFusedCmpgFloat:
-    case kMirOpFusedCmplDouble:
-    case kMirOpFusedCmpgDouble:
-    case kMirOpFusedCmpLong:
-      must_keep = true;
-      uses_all_vregs = true;  // Keep the implicit dependencies on all vregs.
-      break;
-
-    case Instruction::CONST_CLASS:
-    case Instruction::CONST_STRING:
-    case Instruction::CONST_STRING_JUMBO:
-      // NOTE: We currently treat CONST_CLASS, CONST_STRING and CONST_STRING_JUMBO as throwing,
-      // but we could conceivably try to eliminate those exceptions if we're retrieving the
-      // class/string repeatedly.
-      must_keep = true;
-      uses_all_vregs = true;
-      break;
-
-    case Instruction::MONITOR_ENTER:
-    case Instruction::MONITOR_EXIT:
-      // We can actually try to optimize across the acquire operation of MONITOR_ENTER;
-      // the value names provided by GVN reflect the possible changes to memory visibility.
-      // NOTE: In ART, MONITOR_ENTER and MONITOR_EXIT can throw only NPE.
-      must_keep = true;
-      uses_all_vregs = (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0;
-      break;
-
-    case Instruction::INVOKE_DIRECT:
-    case Instruction::INVOKE_DIRECT_RANGE:
-    case Instruction::INVOKE_VIRTUAL:
-    case Instruction::INVOKE_VIRTUAL_RANGE:
-    case Instruction::INVOKE_SUPER:
-    case Instruction::INVOKE_SUPER_RANGE:
-    case Instruction::INVOKE_INTERFACE:
-    case Instruction::INVOKE_INTERFACE_RANGE:
-    case Instruction::INVOKE_STATIC:
-    case Instruction::INVOKE_STATIC_RANGE:
-    case Instruction::THROW:
-    case Instruction::FILLED_NEW_ARRAY:
-    case Instruction::FILLED_NEW_ARRAY_RANGE:
-    case Instruction::FILL_ARRAY_DATA:
-      must_keep = true;
-      uses_all_vregs = true;
-      break;
-
-    case Instruction::NEW_INSTANCE:
-    case Instruction::NEW_ARRAY:
-      must_keep = true;
-      uses_all_vregs = true;
-      break;
-
-    case Instruction::CHECK_CAST:
-      DCHECK_EQ(mir->ssa_rep->num_uses, 1);
-      must_keep = true;  // Keep for type information even if MIR_IGNORE_CHECK_CAST.
-      uses_all_vregs = (mir->optimization_flags & MIR_IGNORE_CHECK_CAST) == 0;
-      break;
-
-    case kMirOpNullCheck:
-      DCHECK_EQ(mir->ssa_rep->num_uses, 1);
-      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) {
-        mir->ssa_rep->num_uses = 0;
-        mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-        return false;
-      }
-      must_keep = true;
-      uses_all_vregs = true;
-      break;
-
-    case Instruction::MOVE_RESULT:
-    case Instruction::MOVE_RESULT_OBJECT:
-    case Instruction::MOVE_RESULT_WIDE:
-      break;
-
-    case Instruction::INSTANCE_OF:
-      break;
-
-    case Instruction::MOVE_EXCEPTION:
-      must_keep = true;
-      break;
-
-    case kMirOpCopy:
-    case Instruction::MOVE:
-    case Instruction::MOVE_FROM16:
-    case Instruction::MOVE_16:
-    case Instruction::MOVE_WIDE:
-    case Instruction::MOVE_WIDE_FROM16:
-    case Instruction::MOVE_WIDE_16:
-    case Instruction::MOVE_OBJECT:
-    case Instruction::MOVE_OBJECT_FROM16:
-    case Instruction::MOVE_OBJECT_16: {
-      is_move = true;
-      // If the MIR defining src vreg is known, allow renaming all uses of src vreg to dest vreg
-      // while updating the defining MIR to directly define dest vreg. However, changing Phi's
-      // def this way doesn't work without changing MIRs in other BBs.
-      int src_v_reg = mir_graph_->SRegToVReg(mir->ssa_rep->uses[0]);
-      int src_change = vreg_chains_.LastChange(src_v_reg);
-      if (src_change != kNPos) {
-        MIRData* src_data = vreg_chains_.GetMIRData(src_change);
-        if (static_cast<int>(src_data->mir->dalvikInsn.opcode) != kMirOpPhi) {
-          src_data->is_move_src = true;
-        }
-      }
-      break;
-    }
-
-    case Instruction::CONST_4:
-    case Instruction::CONST_16:
-    case Instruction::CONST:
-    case Instruction::CONST_HIGH16:
-    case Instruction::CONST_WIDE_16:
-    case Instruction::CONST_WIDE_32:
-    case Instruction::CONST_WIDE:
-    case Instruction::CONST_WIDE_HIGH16:
-    case Instruction::CMPL_FLOAT:
-    case Instruction::CMPG_FLOAT:
-    case Instruction::CMPL_DOUBLE:
-    case Instruction::CMPG_DOUBLE:
-    case Instruction::CMP_LONG:
-    case Instruction::NEG_INT:
-    case Instruction::NOT_INT:
-    case Instruction::NEG_LONG:
-    case Instruction::NOT_LONG:
-    case Instruction::NEG_FLOAT:
-    case Instruction::NEG_DOUBLE:
-    case Instruction::INT_TO_LONG:
-    case Instruction::INT_TO_FLOAT:
-    case Instruction::INT_TO_DOUBLE:
-    case Instruction::LONG_TO_INT:
-    case Instruction::LONG_TO_FLOAT:
-    case Instruction::LONG_TO_DOUBLE:
-    case Instruction::FLOAT_TO_INT:
-    case Instruction::FLOAT_TO_LONG:
-    case Instruction::FLOAT_TO_DOUBLE:
-    case Instruction::DOUBLE_TO_INT:
-    case Instruction::DOUBLE_TO_LONG:
-    case Instruction::DOUBLE_TO_FLOAT:
-    case Instruction::INT_TO_BYTE:
-    case Instruction::INT_TO_CHAR:
-    case Instruction::INT_TO_SHORT:
-    case Instruction::ADD_INT:
-    case Instruction::SUB_INT:
-    case Instruction::MUL_INT:
-    case Instruction::AND_INT:
-    case Instruction::OR_INT:
-    case Instruction::XOR_INT:
-    case Instruction::SHL_INT:
-    case Instruction::SHR_INT:
-    case Instruction::USHR_INT:
-    case Instruction::ADD_LONG:
-    case Instruction::SUB_LONG:
-    case Instruction::MUL_LONG:
-    case Instruction::AND_LONG:
-    case Instruction::OR_LONG:
-    case Instruction::XOR_LONG:
-    case Instruction::SHL_LONG:
-    case Instruction::SHR_LONG:
-    case Instruction::USHR_LONG:
-    case Instruction::ADD_FLOAT:
-    case Instruction::SUB_FLOAT:
-    case Instruction::MUL_FLOAT:
-    case Instruction::DIV_FLOAT:
-    case Instruction::REM_FLOAT:
-    case Instruction::ADD_DOUBLE:
-    case Instruction::SUB_DOUBLE:
-    case Instruction::MUL_DOUBLE:
-    case Instruction::DIV_DOUBLE:
-    case Instruction::REM_DOUBLE:
-    case Instruction::ADD_INT_2ADDR:
-    case Instruction::SUB_INT_2ADDR:
-    case Instruction::MUL_INT_2ADDR:
-    case Instruction::AND_INT_2ADDR:
-    case Instruction::OR_INT_2ADDR:
-    case Instruction::XOR_INT_2ADDR:
-    case Instruction::SHL_INT_2ADDR:
-    case Instruction::SHR_INT_2ADDR:
-    case Instruction::USHR_INT_2ADDR:
-    case Instruction::ADD_LONG_2ADDR:
-    case Instruction::SUB_LONG_2ADDR:
-    case Instruction::MUL_LONG_2ADDR:
-    case Instruction::AND_LONG_2ADDR:
-    case Instruction::OR_LONG_2ADDR:
-    case Instruction::XOR_LONG_2ADDR:
-    case Instruction::SHL_LONG_2ADDR:
-    case Instruction::SHR_LONG_2ADDR:
-    case Instruction::USHR_LONG_2ADDR:
-    case Instruction::ADD_FLOAT_2ADDR:
-    case Instruction::SUB_FLOAT_2ADDR:
-    case Instruction::MUL_FLOAT_2ADDR:
-    case Instruction::DIV_FLOAT_2ADDR:
-    case Instruction::REM_FLOAT_2ADDR:
-    case Instruction::ADD_DOUBLE_2ADDR:
-    case Instruction::SUB_DOUBLE_2ADDR:
-    case Instruction::MUL_DOUBLE_2ADDR:
-    case Instruction::DIV_DOUBLE_2ADDR:
-    case Instruction::REM_DOUBLE_2ADDR:
-    case Instruction::ADD_INT_LIT16:
-    case Instruction::RSUB_INT:
-    case Instruction::MUL_INT_LIT16:
-    case Instruction::AND_INT_LIT16:
-    case Instruction::OR_INT_LIT16:
-    case Instruction::XOR_INT_LIT16:
-    case Instruction::ADD_INT_LIT8:
-    case Instruction::RSUB_INT_LIT8:
-    case Instruction::MUL_INT_LIT8:
-    case Instruction::AND_INT_LIT8:
-    case Instruction::OR_INT_LIT8:
-    case Instruction::XOR_INT_LIT8:
-    case Instruction::SHL_INT_LIT8:
-    case Instruction::SHR_INT_LIT8:
-    case Instruction::USHR_INT_LIT8:
-      break;
-
-    case Instruction::DIV_INT:
-    case Instruction::REM_INT:
-    case Instruction::DIV_LONG:
-    case Instruction::REM_LONG:
-    case Instruction::DIV_INT_2ADDR:
-    case Instruction::REM_INT_2ADDR:
-    case Instruction::DIV_LONG_2ADDR:
-    case Instruction::REM_LONG_2ADDR:
-      if ((mir->optimization_flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
-        must_keep = true;
-        uses_all_vregs = true;
-      }
-      break;
-
-    case Instruction::DIV_INT_LIT16:
-    case Instruction::REM_INT_LIT16:
-    case Instruction::DIV_INT_LIT8:
-    case Instruction::REM_INT_LIT8:
-      if (mir->dalvikInsn.vC == 0) {  // Explicit division by 0?
-        must_keep = true;
-        uses_all_vregs = true;
-      }
-      break;
-
-    case Instruction::ARRAY_LENGTH:
-      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0) {
-        must_keep = true;
-        uses_all_vregs = true;
-      }
-      break;
-
-    case Instruction::AGET_OBJECT:
-    case Instruction::AGET:
-    case Instruction::AGET_WIDE:
-    case Instruction::AGET_BOOLEAN:
-    case Instruction::AGET_BYTE:
-    case Instruction::AGET_CHAR:
-    case Instruction::AGET_SHORT:
-      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 ||
-          (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) == 0) {
-        must_keep = true;
-        uses_all_vregs = true;
-      }
-      break;
-
-    case Instruction::APUT_OBJECT:
-    case Instruction::APUT:
-    case Instruction::APUT_WIDE:
-    case Instruction::APUT_BYTE:
-    case Instruction::APUT_BOOLEAN:
-    case Instruction::APUT_SHORT:
-    case Instruction::APUT_CHAR:
-      must_keep = true;
-      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 ||
-          (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) == 0) {
-        uses_all_vregs = true;
-      }
-      break;
-
-    case Instruction::IGET_OBJECT:
-    case Instruction::IGET:
-    case Instruction::IGET_WIDE:
-    case Instruction::IGET_BOOLEAN:
-    case Instruction::IGET_BYTE:
-    case Instruction::IGET_CHAR:
-    case Instruction::IGET_SHORT: {
-      const MirIFieldLoweringInfo& info = mir_graph_->GetIFieldLoweringInfo(mir);
-      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 ||
-          !info.IsResolved() || !info.FastGet()) {
-        must_keep = true;
-        uses_all_vregs = true;
-      } else if (info.IsVolatile()) {
-        must_keep = true;
-      }
-      break;
-    }
-
-    case Instruction::IPUT_OBJECT:
-    case Instruction::IPUT:
-    case Instruction::IPUT_WIDE:
-    case Instruction::IPUT_BOOLEAN:
-    case Instruction::IPUT_BYTE:
-    case Instruction::IPUT_CHAR:
-    case Instruction::IPUT_SHORT: {
-      must_keep = true;
-      const MirIFieldLoweringInfo& info = mir_graph_->GetIFieldLoweringInfo(mir);
-      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 ||
-          !info.IsResolved() || !info.FastPut()) {
-        uses_all_vregs = true;
-      }
-      break;
-    }
-
-    case Instruction::SGET_OBJECT:
-    case Instruction::SGET:
-    case Instruction::SGET_WIDE:
-    case Instruction::SGET_BOOLEAN:
-    case Instruction::SGET_BYTE:
-    case Instruction::SGET_CHAR:
-    case Instruction::SGET_SHORT: {
-      const MirSFieldLoweringInfo& info = mir_graph_->GetSFieldLoweringInfo(mir);
-      if ((mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0 ||
-          !info.IsResolved() || !info.FastGet()) {
-        must_keep = true;
-        uses_all_vregs = true;
-      } else if (info.IsVolatile()) {
-        must_keep = true;
-      }
-      break;
-    }
-
-    case Instruction::SPUT_OBJECT:
-    case Instruction::SPUT:
-    case Instruction::SPUT_WIDE:
-    case Instruction::SPUT_BOOLEAN:
-    case Instruction::SPUT_BYTE:
-    case Instruction::SPUT_CHAR:
-    case Instruction::SPUT_SHORT: {
-      must_keep = true;
-      const MirSFieldLoweringInfo& info = mir_graph_->GetSFieldLoweringInfo(mir);
-      if ((mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0 ||
-          !info.IsResolved() || !info.FastPut()) {
-        uses_all_vregs = true;
-      }
-      break;
-    }
-
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
-      UNREACHABLE();
-  }
-
-  if (mir->ssa_rep->num_defs != 0) {
-    DCHECK(mir->ssa_rep->num_defs == 1 || mir->ssa_rep->num_defs == 2);
-    bool wide = (mir->ssa_rep->num_defs == 2);
-    int s_reg = mir->ssa_rep->defs[0];
-    int v_reg = mir_graph_->SRegToVReg(s_reg);
-    uint16_t new_value = wide ? lvn_->GetSregValueWide(s_reg) : lvn_->GetSregValue(s_reg);
-    DCHECK_NE(new_value, kNoValue);
-
-    vreg_chains_.UpdateInitialVRegValue(v_reg, wide, lvn_);
-    vreg_chains_.AddMIRWithDef(mir, v_reg, wide, new_value);
-    if (is_move) {
-      // Allow renaming all uses of dest vreg to src vreg.
-      vreg_chains_.LastMIRData()->is_move = true;
-    }
-  } else {
-    vreg_chains_.AddMIRWithoutDef(mir);
-    DCHECK(!is_move) << opcode;
-  }
-
-  if (must_keep) {
-    MIRData* last_data = vreg_chains_.LastMIRData();
-    last_data->must_keep = true;
-    if (uses_all_vregs) {
-      last_data->uses_all_vregs = true;
-      no_uses_all_since_ = vreg_chains_.NumMIRs();
-    }
-  } else {
-    DCHECK_NE(mir->ssa_rep->num_defs, 0) << opcode;
-    DCHECK(!uses_all_vregs) << opcode;
-  }
-  return true;
-}
-
-}  // namespace art
diff --git a/compiler/dex/gvn_dead_code_elimination.h b/compiler/dex/gvn_dead_code_elimination.h
deleted file mode 100644
index 06022db..0000000
--- a/compiler/dex/gvn_dead_code_elimination.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_GVN_DEAD_CODE_ELIMINATION_H_
-#define ART_COMPILER_DEX_GVN_DEAD_CODE_ELIMINATION_H_
-
-#include "base/arena_object.h"
-#include "base/scoped_arena_containers.h"
-#include "global_value_numbering.h"
-
-namespace art {
-
-class ArenaBitVector;
-class BasicBlock;
-class LocalValueNumbering;
-class MIR;
-class MIRGraph;
-
-/**
- * @class GvnDeadCodeElimination
- * @details Eliminate dead code based on the results of global value numbering.
- * Also get rid of MOVE insns when we can use the source instead of destination
- * without affecting the vreg values at safepoints; this is useful in methods
- * with a large number of vregs that frequently move values to and from low vregs
- * to accommodate insns that can work only with the low 16 or 256 vregs.
- */
-class GvnDeadCodeElimination : public DeletableArenaObject<kArenaAllocMisc> {
- public:
-  GvnDeadCodeElimination(const GlobalValueNumbering* gvn, ScopedArenaAllocator* alloc);
-
-  // Apply the DCE to a basic block.
-  void Apply(BasicBlock* bb);
-
- private:
-  static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
-  static constexpr uint16_t kNPos = 0xffffu;
-  static constexpr size_t kMaxNumTopChangesToKill = 2;
-
-  struct VRegValue {
-    VRegValue() : value(kNoValue), change(kNPos) { }
-
-    // Value name as reported by GVN, kNoValue if not available.
-    uint16_t value;
-    // Index of the change in mir_data_ that defined the value, kNPos if initial value for the BB.
-    uint16_t change;
-  };
-
-  struct MIRData {
-    explicit MIRData(MIR* m)
-        : mir(m), uses_all_vregs(false), must_keep(false), is_move(false), is_move_src(false),
-          has_def(false), wide_def(false),
-          low_def_over_high_word(false), high_def_over_low_word(false), vreg_def(0u),
-          prev_value(), prev_value_high() {
-    }
-
-    uint16_t PrevChange(int v_reg) const;
-    void SetPrevChange(int v_reg, uint16_t change);
-    void RemovePrevChange(int v_reg, MIRData* prev_data);
-
-    MIR* mir;
-    bool uses_all_vregs : 1;  // If mir uses all vregs, uses in mir->ssa_rep are irrelevant.
-    bool must_keep : 1;
-    bool is_move : 1;
-    bool is_move_src : 1;
-    bool has_def : 1;
-    bool wide_def : 1;
-    bool low_def_over_high_word : 1;
-    bool high_def_over_low_word : 1;
-    uint16_t vreg_def;
-    VRegValue prev_value;
-    VRegValue prev_value_high;   // For wide defs.
-  };
-
-  class VRegChains {
-   public:
-    VRegChains(uint32_t num_vregs, ScopedArenaAllocator* alloc);
-
-    void Reset();
-
-    void AddMIRWithDef(MIR* mir, int v_reg, bool wide, uint16_t new_value);
-    void AddMIRWithoutDef(MIR* mir);
-    void RemoveLastMIRData();
-    void RemoveTrailingNops();
-
-    size_t NumMIRs() const;
-    MIRData* GetMIRData(size_t pos);
-    MIRData* LastMIRData();
-
-    uint32_t NumVRegs() const;
-    void InsertInitialValueHigh(int v_reg, uint16_t value);
-    void UpdateInitialVRegValue(int v_reg, bool wide, const LocalValueNumbering* lvn);
-    uint16_t LastChange(int v_reg);
-    uint16_t CurrentValue(int v_reg);
-
-    uint16_t FindKillHead(int v_reg, uint16_t cutoff);
-    uint16_t FindFirstChangeAfter(int v_reg, uint16_t change) const;
-    void ReplaceChange(uint16_t old_change, uint16_t new_change);
-    void RemoveChange(uint16_t change);
-    bool IsTopChange(uint16_t change) const;
-    bool IsSRegUsed(uint16_t first_change, uint16_t last_change, int s_reg) const;
-    bool IsVRegUsed(uint16_t first_change, uint16_t last_change, int v_reg,
-                    MIRGraph* mir_graph) const;
-    void RenameSRegUses(uint16_t first_change, uint16_t last_change,
-                        int old_s_reg, int new_s_reg, bool wide);
-    void RenameVRegUses(uint16_t first_change, uint16_t last_change,
-                        int old_s_reg, int old_v_reg, int new_s_reg, int new_v_reg);
-
-   private:
-    const uint32_t num_vregs_;
-    VRegValue* const vreg_data_;
-    BitVector vreg_high_words_;
-    ScopedArenaVector<MIRData> mir_data_;
-  };
-
-  void RecordPass();
-  void BackwardPass();
-
-  void KillMIR(MIRData* data);
-  static void KillMIR(MIR* mir);
-  static void ChangeBinOp2AddrToPlainBinOp(MIR* mir);
-  MIR* CreatePhi(int s_reg);
-  MIR* RenameSRegDefOrCreatePhi(uint16_t def_change, uint16_t last_change, MIR* mir_to_kill);
-
-  // Update state variables going backwards through a MIR.
-  void BackwardPassProcessLastMIR();
-
-  uint16_t FindChangesToKill(uint16_t first_change, uint16_t last_change);
-  void BackwardPassTryToKillRevertVRegs();
-  bool BackwardPassTryToKillLastMIR();
-
-  void RecordPassKillMoveByRenamingSrcDef(uint16_t src_change, uint16_t move_change);
-  void RecordPassTryToKillOverwrittenMoveOrMoveSrc(uint16_t check_change);
-  void RecordPassTryToKillOverwrittenMoveOrMoveSrc();
-  void RecordPassTryToKillLastMIR();
-
-  bool RecordMIR(MIR* mir);
-
-  const GlobalValueNumbering* const gvn_;
-  MIRGraph* const mir_graph_;
-
-  VRegChains vreg_chains_;
-  BasicBlock* bb_;
-  const LocalValueNumbering* lvn_;
-  size_t no_uses_all_since_;  // The change index after the last change with uses_all_vregs set.
-
-  // Data used when processing MIRs in reverse order.
-  ArenaBitVector* unused_vregs_;              // vregs that are not needed later.
-  ArenaBitVector* vregs_to_kill_;             // vregs that revert to a previous value.
-  uint16_t* kill_heads_;  // For each vreg in vregs_to_kill_, the first change to kill.
-  ScopedArenaVector<uint16_t> changes_to_kill_;
-  ArenaBitVector* dependent_vregs_;
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_GVN_DEAD_CODE_ELIMINATION_H_
diff --git a/compiler/dex/gvn_dead_code_elimination_test.cc b/compiler/dex/gvn_dead_code_elimination_test.cc
deleted file mode 100644
index 28c61a8..0000000
--- a/compiler/dex/gvn_dead_code_elimination_test.cc
+++ /dev/null
@@ -1,2201 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "dataflow_iterator-inl.h"
-#include "dex/mir_field_info.h"
-#include "global_value_numbering.h"
-#include "gvn_dead_code_elimination.h"
-#include "local_value_numbering.h"
-#include "gtest/gtest.h"
-
-namespace art {
-
-class GvnDeadCodeEliminationTest : public testing::Test {
- protected:
-  static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
-
-  struct IFieldDef {
-    uint16_t field_idx;
-    uintptr_t declaring_dex_file;
-    uint16_t declaring_field_idx;
-    bool is_volatile;
-    DexMemAccessType type;
-  };
-
-  struct SFieldDef {
-    uint16_t field_idx;
-    uintptr_t declaring_dex_file;
-    uint16_t declaring_field_idx;
-    bool is_volatile;
-    DexMemAccessType type;
-  };
-
-  struct BBDef {
-    static constexpr size_t kMaxSuccessors = 4;
-    static constexpr size_t kMaxPredecessors = 4;
-
-    BBType type;
-    size_t num_successors;
-    BasicBlockId successors[kMaxPredecessors];
-    size_t num_predecessors;
-    BasicBlockId predecessors[kMaxPredecessors];
-  };
-
-  struct MIRDef {
-    static constexpr size_t kMaxSsaDefs = 2;
-    static constexpr size_t kMaxSsaUses = 4;
-
-    BasicBlockId bbid;
-    Instruction::Code opcode;
-    int64_t value;
-    uint32_t field_info;
-    size_t num_uses;
-    int32_t uses[kMaxSsaUses];
-    size_t num_defs;
-    int32_t defs[kMaxSsaDefs];
-  };
-
-#define DEF_SUCC0() \
-    0u, { }
-#define DEF_SUCC1(s1) \
-    1u, { s1 }
-#define DEF_SUCC2(s1, s2) \
-    2u, { s1, s2 }
-#define DEF_SUCC3(s1, s2, s3) \
-    3u, { s1, s2, s3 }
-#define DEF_SUCC4(s1, s2, s3, s4) \
-    4u, { s1, s2, s3, s4 }
-#define DEF_PRED0() \
-    0u, { }
-#define DEF_PRED1(p1) \
-    1u, { p1 }
-#define DEF_PRED2(p1, p2) \
-    2u, { p1, p2 }
-#define DEF_PRED3(p1, p2, p3) \
-    3u, { p1, p2, p3 }
-#define DEF_PRED4(p1, p2, p3, p4) \
-    4u, { p1, p2, p3, p4 }
-#define DEF_BB(type, succ, pred) \
-    { type, succ, pred }
-
-#define DEF_CONST(bb, opcode, reg, value) \
-    { bb, opcode, value, 0u, 0, { }, 1, { reg } }
-#define DEF_CONST_WIDE(bb, opcode, reg, value) \
-    { bb, opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_CONST_STRING(bb, opcode, reg, index) \
-    { bb, opcode, index, 0u, 0, { }, 1, { reg } }
-#define DEF_IGET(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 1, { obj }, 1, { reg } }
-#define DEF_IGET_WIDE(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
-#define DEF_IPUT(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
-#define DEF_IPUT_WIDE(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
-#define DEF_SGET(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 0, { }, 1, { reg } }
-#define DEF_SGET_WIDE(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_SPUT(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 1, { reg }, 0, { } }
-#define DEF_SPUT_WIDE(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
-#define DEF_AGET(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 2, { obj, idx }, 1, { reg } }
-#define DEF_AGET_WIDE(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 2, { obj, idx }, 2, { reg, reg + 1 } }
-#define DEF_APUT(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 3, { reg, obj, idx }, 0, { } }
-#define DEF_APUT_WIDE(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }
-#define DEF_INVOKE1(bb, opcode, reg) \
-    { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
-#define DEF_UNIQUE_REF(bb, opcode, reg) \
-    { bb, opcode, 0u, 0u, 0, { }, 1, { reg } }  // CONST_CLASS, CONST_STRING, NEW_ARRAY, ...
-#define DEF_IFZ(bb, opcode, reg) \
-    { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
-#define DEF_MOVE(bb, opcode, reg, src) \
-    { bb, opcode, 0u, 0u, 1, { src }, 1, { reg } }
-#define DEF_MOVE_WIDE(bb, opcode, reg, src) \
-    { bb, opcode, 0u, 0u, 2, { src, src + 1 }, 2, { reg, reg + 1 } }
-#define DEF_PHI2(bb, reg, src1, src2) \
-    { bb, static_cast<Instruction::Code>(kMirOpPhi), 0, 0u, 2u, { src1, src2 }, 1, { reg } }
-#define DEF_UNOP(bb, opcode, result, src1) \
-    { bb, opcode, 0u, 0u, 1, { src1 }, 1, { result } }
-#define DEF_BINOP(bb, opcode, result, src1, src2) \
-    { bb, opcode, 0u, 0u, 2, { src1, src2 }, 1, { result } }
-#define DEF_BINOP_WIDE(bb, opcode, result, src1, src2) \
-    { bb, opcode, 0u, 0u, 4, { src1, src1 + 1, src2, src2 + 1 }, 2, { result, result + 1 } }
-
-  void DoPrepareIFields(const IFieldDef* defs, size_t count) {
-    cu_.mir_graph->ifield_lowering_infos_.clear();
-    cu_.mir_graph->ifield_lowering_infos_.reserve(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const IFieldDef* def = &defs[i];
-      MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
-      if (def->declaring_dex_file != 0u) {
-        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
-        field_info.declaring_field_idx_ = def->declaring_field_idx;
-        field_info.flags_ =
-            MirIFieldLoweringInfo::kFlagFastGet | MirIFieldLoweringInfo::kFlagFastPut |
-            (field_info.flags_ & ~(def->is_volatile ? 0u : MirIFieldLoweringInfo::kFlagIsVolatile));
-      }
-      cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
-    }
-  }
-
-  template <size_t count>
-  void PrepareIFields(const IFieldDef (&defs)[count]) {
-    DoPrepareIFields(defs, count);
-  }
-
-  void DoPrepareSFields(const SFieldDef* defs, size_t count) {
-    cu_.mir_graph->sfield_lowering_infos_.clear();
-    cu_.mir_graph->sfield_lowering_infos_.reserve(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const SFieldDef* def = &defs[i];
-      MirSFieldLoweringInfo field_info(def->field_idx, def->type);
-      // Mark even unresolved fields as initialized.
-      field_info.flags_ |= MirSFieldLoweringInfo::kFlagClassIsInitialized;
-      // NOTE: MirSFieldLoweringInfo::kFlagClassIsInDexCache isn't used by GVN.
-      if (def->declaring_dex_file != 0u) {
-        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
-        field_info.declaring_field_idx_ = def->declaring_field_idx;
-        field_info.flags_ =
-            MirSFieldLoweringInfo::kFlagFastGet | MirSFieldLoweringInfo::kFlagFastPut |
-            (field_info.flags_ & ~(def->is_volatile ? 0u : MirSFieldLoweringInfo::kFlagIsVolatile));
-      }
-      cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
-    }
-  }
-
-  template <size_t count>
-  void PrepareSFields(const SFieldDef (&defs)[count]) {
-    DoPrepareSFields(defs, count);
-  }
-
-  void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
-    cu_.mir_graph->block_id_map_.clear();
-    cu_.mir_graph->block_list_.clear();
-    ASSERT_LT(3u, count);  // null, entry, exit and at least one bytecode block.
-    ASSERT_EQ(kNullBlock, defs[0].type);
-    ASSERT_EQ(kEntryBlock, defs[1].type);
-    ASSERT_EQ(kExitBlock, defs[2].type);
-    for (size_t i = 0u; i != count; ++i) {
-      const BBDef* def = &defs[i];
-      BasicBlock* bb = cu_.mir_graph->CreateNewBB(def->type);
-      if (def->num_successors <= 2) {
-        bb->successor_block_list_type = kNotUsed;
-        bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
-        bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
-      } else {
-        bb->successor_block_list_type = kPackedSwitch;
-        bb->fall_through = 0u;
-        bb->taken = 0u;
-        bb->successor_blocks.reserve(def->num_successors);
-        for (size_t j = 0u; j != def->num_successors; ++j) {
-          SuccessorBlockInfo* successor_block_info =
-              static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
-                                                               kArenaAllocSuccessors));
-          successor_block_info->block = j;
-          successor_block_info->key = 0u;  // Not used by class init check elimination.
-          bb->successor_blocks.push_back(successor_block_info);
-        }
-      }
-      bb->predecessors.assign(def->predecessors, def->predecessors + def->num_predecessors);
-      if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
-        bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
-            cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
-        bb->data_flow_info->live_in_v = live_in_v_;
-        bb->data_flow_info->vreg_to_ssa_map_exit = nullptr;
-      }
-    }
-    ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
-    cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
-    ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
-    cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_[2];
-    ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
-  }
-
-  template <size_t count>
-  void PrepareBasicBlocks(const BBDef (&defs)[count]) {
-    DoPrepareBasicBlocks(defs, count);
-  }
-
-  int SRegToVReg(int32_t s_reg, bool wide) {
-    int v_reg = cu_.mir_graph->SRegToVReg(s_reg);
-    CHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
-    if (wide) {
-      CHECK_LT(static_cast<size_t>(v_reg + 1), num_vregs_);
-    }
-    return v_reg;
-  }
-
-  int SRegToVReg(int32_t* uses, size_t* use, bool wide) {
-    int v_reg = SRegToVReg(uses[*use], wide);
-    if (wide) {
-      CHECK_EQ(uses[*use] + 1, uses[*use + 1]);
-      *use += 2u;
-    } else {
-      *use += 1u;
-    }
-    return v_reg;
-  }
-
-  void DoPrepareMIRs(const MIRDef* defs, size_t count) {
-    mir_count_ = count;
-    mirs_ = reinterpret_cast<MIR*>(cu_.arena.Alloc(sizeof(MIR) * count, kArenaAllocMIR));
-    ssa_reps_.resize(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const MIRDef* def = &defs[i];
-      MIR* mir = &mirs_[i];
-      ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.size());
-      BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
-      bb->AppendMIR(mir);
-      mir->dalvikInsn.opcode = def->opcode;
-      mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
-      mir->dalvikInsn.vB_wide = def->value;
-      if (IsInstructionIGetOrIPut(def->opcode)) {
-        ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.size());
-        mir->meta.ifield_lowering_info = def->field_info;
-        ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->field_info].MemAccessType(),
-                  IGetOrIPutMemAccessType(def->opcode));
-      } else if (IsInstructionSGetOrSPut(def->opcode)) {
-        ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.size());
-        mir->meta.sfield_lowering_info = def->field_info;
-        ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->field_info].MemAccessType(),
-                  SGetOrSPutMemAccessType(def->opcode));
-      } else if (def->opcode == static_cast<Instruction::Code>(kMirOpPhi)) {
-        mir->meta.phi_incoming =
-            allocator_->AllocArray<BasicBlockId>(def->num_uses, kArenaAllocDFInfo);
-        ASSERT_EQ(def->num_uses, bb->predecessors.size());
-        std::copy(bb->predecessors.begin(), bb->predecessors.end(), mir->meta.phi_incoming);
-      }
-      mir->ssa_rep = &ssa_reps_[i];
-      cu_.mir_graph->AllocateSSAUseData(mir, def->num_uses);
-      std::copy_n(def->uses, def->num_uses, mir->ssa_rep->uses);
-      // Keep mir->ssa_rep->fp_use[.] zero-initialized (false). Not used by DCE, only copied.
-      cu_.mir_graph->AllocateSSADefData(mir, def->num_defs);
-      std::copy_n(def->defs, def->num_defs, mir->ssa_rep->defs);
-      // Keep mir->ssa_rep->fp_def[.] zero-initialized (false). Not used by DCE, only copied.
-      mir->dalvikInsn.opcode = def->opcode;
-      mir->offset = i;  // LVN uses offset only for debug output
-      mir->optimization_flags = 0u;
-      uint64_t df_attrs = MIRGraph::GetDataFlowAttributes(mir);
-      if ((df_attrs & DF_DA) != 0) {
-        CHECK_NE(def->num_defs, 0u);
-        mir->dalvikInsn.vA = SRegToVReg(def->defs[0], (df_attrs & DF_A_WIDE) != 0);
-        bb->data_flow_info->vreg_to_ssa_map_exit[mir->dalvikInsn.vA] = def->defs[0];
-        if ((df_attrs & DF_A_WIDE) != 0) {
-          CHECK_EQ(def->defs[0] + 1, def->defs[1]);
-          bb->data_flow_info->vreg_to_ssa_map_exit[mir->dalvikInsn.vA + 1u] = def->defs[0] + 1;
-        }
-      }
-      if ((df_attrs & (DF_UA | DF_UB | DF_UC)) != 0) {
-        size_t use = 0;
-        if ((df_attrs & DF_UA) != 0) {
-          mir->dalvikInsn.vA = SRegToVReg(mir->ssa_rep->uses, &use, (df_attrs & DF_A_WIDE) != 0);
-        }
-        if ((df_attrs & DF_UB) != 0) {
-          mir->dalvikInsn.vB = SRegToVReg(mir->ssa_rep->uses, &use, (df_attrs & DF_B_WIDE) != 0);
-        }
-        if ((df_attrs & DF_UC) != 0) {
-          mir->dalvikInsn.vC = SRegToVReg(mir->ssa_rep->uses, &use, (df_attrs & DF_C_WIDE) != 0);
-        }
-        DCHECK_EQ(def->num_uses, use);
-      }
-    }
-    DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(
-        cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
-    code_item->insns_size_in_code_units_ = 2u * count;
-    code_item->registers_size_ = kMaxVRegs;
-    cu_.mir_graph->current_code_item_ = code_item;
-  }
-
-  template <size_t count>
-  void PrepareMIRs(const MIRDef (&defs)[count]) {
-    DoPrepareMIRs(defs, count);
-  }
-
-  template <size_t count>
-  void PrepareSRegToVRegMap(const int (&map)[count]) {
-    cu_.mir_graph->ssa_base_vregs_.assign(map, map + count);
-    num_vregs_ = *std::max_element(map, map + count) + 1u;
-    AllNodesIterator iterator(cu_.mir_graph.get());
-    for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
-      if (bb->data_flow_info != nullptr) {
-        bb->data_flow_info->vreg_to_ssa_map_exit = static_cast<int32_t*>(
-            cu_.arena.Alloc(sizeof(int32_t) * num_vregs_, kArenaAllocDFInfo));
-        std::fill_n(bb->data_flow_info->vreg_to_ssa_map_exit, num_vregs_, INVALID_SREG);
-      }
-    }
-  }
-
-  void PerformGVN() {
-    cu_.mir_graph->SSATransformationStart();
-    cu_.mir_graph->ComputeDFSOrders();
-    cu_.mir_graph->ComputeDominators();
-    cu_.mir_graph->ComputeTopologicalSortOrder();
-    cu_.mir_graph->SSATransformationEnd();
-    cu_.mir_graph->temp_.gvn.ifield_ids =  GlobalValueNumbering::PrepareGvnFieldIds(
-        allocator_.get(), cu_.mir_graph->ifield_lowering_infos_);
-    cu_.mir_graph->temp_.gvn.sfield_ids =  GlobalValueNumbering::PrepareGvnFieldIds(
-        allocator_.get(), cu_.mir_graph->sfield_lowering_infos_);
-    ASSERT_TRUE(gvn_ == nullptr);
-    gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get(),
-                                                           GlobalValueNumbering::kModeGvn));
-    value_names_.resize(mir_count_, 0xffffu);
-    LoopRepeatingTopologicalSortIterator iterator(cu_.mir_graph.get());
-    bool change = false;
-    for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
-      LocalValueNumbering* lvn = gvn_->PrepareBasicBlock(bb);
-      if (lvn != nullptr) {
-        for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-          value_names_[mir - mirs_] = lvn->GetValueNumber(mir);
-        }
-      }
-      change = (lvn != nullptr) && gvn_->FinishBasicBlock(bb);
-      ASSERT_TRUE(gvn_->Good());
-    }
-  }
-
-  void PerformGVNCodeModifications() {
-    ASSERT_TRUE(gvn_ != nullptr);
-    ASSERT_TRUE(gvn_->Good());
-    gvn_->StartPostProcessing();
-    TopologicalSortIterator iterator(cu_.mir_graph.get());
-    for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
-      LocalValueNumbering* lvn = gvn_->PrepareBasicBlock(bb);
-      if (lvn != nullptr) {
-        for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-          uint16_t value_name = lvn->GetValueNumber(mir);
-          ASSERT_EQ(value_name, value_names_[mir - mirs_]);
-        }
-      }
-      bool change = (lvn != nullptr) && gvn_->FinishBasicBlock(bb);
-      ASSERT_FALSE(change);
-      ASSERT_TRUE(gvn_->Good());
-    }
-  }
-
-  void FillVregToSsaRegExitMaps() {
-    // Fill in vreg_to_ssa_map_exit for each BB.
-    PreOrderDfsIterator iterator(cu_.mir_graph.get());
-    for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
-      if (bb->block_type == kDalvikByteCode) {
-        CHECK(!bb->predecessors.empty());
-        BasicBlock* pred_bb = cu_.mir_graph->GetBasicBlock(bb->predecessors[0]);
-        for (size_t v_reg = 0; v_reg != num_vregs_; ++v_reg) {
-          if (bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] == INVALID_SREG) {
-            bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] =
-                pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
-          }
-        }
-      }
-    }
-  }
-
-  template <size_t count>
-  void MarkAsWideSRegs(const int32_t (&sregs)[count]) {
-    for (int32_t sreg : sregs) {
-      cu_.mir_graph->reg_location_[sreg].wide = true;
-      cu_.mir_graph->reg_location_[sreg + 1].wide = true;
-      cu_.mir_graph->reg_location_[sreg + 1].high_word = true;
-    }
-  }
-
-  void PerformDCE() {
-    FillVregToSsaRegExitMaps();
-    cu_.mir_graph->GetNumOfCodeAndTempVRs();
-    dce_.reset(new (allocator_.get()) GvnDeadCodeElimination(gvn_.get(), allocator_.get()));
-    PreOrderDfsIterator iterator(cu_.mir_graph.get());
-    for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
-      if (bb->block_type == kDalvikByteCode) {
-        dce_->Apply(bb);
-      }
-    }
-  }
-
-  void PerformGVN_DCE() {
-    PerformGVN();
-    PerformGVNCodeModifications();  // Eliminate null/range checks.
-    PerformDCE();
-  }
-
-  template <size_t count>
-  void ExpectValueNamesNE(const size_t (&indexes)[count]) {
-    for (size_t i1 = 0; i1 != count; ++i1) {
-      size_t idx1 = indexes[i1];
-      for (size_t i2 = i1 + 1; i2 != count; ++i2) {
-        size_t idx2 = indexes[i2];
-        EXPECT_NE(value_names_[idx1], value_names_[idx2]) << idx1 << " " << idx2;
-      }
-    }
-  }
-
-  template <size_t count>
-  void ExpectNoNullCheck(const size_t (&indexes)[count]) {
-    for (size_t i = 0; i != count; ++i) {
-      size_t idx = indexes[i];
-      EXPECT_EQ(MIR_IGNORE_NULL_CHECK, mirs_[idx].optimization_flags & MIR_IGNORE_NULL_CHECK)
-          << idx;
-    }
-    size_t num_no_null_ck = 0u;
-    for (size_t i = 0; i != mir_count_; ++i) {
-      if ((mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) {
-        ++num_no_null_ck;
-      }
-    }
-    EXPECT_EQ(count, num_no_null_ck);
-  }
-
-  GvnDeadCodeEliminationTest()
-      : pool_(),
-        cu_(&pool_, kRuntimeISA, nullptr, nullptr),
-        num_vregs_(0u),
-        mir_count_(0u),
-        mirs_(nullptr),
-        ssa_reps_(),
-        allocator_(),
-        gvn_(),
-        dce_(),
-        value_names_(),
-        live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false, kBitMapMisc)) {
-    cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
-    cu_.access_flags = kAccStatic;  // Don't let "this" interfere with this test.
-    allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
-    // By default, the zero-initialized reg_location_[.] with ref == false tells LVN that
-    // 0 constants are integral, not references, and the values are all narrow.
-    // Nothing else is used by LVN/GVN. Tests can override the default values as needed.
-    cu_.mir_graph->reg_location_ = static_cast<RegLocation*>(cu_.arena.Alloc(
-        kMaxSsaRegs * sizeof(cu_.mir_graph->reg_location_[0]), kArenaAllocRegAlloc));
-    cu_.mir_graph->num_ssa_regs_ = kMaxSsaRegs;
-    // Bind all possible sregs to live vregs for test purposes.
-    live_in_v_->SetInitialBits(kMaxSsaRegs);
-    cu_.mir_graph->ssa_base_vregs_.reserve(kMaxSsaRegs);
-    cu_.mir_graph->ssa_subscripts_.reserve(kMaxSsaRegs);
-    for (unsigned int i = 0; i < kMaxSsaRegs; i++) {
-      cu_.mir_graph->ssa_base_vregs_.push_back(i);
-      cu_.mir_graph->ssa_subscripts_.push_back(0);
-    }
-    // Set shorty for a void-returning method without arguments.
-    cu_.shorty = "V";
-  }
-
-  static constexpr size_t kMaxSsaRegs = 16384u;
-  static constexpr size_t kMaxVRegs = 256u;
-
-  ArenaPool pool_;
-  CompilationUnit cu_;
-  size_t num_vregs_;
-  size_t mir_count_;
-  MIR* mirs_;
-  std::vector<SSARepresentation> ssa_reps_;
-  std::unique_ptr<ScopedArenaAllocator> allocator_;
-  std::unique_ptr<GlobalValueNumbering> gvn_;
-  std::unique_ptr<GvnDeadCodeElimination> dce_;
-  std::vector<uint16_t> value_names_;
-  ArenaBitVector* live_in_v_;
-};
-
-constexpr uint16_t GvnDeadCodeEliminationTest::kNoValue;
-
-class GvnDeadCodeEliminationTestSimple : public GvnDeadCodeEliminationTest {
- public:
-  GvnDeadCodeEliminationTestSimple();
-
- private:
-  static const BBDef kSimpleBbs[];
-};
-
-const GvnDeadCodeEliminationTest::BBDef GvnDeadCodeEliminationTestSimple::kSimpleBbs[] = {
-    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(3)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(1)),
-};
-
-GvnDeadCodeEliminationTestSimple::GvnDeadCodeEliminationTestSimple()
-    : GvnDeadCodeEliminationTest() {
-  PrepareBasicBlocks(kSimpleBbs);
-}
-
-class GvnDeadCodeEliminationTestDiamond : public GvnDeadCodeEliminationTest {
- public:
-  GvnDeadCodeEliminationTestDiamond();
-
- private:
-  static const BBDef kDiamondBbs[];
-};
-
-const GvnDeadCodeEliminationTest::BBDef GvnDeadCodeEliminationTestDiamond::kDiamondBbs[] = {
-    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),  // Block #3, top of the diamond.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // Block #4, left side.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // Block #5, right side.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),  // Block #6, bottom.
-};
-
-GvnDeadCodeEliminationTestDiamond::GvnDeadCodeEliminationTestDiamond()
-    : GvnDeadCodeEliminationTest() {
-  PrepareBasicBlocks(kDiamondBbs);
-}
-
-class GvnDeadCodeEliminationTestLoop : public GvnDeadCodeEliminationTest {
- public:
-  GvnDeadCodeEliminationTestLoop();
-
- private:
-  static const BBDef kLoopBbs[];
-};
-
-const GvnDeadCodeEliminationTest::BBDef GvnDeadCodeEliminationTestLoop::kLoopBbs[] = {
-    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)),  // "taken" loops to self.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
-};
-
-GvnDeadCodeEliminationTestLoop::GvnDeadCodeEliminationTestLoop()
-    : GvnDeadCodeEliminationTest() {
-  PrepareBasicBlocks(kLoopBbs);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename1) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 2u, 0u),
-      DEF_IGET(3, Instruction::IGET, 3u, 2u, 1u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 3 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  const size_t no_null_ck_indexes[] = { 1, 3 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, true, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the IGET uses the s_reg 0, v_reg 0, defined by mirs_[0].
-  ASSERT_EQ(1, mirs_[3].ssa_rep->num_uses);
-  EXPECT_EQ(0, mirs_[3].ssa_rep->uses[0]);
-  EXPECT_EQ(0u, mirs_[3].dalvikInsn.vB);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename2) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 2u, 0u),
-      DEF_IGET(3, Instruction::IGET, 3u, 2u, 1u),
-      DEF_CONST(3, Instruction::CONST, 4u, 1000),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 3, 4 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  const size_t no_null_ck_indexes[] = { 1, 3 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, true, false, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the IGET uses the s_reg 0, v_reg 0, defined by mirs_[0].
-  ASSERT_EQ(1, mirs_[3].ssa_rep->num_uses);
-  EXPECT_EQ(0, mirs_[3].ssa_rep->uses[0]);
-  EXPECT_EQ(0u, mirs_[3].dalvikInsn.vB);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename3) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 2u, 0u),
-      DEF_IGET(3, Instruction::IGET, 3u, 2u, 1u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 0 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 3 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  const size_t no_null_ck_indexes[] = { 1, 3 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, true, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the NEW_INSTANCE defines the s_reg 2, v_reg 2, originally defined by the move.
-  ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
-  EXPECT_EQ(2, mirs_[0].ssa_rep->defs[0]);
-  EXPECT_EQ(2u, mirs_[0].dalvikInsn.vA);
-  // Check that the first IGET is using the s_reg 2, v_reg 2.
-  ASSERT_EQ(1, mirs_[1].ssa_rep->num_uses);
-  EXPECT_EQ(2, mirs_[1].ssa_rep->uses[0]);
-  EXPECT_EQ(2u, mirs_[1].dalvikInsn.vB);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename4) {
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 1u, 0u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 2u, 1u),
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 3u, 1000u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 0, 1 /* high word */ };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 3 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 3 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  static const bool eliminated[] = {
-      false, true, true, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the NEW_INSTANCE defines the s_reg 2, v_reg 2, originally defined by the move 2u.
-  ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
-  EXPECT_EQ(2, mirs_[0].ssa_rep->defs[0]);
-  EXPECT_EQ(2u, mirs_[0].dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename5) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 2u, 1u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 3u, 0u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 4u, 3u),
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 5u, 1000u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 3, 0, 1 /* high word */ };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 5 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 5 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[3]);
-  EXPECT_EQ(value_names_[0], value_names_[4]);
-
-  static const bool eliminated[] = {
-      false, false, false, true, true, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the NEW_INSTANCE defines the s_reg 4, v_reg 3, originally defined by the move 4u.
-  ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
-  EXPECT_EQ(4, mirs_[0].ssa_rep->defs[0]);
-  EXPECT_EQ(3u, mirs_[0].dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename6) {
-  static const MIRDef mirs[] = {
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000u),
-      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 2u, 0u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1 /* high word */, 1, 2 /* high word */ };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 0, 2 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-
-  static const bool eliminated[] = {
-      false, true
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the CONST_WIDE defines the s_reg 2, v_reg 1, originally defined by the move 2u.
-  ASSERT_EQ(2, mirs_[0].ssa_rep->num_defs);
-  EXPECT_EQ(2, mirs_[0].ssa_rep->defs[0]);
-  EXPECT_EQ(3, mirs_[0].ssa_rep->defs[1]);
-  EXPECT_EQ(1u, mirs_[0].dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename7) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000u),
-      DEF_MOVE(3, Instruction::MOVE, 1u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 2u, 0u, 1u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 0 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[2]);
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-
-  static const bool eliminated[] = {
-      false, true, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the CONST defines the s_reg 1, v_reg 1, originally defined by the move 1u.
-  ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
-  EXPECT_EQ(1, mirs_[0].ssa_rep->defs[0]);
-  EXPECT_EQ(1u, mirs_[0].dalvikInsn.vA);
-  // Check that the ADD_INT inputs are both s_reg 1, vreg 1.
-  ASSERT_EQ(2, mirs_[2].ssa_rep->num_uses);
-  EXPECT_EQ(1, mirs_[2].ssa_rep->uses[0]);
-  EXPECT_EQ(1, mirs_[2].ssa_rep->uses[1]);
-  EXPECT_EQ(1u, mirs_[2].dalvikInsn.vB);
-  EXPECT_EQ(1u, mirs_[2].dalvikInsn.vC);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename8) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000u),
-      DEF_MOVE(3, Instruction::MOVE, 1u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT_2ADDR, 2u, 0u, 1u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 0 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[2]);
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-
-  static const bool eliminated[] = {
-      false, true, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the CONST defines the s_reg 1, v_reg 1, originally defined by the move 1u.
-  ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
-  EXPECT_EQ(1, mirs_[0].ssa_rep->defs[0]);
-  EXPECT_EQ(1u, mirs_[0].dalvikInsn.vA);
-  // Check that the ADD_INT_2ADDR was replaced by ADD_INT and inputs are both s_reg 1, vreg 1.
-  EXPECT_EQ(Instruction::ADD_INT, mirs_[2].dalvikInsn.opcode);
-  ASSERT_EQ(2, mirs_[2].ssa_rep->num_uses);
-  EXPECT_EQ(1, mirs_[2].ssa_rep->uses[0]);
-  EXPECT_EQ(1, mirs_[2].ssa_rep->uses[1]);
-  EXPECT_EQ(1u, mirs_[2].dalvikInsn.vB);
-  EXPECT_EQ(1u, mirs_[2].dalvikInsn.vC);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename9) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000u),
-      DEF_BINOP(3, Instruction::ADD_INT_2ADDR, 1u, 0u, 0u),
-      DEF_MOVE(3, Instruction::MOVE, 2u, 1u),
-      DEF_CONST(3, Instruction::CONST, 3u, 3000u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 0, 1, 0 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 3 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[1], value_names_[2]);
-
-  static const bool eliminated[] = {
-      false, false, true, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the ADD_INT_2ADDR was replaced by ADD_INT and output is in s_reg 2, vreg 1.
-  EXPECT_EQ(Instruction::ADD_INT, mirs_[1].dalvikInsn.opcode);
-  ASSERT_EQ(2, mirs_[1].ssa_rep->num_uses);
-  EXPECT_EQ(0, mirs_[1].ssa_rep->uses[0]);
-  EXPECT_EQ(0, mirs_[1].ssa_rep->uses[1]);
-  EXPECT_EQ(0u, mirs_[1].dalvikInsn.vB);
-  EXPECT_EQ(0u, mirs_[1].dalvikInsn.vC);
-  ASSERT_EQ(1, mirs_[1].ssa_rep->num_defs);
-  EXPECT_EQ(2, mirs_[1].ssa_rep->defs[0]);
-  EXPECT_EQ(1u, mirs_[1].dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, NoRename1) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 2u, 1u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 3u, 0u),
-      DEF_CONST(3, Instruction::CONST, 4u, 1000),
-      DEF_IGET(3, Instruction::IGET, 5u, 3u, 1u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 0, 1 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 4, 5 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[3]);
-
-  const size_t no_null_ck_indexes[] = { 1, 5 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, NoRename2) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 2u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 3u, 0u),
-      DEF_CONST(3, Instruction::CONST, 4u, 1000),
-      DEF_IGET(3, Instruction::IGET, 5u, 3u, 1u),
-      DEF_CONST(3, Instruction::CONST, 6u, 2000),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 2, 0, 3, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 4, 5, 6 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[3]);
-
-  const size_t no_null_ck_indexes[] = { 1, 5 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, NoRename3) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
-      DEF_IGET(3, Instruction::IGET, 2u, 0u, 2u),
-      DEF_BINOP(3, Instruction::ADD_INT, 3u, 1u, 2u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 4u, 0u),
-      DEF_IGET(3, Instruction::IGET, 5u, 4u, 1u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 2, 0 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 5 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[4]);
-
-  const size_t no_null_ck_indexes[] = { 1, 2, 5 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, NoRename4) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 1u),
-      DEF_CONST(3, Instruction::CONST, 2u, 100u),
-      DEF_CONST(3, Instruction::CONST, 3u, 200u),
-      DEF_BINOP(3, Instruction::OR_INT_2ADDR, 4u, 2u, 3u),   // 3. Find definition of the move src.
-      DEF_MOVE(3, Instruction::MOVE, 5u, 0u),                // 4. Uses move dest vreg.
-      DEF_MOVE(3, Instruction::MOVE, 6u, 4u),                // 2. Find overwritten move src.
-      DEF_CONST(3, Instruction::CONST, 7u, 2000u),           // 1. Overwrites 4u, look for moves.
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 2, 4, 0, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 7 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[5]);
-  EXPECT_EQ(value_names_[4], value_names_[6]);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Simple1) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },
-      { 1u, 1u, 1u, false, kDexMemAccessObject },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 1u, 0u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 2u, 1u, 1u),
-      DEF_IGET(3, Instruction::IGET, 3u, 2u, 2u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 4u, 0u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 5u, 4u, 1u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 1, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[1]);
-  EXPECT_NE(value_names_[0], value_names_[2]);
-  EXPECT_NE(value_names_[0], value_names_[3]);
-  EXPECT_NE(value_names_[1], value_names_[2]);
-  EXPECT_NE(value_names_[1], value_names_[3]);
-  EXPECT_NE(value_names_[2], value_names_[3]);
-  EXPECT_EQ(value_names_[1], value_names_[4]);
-  EXPECT_EQ(value_names_[2], value_names_[5]);
-
-  EXPECT_EQ(MIR_IGNORE_NULL_CHECK, mirs_[4].optimization_flags & MIR_IGNORE_NULL_CHECK);
-  EXPECT_EQ(MIR_IGNORE_NULL_CHECK, mirs_[5].optimization_flags & MIR_IGNORE_NULL_CHECK);
-
-  static const bool eliminated[] = {
-      false, false, false, false, true, true
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the sregs have been renamed correctly.
-  ASSERT_EQ(1, mirs_[1].ssa_rep->num_defs);
-  EXPECT_EQ(4, mirs_[1].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[1].ssa_rep->num_uses);
-  EXPECT_EQ(0, mirs_[1].ssa_rep->uses[0]);
-  ASSERT_EQ(1, mirs_[2].ssa_rep->num_defs);
-  EXPECT_EQ(5, mirs_[2].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[2].ssa_rep->num_uses);
-  EXPECT_EQ(4, mirs_[2].ssa_rep->uses[0]);
-  ASSERT_EQ(1, mirs_[3].ssa_rep->num_defs);
-  EXPECT_EQ(3, mirs_[3].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[3].ssa_rep->num_uses);
-  EXPECT_EQ(5, mirs_[3].ssa_rep->uses[0]);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Simple2) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(3, Instruction::CONST, 1u, 1000),
-      DEF_IGET(3, Instruction::IGET, 2u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT_2ADDR, 3u, 2u, 1u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 4u, 3u),
-      DEF_IGET(3, Instruction::IGET, 5u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT_2ADDR, 6u, 5u, 1u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 2, 3, 2, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[2], value_names_[5]);
-  EXPECT_EQ(value_names_[3], value_names_[6]);
-
-  const size_t no_null_ck_indexes[] = { 2, 5 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, true, true
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the sregs have been renamed correctly.
-  ASSERT_EQ(1, mirs_[3].ssa_rep->num_defs);
-  EXPECT_EQ(6, mirs_[3].ssa_rep->defs[0]);
-  ASSERT_EQ(2, mirs_[3].ssa_rep->num_uses);
-  EXPECT_EQ(2, mirs_[3].ssa_rep->uses[0]);
-  EXPECT_EQ(1, mirs_[3].ssa_rep->uses[1]);
-  ASSERT_EQ(1, mirs_[4].ssa_rep->num_defs);
-  EXPECT_EQ(4, mirs_[4].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[4].ssa_rep->num_uses);
-  EXPECT_EQ(6, mirs_[4].ssa_rep->uses[0]);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Simple3) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(3, Instruction::CONST, 1u, 1000),
-      DEF_CONST(3, Instruction::CONST, 2u, 2000),
-      DEF_CONST(3, Instruction::CONST, 3u, 3000),
-      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
-      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),  // Simple elimination of ADD+MUL
-      DEF_BINOP(3, Instruction::SUB_INT, 12u, 11u, 3u),  // allows simple elimination of IGET+SUB.
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 5, 4, 6, 4, 5, 5, 4 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[4], value_names_[9]);
-  EXPECT_EQ(value_names_[5], value_names_[10]);
-  EXPECT_EQ(value_names_[6], value_names_[11]);
-  EXPECT_EQ(value_names_[7], value_names_[12]);
-
-  const size_t no_null_ck_indexes[] = { 4, 9 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false, false, false, true, true, true, true
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the sregs have been renamed correctly.
-  ASSERT_EQ(1, mirs_[6].ssa_rep->num_defs);
-  EXPECT_EQ(11, mirs_[6].ssa_rep->defs[0]);  // 6 -> 11
-  ASSERT_EQ(2, mirs_[6].ssa_rep->num_uses);
-  EXPECT_EQ(5, mirs_[6].ssa_rep->uses[0]);
-  EXPECT_EQ(2, mirs_[6].ssa_rep->uses[1]);
-  ASSERT_EQ(1, mirs_[7].ssa_rep->num_defs);
-  EXPECT_EQ(12, mirs_[7].ssa_rep->defs[0]);  // 7 -> 12
-  ASSERT_EQ(2, mirs_[7].ssa_rep->num_uses);
-  EXPECT_EQ(11, mirs_[7].ssa_rep->uses[0]);  // 6 -> 11
-  EXPECT_EQ(3, mirs_[7].ssa_rep->uses[1]);
-  ASSERT_EQ(1, mirs_[8].ssa_rep->num_defs);
-  EXPECT_EQ(8, mirs_[8].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[8].ssa_rep->num_uses);
-  EXPECT_EQ(12, mirs_[8].ssa_rep->uses[0]);  // 7 -> 12
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Simple4) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 1u, INT64_C(1)),
-      DEF_BINOP(3, Instruction::LONG_TO_FLOAT, 3u, 1u, 2u),
-      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 5u, 4u),
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 6u, INT64_C(1)),
-      DEF_BINOP(3, Instruction::LONG_TO_FLOAT, 8u, 6u, 7u),
-      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 2, 3, 1, 2, 1, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 1, 6 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[1], value_names_[5]);
-  EXPECT_EQ(value_names_[2], value_names_[6]);
-  EXPECT_EQ(value_names_[3], value_names_[7]);
-
-  const size_t no_null_ck_indexes[] = { 3, 7 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      // Simple elimination of CONST_WIDE+LONG_TO_FLOAT allows simple elimination of IGET.
-      false, false, false, false, false, true, true, true
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the sregs have been renamed correctly.
-  ASSERT_EQ(1, mirs_[2].ssa_rep->num_defs);
-  EXPECT_EQ(8, mirs_[2].ssa_rep->defs[0]);   // 3 -> 8
-  ASSERT_EQ(2, mirs_[2].ssa_rep->num_uses);
-  EXPECT_EQ(1, mirs_[2].ssa_rep->uses[0]);
-  EXPECT_EQ(2, mirs_[2].ssa_rep->uses[1]);
-  ASSERT_EQ(1, mirs_[3].ssa_rep->num_defs);
-  EXPECT_EQ(9, mirs_[3].ssa_rep->defs[0]);   // 4 -> 9
-  ASSERT_EQ(1, mirs_[3].ssa_rep->num_uses);
-  EXPECT_EQ(0, mirs_[3].ssa_rep->uses[0]);
-  ASSERT_EQ(1, mirs_[4].ssa_rep->num_defs);
-  EXPECT_EQ(5, mirs_[4].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[4].ssa_rep->num_uses);
-  EXPECT_EQ(9, mirs_[4].ssa_rep->uses[0]);   // 4 -> 9
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, KillChain1) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(3, Instruction::CONST, 1u, 1000),
-      DEF_CONST(3, Instruction::CONST, 2u, 2000),
-      DEF_CONST(3, Instruction::CONST, 3u, 3000),
-      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
-      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 12u, 11u, 3u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 4, 5, 6, 4, 5, 4, 5 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[4], value_names_[9]);
-  EXPECT_EQ(value_names_[5], value_names_[10]);
-  EXPECT_EQ(value_names_[6], value_names_[11]);
-  EXPECT_EQ(value_names_[7], value_names_[12]);
-
-  const size_t no_null_ck_indexes[] = { 4, 9 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false, false, false, true, true, true, true
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the sregs have been renamed correctly.
-  ASSERT_EQ(1, mirs_[6].ssa_rep->num_defs);
-  EXPECT_EQ(11, mirs_[6].ssa_rep->defs[0]);  // 6 -> 11
-  ASSERT_EQ(2, mirs_[6].ssa_rep->num_uses);
-  EXPECT_EQ(5, mirs_[6].ssa_rep->uses[0]);
-  EXPECT_EQ(2, mirs_[6].ssa_rep->uses[1]);
-  ASSERT_EQ(1, mirs_[7].ssa_rep->num_defs);
-  EXPECT_EQ(12, mirs_[7].ssa_rep->defs[0]);  // 7 -> 12
-  ASSERT_EQ(2, mirs_[7].ssa_rep->num_uses);
-  EXPECT_EQ(11, mirs_[7].ssa_rep->uses[0]);  // 6 -> 11
-  EXPECT_EQ(3, mirs_[7].ssa_rep->uses[1]);
-  ASSERT_EQ(1, mirs_[8].ssa_rep->num_defs);
-  EXPECT_EQ(8, mirs_[8].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[8].ssa_rep->num_uses);
-  EXPECT_EQ(12, mirs_[8].ssa_rep->uses[0]);   // 7 -> 12
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, KillChain2) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(3, Instruction::CONST, 1u, 1000),
-      DEF_CONST(3, Instruction::CONST, 2u, 2000),
-      DEF_CONST(3, Instruction::CONST, 3u, 3000),
-      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
-      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 12u, 11u, 3u),
-      DEF_CONST(3, Instruction::CONST, 13u, 4000),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 5, 4, 6, 4, 7, 7, 4, 7 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 13 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[4], value_names_[9]);
-  EXPECT_EQ(value_names_[5], value_names_[10]);
-  EXPECT_EQ(value_names_[6], value_names_[11]);
-  EXPECT_EQ(value_names_[7], value_names_[12]);
-
-  const size_t no_null_ck_indexes[] = { 4, 9 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false, false, false, true, true, true, true, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the sregs have been renamed correctly.
-  ASSERT_EQ(1, mirs_[7].ssa_rep->num_defs);
-  EXPECT_EQ(12, mirs_[7].ssa_rep->defs[0]);  // 7 -> 12
-  ASSERT_EQ(2, mirs_[7].ssa_rep->num_uses);
-  EXPECT_EQ(6, mirs_[7].ssa_rep->uses[0]);
-  EXPECT_EQ(3, mirs_[7].ssa_rep->uses[1]);
-  ASSERT_EQ(1, mirs_[8].ssa_rep->num_defs);
-  EXPECT_EQ(8, mirs_[8].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[8].ssa_rep->num_uses);
-  EXPECT_EQ(12, mirs_[8].ssa_rep->uses[0]);   // 7 -> 12
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, KillChain3) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(3, Instruction::CONST, 1u, 1000),
-      DEF_CONST(3, Instruction::CONST, 2u, 2000),
-      DEF_CONST(3, Instruction::CONST, 3u, 3000),
-      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
-      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),
-      DEF_CONST(3, Instruction::CONST, 12u, 4000),
-      DEF_BINOP(3, Instruction::SUB_INT, 13u, 11u, 3u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 5, 4, 6, 4, 7, 4, 7, 4 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 12 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[4], value_names_[9]);
-  EXPECT_EQ(value_names_[5], value_names_[10]);
-  EXPECT_EQ(value_names_[6], value_names_[11]);
-  EXPECT_EQ(value_names_[7], value_names_[13]);
-
-  const size_t no_null_ck_indexes[] = { 4, 9 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false, false, false, true, true, true, false, true
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the sregs have been renamed correctly.
-  ASSERT_EQ(1, mirs_[7].ssa_rep->num_defs);
-  EXPECT_EQ(13, mirs_[7].ssa_rep->defs[0]);  // 7 -> 13
-  ASSERT_EQ(2, mirs_[7].ssa_rep->num_uses);
-  EXPECT_EQ(6, mirs_[7].ssa_rep->uses[0]);
-  EXPECT_EQ(3, mirs_[7].ssa_rep->uses[1]);
-  ASSERT_EQ(1, mirs_[8].ssa_rep->num_defs);
-  EXPECT_EQ(8, mirs_[8].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[8].ssa_rep->num_uses);
-  EXPECT_EQ(13, mirs_[8].ssa_rep->uses[0]);   // 7 -> 13
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, KeepChain1) {
-  // KillChain2 without the final CONST.
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(3, Instruction::CONST, 1u, 1000),
-      DEF_CONST(3, Instruction::CONST, 2u, 2000),
-      DEF_CONST(3, Instruction::CONST, 3u, 3000),
-      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
-      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 12u, 11u, 3u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 5, 4, 6, 4, 7, 7, 4 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[4], value_names_[9]);
-  EXPECT_EQ(value_names_[5], value_names_[10]);
-  EXPECT_EQ(value_names_[6], value_names_[11]);
-  EXPECT_EQ(value_names_[7], value_names_[12]);
-
-  const size_t no_null_ck_indexes[] = { 4, 9 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false, false, false, false, false, false, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, KeepChain2) {
-  // KillChain1 with MIRs in the middle of the chain.
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(3, Instruction::CONST, 1u, 1000),
-      DEF_CONST(3, Instruction::CONST, 2u, 2000),
-      DEF_CONST(3, Instruction::CONST, 3u, 3000),
-      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
-      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
-      DEF_CONST(3, Instruction::CONST, 11u, 4000),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 12u, 11u),
-      DEF_BINOP(3, Instruction::MUL_INT, 13u, 10u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 14u, 13u, 3u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 4, 5, 6, 4, 5, 4, 7, 4, 5 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[4], value_names_[9]);
-  EXPECT_EQ(value_names_[5], value_names_[10]);
-  EXPECT_EQ(value_names_[6], value_names_[13]);
-  EXPECT_EQ(value_names_[7], value_names_[14]);
-
-  const size_t no_null_ck_indexes[] = { 4, 9 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false, false, false,
-      false, false, false, false, false, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestDiamond, CreatePhi1) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000),
-      DEF_CONST(4, Instruction::CONST, 1u, 1000),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 0 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-
-  static const bool eliminated[] = {
-      false, true,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that we've created a single-input Phi to replace the CONST 1u.
-  BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
-  MIR* phi = bb4->first_mir_insn;
-  ASSERT_TRUE(phi != nullptr);
-  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
-  ASSERT_EQ(1, phi->ssa_rep->num_uses);
-  EXPECT_EQ(0, phi->ssa_rep->uses[0]);
-  ASSERT_EQ(1, phi->ssa_rep->num_defs);
-  EXPECT_EQ(1, phi->ssa_rep->defs[0]);
-  EXPECT_EQ(0u, phi->dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestDiamond, CreatePhi2) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000),
-      DEF_MOVE(4, Instruction::MOVE, 1u, 0u),
-      DEF_CONST(4, Instruction::CONST, 2u, 1000),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 0 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  static const bool eliminated[] = {
-      false, false, true,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that we've created a single-input Phi to replace the CONST 2u.
-  BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
-  MIR* phi = bb4->first_mir_insn;
-  ASSERT_TRUE(phi != nullptr);
-  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
-  ASSERT_EQ(1, phi->ssa_rep->num_uses);
-  EXPECT_EQ(0, phi->ssa_rep->uses[0]);
-  ASSERT_EQ(1, phi->ssa_rep->num_defs);
-  EXPECT_EQ(2, phi->ssa_rep->defs[0]);
-  EXPECT_EQ(0u, phi->dalvikInsn.vA);
-  MIR* move = phi->next;
-  ASSERT_TRUE(move != nullptr);
-  ASSERT_EQ(Instruction::MOVE, move->dalvikInsn.opcode);
-  ASSERT_EQ(1, move->ssa_rep->num_uses);
-  EXPECT_EQ(2, move->ssa_rep->uses[0]);
-  ASSERT_EQ(1, move->ssa_rep->num_defs);
-  EXPECT_EQ(1, move->ssa_rep->defs[0]);
-  EXPECT_EQ(1u, move->dalvikInsn.vA);
-  EXPECT_EQ(0u, move->dalvikInsn.vB);
-}
-
-TEST_F(GvnDeadCodeEliminationTestDiamond, CreatePhi3) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(4, Instruction::CONST, 1u, 1000),
-      DEF_IPUT(4, Instruction::IPUT, 1u, 0u, 0u),
-      DEF_CONST(5, Instruction::CONST, 3u, 2000),
-      DEF_IPUT(5, Instruction::IPUT, 3u, 0u, 0u),
-      DEF_IGET(6, Instruction::IGET, 5u, 0u, 0u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2 /* dummy */, 1, 2 /* dummy */, 1 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 3, 5 };
-  ExpectValueNamesNE(diff_indexes);
-
-  const size_t no_null_ck_indexes[] = { 2, 4, 5 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, true,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that we've created a two-input Phi to replace the IGET 5u.
-  BasicBlock* bb6 = cu_.mir_graph->GetBasicBlock(6);
-  MIR* phi = bb6->first_mir_insn;
-  ASSERT_TRUE(phi != nullptr);
-  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
-  ASSERT_EQ(2, phi->ssa_rep->num_uses);
-  EXPECT_EQ(1, phi->ssa_rep->uses[0]);
-  EXPECT_EQ(3, phi->ssa_rep->uses[1]);
-  ASSERT_EQ(1, phi->ssa_rep->num_defs);
-  EXPECT_EQ(5, phi->ssa_rep->defs[0]);
-  EXPECT_EQ(1u, phi->dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestDiamond, KillChainInAnotherBlock1) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },  // linked list
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 1u, 0u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 2u, 1u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 3u, 2u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 4u, 3u, 0u),
-      DEF_IFZ(3, Instruction::IF_NEZ, 4u),
-      DEF_IGET(4, Instruction::IGET_OBJECT, 6u, 0u, 0u),
-      DEF_IGET(4, Instruction::IGET_OBJECT, 7u, 6u, 0u),
-      DEF_IGET(4, Instruction::IGET_OBJECT, 8u, 7u, 0u),
-      DEF_IGET(4, Instruction::IGET_OBJECT, 9u, 8u, 0u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 2, 3 /* dummy */, 1, 2, 1, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[1], value_names_[6]);
-  EXPECT_EQ(value_names_[2], value_names_[7]);
-  EXPECT_EQ(value_names_[3], value_names_[8]);
-  EXPECT_EQ(value_names_[4], value_names_[9]);
-
-  const size_t no_null_ck_indexes[] = { 1, 6, 7, 8, 9 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, true, true, true, true,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that we've created two single-input Phis to replace the IGET 8u and IGET 9u;
-  // the IGET 6u and IGET 7u were killed without a replacement.
-  BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
-  MIR* phi1 = bb4->first_mir_insn;
-  ASSERT_TRUE(phi1 != nullptr);
-  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi1->dalvikInsn.opcode));
-  MIR* phi2 = phi1->next;
-  ASSERT_TRUE(phi2 != nullptr);
-  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi2->dalvikInsn.opcode));
-  ASSERT_TRUE(phi2->next == &mirs_[6]);
-  if (phi1->dalvikInsn.vA == 2u) {
-    std::swap(phi1, phi2);
-  }
-  ASSERT_EQ(1, phi1->ssa_rep->num_uses);
-  EXPECT_EQ(3, phi1->ssa_rep->uses[0]);
-  ASSERT_EQ(1, phi1->ssa_rep->num_defs);
-  EXPECT_EQ(8, phi1->ssa_rep->defs[0]);
-  EXPECT_EQ(1u, phi1->dalvikInsn.vA);
-  ASSERT_EQ(1, phi2->ssa_rep->num_uses);
-  EXPECT_EQ(4, phi2->ssa_rep->uses[0]);
-  ASSERT_EQ(1, phi2->ssa_rep->num_defs);
-  EXPECT_EQ(9, phi2->ssa_rep->defs[0]);
-  EXPECT_EQ(2u, phi2->dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestDiamond, KillChainInAnotherBlock2) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },  // linked list
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 1u, 0u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 2u, 1u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 3u, 2u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 4u, 3u, 0u),
-      DEF_IFZ(3, Instruction::IF_NEZ, 4u),
-      DEF_IGET(4, Instruction::IGET_OBJECT, 6u, 0u, 0u),
-      DEF_IGET(4, Instruction::IGET_OBJECT, 7u, 6u, 0u),
-      DEF_IGET(4, Instruction::IGET_OBJECT, 8u, 7u, 0u),
-      DEF_CONST(4, Instruction::CONST, 9u, 1000),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 2, 3 /* dummy */, 1, 2, 1, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 9 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[1], value_names_[6]);
-  EXPECT_EQ(value_names_[2], value_names_[7]);
-  EXPECT_EQ(value_names_[3], value_names_[8]);
-
-  const size_t no_null_ck_indexes[] = { 1, 6, 7, 8 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, true, true, true, false,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that we've created a single-input Phi to replace the IGET 8u;
-  // the IGET 6u and IGET 7u were killed without a replacement.
-  BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
-  MIR* phi = bb4->first_mir_insn;
-  ASSERT_TRUE(phi != nullptr);
-  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
-  ASSERT_TRUE(phi->next == &mirs_[6]);
-  ASSERT_EQ(1, phi->ssa_rep->num_uses);
-  EXPECT_EQ(3, phi->ssa_rep->uses[0]);
-  ASSERT_EQ(1, phi->ssa_rep->num_defs);
-  EXPECT_EQ(8, phi->ssa_rep->defs[0]);
-  EXPECT_EQ(1u, phi->dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestLoop, IFieldLoopVariable) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(3, Instruction::CONST, 1u, 1),
-      DEF_CONST(3, Instruction::CONST, 2u, 0),
-      DEF_IPUT(3, Instruction::IPUT, 2u, 0u, 0u),
-      DEF_IGET(4, Instruction::IGET, 4u, 0u, 0u),
-      DEF_BINOP(4, Instruction::ADD_INT, 5u, 4u, 1u),
-      DEF_IPUT(4, Instruction::IPUT, 5u, 0u, 0u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3 /* dummy */, 2, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 4, 5 };
-  ExpectValueNamesNE(diff_indexes);
-
-  const size_t no_null_ck_indexes[] = { 3, 4, 6 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, true, false, false,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that we've created a two-input Phi to replace the IGET 4u.
-  BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
-  MIR* phi = bb4->first_mir_insn;
-  ASSERT_TRUE(phi != nullptr);
-  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
-  ASSERT_TRUE(phi->next == &mirs_[4]);
-  ASSERT_EQ(2, phi->ssa_rep->num_uses);
-  EXPECT_EQ(2, phi->ssa_rep->uses[0]);
-  EXPECT_EQ(5, phi->ssa_rep->uses[1]);
-  ASSERT_EQ(1, phi->ssa_rep->num_defs);
-  EXPECT_EQ(4, phi->ssa_rep->defs[0]);
-  EXPECT_EQ(2u, phi->dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestDiamond, LongOverlaps1) {
-  static const MIRDef mirs[] = {
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000u),
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 2u, 1000u),
-      DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 4u, 0u),
-      DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 6u, 2u),
-      DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 8u, 4u),
-      DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 10u, 6u),
-  };
-
-  // The last insn should overlap the first and second.
-  static const int32_t sreg_to_vreg_map[] = { 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 0, 2, 4, 6, 8, 10 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-  EXPECT_EQ(value_names_[0], value_names_[3]);
-  EXPECT_EQ(value_names_[0], value_names_[4]);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, LongOverlaps2) {
-  static const MIRDef mirs[] = {
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000u),
-      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 2u, 0u),
-      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 4u, 2u),
-  };
-
-  // The last insn should overlap the first and second.
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 1, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 0, 2, 4 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  static const bool eliminated[] = {
-      false, true, true,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the CONST_WIDE registers have been correctly renamed.
-  MIR* const_wide = &mirs_[0];
-  ASSERT_EQ(2u, const_wide->ssa_rep->num_defs);
-  EXPECT_EQ(4, const_wide->ssa_rep->defs[0]);
-  EXPECT_EQ(5, const_wide->ssa_rep->defs[1]);
-  EXPECT_EQ(1u, const_wide->dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, LongOverlaps3) {
-  static const MIRDef mirs[] = {
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000u),
-      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 2u, 0u),
-      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 4u, 2u),
-  };
-
-  // The last insn should overlap the first and second.
-  static const int32_t sreg_to_vreg_map[] = { 2, 3, 0, 1, 1, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 0, 2, 4 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  static const bool eliminated[] = {
-      false, true, true,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the CONST_WIDE registers have been correctly renamed.
-  MIR* const_wide = &mirs_[0];
-  ASSERT_EQ(2u, const_wide->ssa_rep->num_defs);
-  EXPECT_EQ(4, const_wide->ssa_rep->defs[0]);
-  EXPECT_EQ(5, const_wide->ssa_rep->defs[1]);
-  EXPECT_EQ(1u, const_wide->dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, MixedOverlaps1) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000u),
-      DEF_MOVE(3, Instruction::MOVE, 1u, 0u),
-      DEF_CONST(3, Instruction::CONST, 2u, 2000u),
-      { 3, Instruction::INT_TO_LONG, 0, 0u, 1, { 2u }, 2, { 3u, 4u } },
-      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 5u, 3u),
-      DEF_CONST(3, Instruction::CONST, 7u, 3000u),
-      DEF_CONST(3, Instruction::CONST, 8u, 4000u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 1, 2, 0, 0, 1, 3, 4, 0, 1 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 3, 5 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 2, 3, 5, 6 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[3], value_names_[4]);
-
-  static const bool eliminated[] = {
-      false, true, false, false, true, false, false,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check renamed registers in CONST.
-  MIR* cst = &mirs_[0];
-  ASSERT_EQ(Instruction::CONST, cst->dalvikInsn.opcode);
-  ASSERT_EQ(0, cst->ssa_rep->num_uses);
-  ASSERT_EQ(1, cst->ssa_rep->num_defs);
-  EXPECT_EQ(1, cst->ssa_rep->defs[0]);
-  EXPECT_EQ(2u, cst->dalvikInsn.vA);
-  // Check renamed registers in INT_TO_LONG.
-  MIR* int_to_long = &mirs_[3];
-  ASSERT_EQ(Instruction::INT_TO_LONG, int_to_long->dalvikInsn.opcode);
-  ASSERT_EQ(1, int_to_long->ssa_rep->num_uses);
-  EXPECT_EQ(2, int_to_long->ssa_rep->uses[0]);
-  ASSERT_EQ(2, int_to_long->ssa_rep->num_defs);
-  EXPECT_EQ(5, int_to_long->ssa_rep->defs[0]);
-  EXPECT_EQ(6, int_to_long->ssa_rep->defs[1]);
-  EXPECT_EQ(3u, int_to_long->dalvikInsn.vA);
-  EXPECT_EQ(0u, int_to_long->dalvikInsn.vB);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, UnusedRegs1) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000u),
-      DEF_CONST(3, Instruction::CONST, 1u, 2000u),
-      DEF_BINOP(3, Instruction::ADD_INT, 2u, 1u, 0u),
-      DEF_CONST(3, Instruction::CONST, 3u, 1000u),            // NOT killed (b/21702651).
-      DEF_BINOP(3, Instruction::ADD_INT, 4u, 1u, 3u),         // Killed (RecordPass)
-      DEF_CONST(3, Instruction::CONST, 5u, 2000u),            // Killed with 9u (BackwardPass)
-      DEF_BINOP(3, Instruction::ADD_INT, 6u, 5u, 0u),         // Killed (RecordPass)
-      DEF_CONST(3, Instruction::CONST, 7u, 4000u),
-      DEF_MOVE(3, Instruction::MOVE, 8u, 0u),                 // Killed with 6u (BackwardPass)
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 1, 2, 3, 0, 3, 0, 3, 4, 0 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 7 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[3]);
-  EXPECT_EQ(value_names_[2], value_names_[4]);
-  EXPECT_EQ(value_names_[1], value_names_[5]);
-  EXPECT_EQ(value_names_[2], value_names_[6]);
-  EXPECT_EQ(value_names_[0], value_names_[8]);
-
-  static const bool eliminated[] = {
-      false, false, false, false, true, true, true, false, true,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, UnusedRegs2) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000u),
-      DEF_CONST(3, Instruction::CONST, 1u, 2000u),
-      DEF_BINOP(3, Instruction::ADD_INT, 2u, 1u, 0u),
-      DEF_CONST(3, Instruction::CONST, 3u, 1000u),            // Killed (BackwardPass; b/21702651)
-      DEF_BINOP(3, Instruction::ADD_INT, 4u, 1u, 3u),         // Killed (RecordPass)
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 5u, 4000u),
-      { 3, Instruction::LONG_TO_INT, 0, 0u, 2, { 5u, 6u }, 1, { 7u } },
-      DEF_BINOP(3, Instruction::ADD_INT, 8u, 7u, 0u),
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 9u, 4000u),  // Killed with 12u (BackwardPass)
-      DEF_CONST(3, Instruction::CONST, 11u, 6000u),
-      { 3, Instruction::LONG_TO_INT, 0, 0u, 2, { 9u, 10u }, 1, { 12u } },  // Killed with 9u (BP)
-  };
-
-  static const int32_t sreg_to_vreg_map[] = {
-      2, 3, 4, 1, 4, 5, 6 /* high word */, 0, 7, 0, 1 /* high word */, 8, 0
-  };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 5, 9 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 5, 6, 7, 9 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[3]);
-  EXPECT_EQ(value_names_[2], value_names_[4]);
-  EXPECT_EQ(value_names_[5], value_names_[8]);
-  EXPECT_EQ(value_names_[6], value_names_[10]);
-
-  static const bool eliminated[] = {
-      false, false, false, true, true, false, false, false, true, false, true,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, ArrayLengthThrows) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 0),              // null
-      DEF_UNOP(3, Instruction::ARRAY_LENGTH, 1u, 0u),       // null.length
-      DEF_CONST(3, Instruction::CONST, 2u, 1000u),          // Overwrite the array-length dest.
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 1 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2 };
-  ExpectValueNamesNE(diff_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Dependency) {
-  static const MIRDef mirs[] = {
-      DEF_MOVE(3, Instruction::MOVE, 5u, 1u),                 // move v5,v1
-      DEF_MOVE(3, Instruction::MOVE, 6u, 1u),                 // move v12,v1
-      DEF_MOVE(3, Instruction::MOVE, 7u, 0u),                 // move v13,v0
-      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 8u, 2u),       // move v0_1,v2_3
-      DEF_MOVE(3, Instruction::MOVE, 10u, 6u),                // move v3,v12
-      DEF_MOVE(3, Instruction::MOVE, 11u, 4u),                // move v2,v4
-      DEF_MOVE(3, Instruction::MOVE, 12u, 7u),                // move v4,v13
-      DEF_MOVE(3, Instruction::MOVE, 13u, 11u),               // move v12,v2
-      DEF_MOVE(3, Instruction::MOVE, 14u, 10u),               // move v2,v3
-      DEF_MOVE(3, Instruction::MOVE, 15u, 5u),                // move v3,v5
-      DEF_MOVE(3, Instruction::MOVE, 16u, 12u),               // move v5,v4
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 12, 13, 0, 1, 3, 2, 4, 12, 2, 3, 5 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 2, 8 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false, true, true, false, false,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
deleted file mode 100644
index 38f7d1e..0000000
--- a/compiler/dex/local_value_numbering.cc
+++ /dev/null
@@ -1,2038 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "local_value_numbering.h"
-
-#include "base/bit_utils.h"
-#include "global_value_numbering.h"
-#include "mir_field_info.h"
-#include "mir_graph.h"
-#include "utils.h"
-
-namespace art {
-
-namespace {  // anonymous namespace
-
-// Operations used for value map keys instead of actual opcode.
-static constexpr uint16_t kInvokeMemoryVersionBumpOp = Instruction::INVOKE_VIRTUAL;
-static constexpr uint16_t kUnresolvedSFieldOp = Instruction::SGET;
-static constexpr uint16_t kResolvedSFieldOp = Instruction::SGET_WIDE;
-static constexpr uint16_t kUnresolvedIFieldOp = Instruction::IGET;
-static constexpr uint16_t kNonAliasingIFieldLocOp = Instruction::IGET_WIDE;
-static constexpr uint16_t kNonAliasingIFieldInitialOp = Instruction::IGET_OBJECT;
-static constexpr uint16_t kAliasingIFieldOp = Instruction::IGET_BOOLEAN;
-static constexpr uint16_t kAliasingIFieldStartVersionOp = Instruction::IGET_BYTE;
-static constexpr uint16_t kAliasingIFieldBumpVersionOp = Instruction::IGET_CHAR;
-static constexpr uint16_t kNonAliasingArrayOp = Instruction::AGET;
-static constexpr uint16_t kNonAliasingArrayStartVersionOp = Instruction::AGET_WIDE;
-static constexpr uint16_t kNonAliasingArrayBumpVersionOp = Instruction::AGET_OBJECT;
-static constexpr uint16_t kAliasingArrayOp = Instruction::AGET_BOOLEAN;
-static constexpr uint16_t kAliasingArrayStartVersionOp = Instruction::AGET_BYTE;
-static constexpr uint16_t kAliasingArrayBumpVersionOp = Instruction::AGET_CHAR;
-static constexpr uint16_t kMergeBlockMemoryVersionBumpOp = Instruction::INVOKE_VIRTUAL_RANGE;
-static constexpr uint16_t kMergeBlockAliasingIFieldVersionBumpOp = Instruction::IPUT;
-static constexpr uint16_t kMergeBlockAliasingIFieldMergeLocationOp = Instruction::IPUT_WIDE;
-static constexpr uint16_t kMergeBlockNonAliasingArrayVersionBumpOp = Instruction::APUT;
-static constexpr uint16_t kMergeBlockNonAliasingArrayMergeLocationOp = Instruction::APUT_WIDE;
-static constexpr uint16_t kMergeBlockAliasingArrayVersionBumpOp = Instruction::APUT_OBJECT;
-static constexpr uint16_t kMergeBlockAliasingArrayMergeLocationOp = Instruction::APUT_BOOLEAN;
-static constexpr uint16_t kMergeBlockNonAliasingIFieldVersionBumpOp = Instruction::APUT_BYTE;
-static constexpr uint16_t kMergeBlockSFieldVersionBumpOp = Instruction::APUT_CHAR;
-
-}  // anonymous namespace
-
-class LocalValueNumbering::AliasingIFieldVersions {
- public:
-  static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
-                                     uint16_t field_id) {
-    uint16_t type = gvn->GetIFieldType(field_id);
-    return gvn->LookupValue(kAliasingIFieldStartVersionOp, field_id,
-                            lvn->global_memory_version_, lvn->unresolved_ifield_version_[type]);
-  }
-
-  static uint16_t BumpMemoryVersion(GlobalValueNumbering* gvn, uint16_t old_version,
-                                    uint16_t store_ref_set_id, uint16_t stored_value) {
-    return gvn->LookupValue(kAliasingIFieldBumpVersionOp, old_version,
-                            store_ref_set_id, stored_value);
-  }
-
-  static uint16_t LookupGlobalValue(GlobalValueNumbering* gvn,
-                                    uint16_t field_id, uint16_t base, uint16_t memory_version) {
-    return gvn->LookupValue(kAliasingIFieldOp, field_id, base, memory_version);
-  }
-
-  static uint16_t LookupMergeValue(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
-                                   uint16_t field_id, uint16_t base) {
-    // If the base/field_id is non-aliasing in lvn, use the non-aliasing value.
-    uint16_t type = gvn->GetIFieldType(field_id);
-    if (lvn->IsNonAliasingIField(base, field_id, type)) {
-      uint16_t loc = gvn->LookupValue(kNonAliasingIFieldLocOp, base, field_id, type);
-      auto lb = lvn->non_aliasing_ifield_value_map_.find(loc);
-      return (lb != lvn->non_aliasing_ifield_value_map_.end())
-          ? lb->second
-          : gvn->LookupValue(kNonAliasingIFieldInitialOp, loc, kNoValue, kNoValue);
-    }
-    return AliasingValuesMergeGet<AliasingIFieldVersions>(
-        gvn, lvn, &lvn->aliasing_ifield_value_map_, field_id, base);
-  }
-
-  static bool HasNewBaseVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
-                                uint16_t field_id) {
-    uint16_t type = gvn->GetIFieldType(field_id);
-    return lvn->unresolved_ifield_version_[type] == lvn->merge_new_memory_version_ ||
-        lvn->global_memory_version_ == lvn->merge_new_memory_version_;
-  }
-
-  static uint16_t LookupMergeBlockValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
-                                        uint16_t field_id) {
-    return gvn->LookupValue(kMergeBlockAliasingIFieldVersionBumpOp, field_id, kNoValue, lvn_id);
-  }
-
-  static uint16_t LookupMergeLocationValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
-                                           uint16_t field_id, uint16_t base) {
-    return gvn->LookupValue(kMergeBlockAliasingIFieldMergeLocationOp, field_id, base, lvn_id);
-  }
-};
-
-class LocalValueNumbering::NonAliasingArrayVersions {
- public:
-  static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn,
-                                     const LocalValueNumbering* lvn ATTRIBUTE_UNUSED,
-                                     uint16_t array) {
-    return gvn->LookupValue(kNonAliasingArrayStartVersionOp, array, kNoValue, kNoValue);
-  }
-
-  static uint16_t BumpMemoryVersion(GlobalValueNumbering* gvn, uint16_t old_version,
-                                    uint16_t store_ref_set_id, uint16_t stored_value) {
-    return gvn->LookupValue(kNonAliasingArrayBumpVersionOp, old_version,
-                            store_ref_set_id, stored_value);
-  }
-
-  static uint16_t LookupGlobalValue(GlobalValueNumbering* gvn,
-                                    uint16_t array, uint16_t index, uint16_t memory_version) {
-    return gvn->LookupValue(kNonAliasingArrayOp, array, index, memory_version);
-  }
-
-  static uint16_t LookupMergeValue(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
-                                   uint16_t array, uint16_t index) {
-    return AliasingValuesMergeGet<NonAliasingArrayVersions>(
-        gvn, lvn, &lvn->non_aliasing_array_value_map_, array, index);
-  }
-
-  static bool HasNewBaseVersion(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
-                                const LocalValueNumbering* lvn ATTRIBUTE_UNUSED,
-                                uint16_t array ATTRIBUTE_UNUSED) {
-    return false;  // Not affected by global_memory_version_.
-  }
-
-  static uint16_t LookupMergeBlockValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
-                                        uint16_t array) {
-    return gvn->LookupValue(kMergeBlockNonAliasingArrayVersionBumpOp, array, kNoValue, lvn_id);
-  }
-
-  static uint16_t LookupMergeLocationValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
-                                           uint16_t array, uint16_t index) {
-    return gvn->LookupValue(kMergeBlockNonAliasingArrayMergeLocationOp, array, index, lvn_id);
-  }
-};
-
-class LocalValueNumbering::AliasingArrayVersions {
- public:
-  static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
-                                     uint16_t type) {
-    return gvn->LookupValue(kAliasingArrayStartVersionOp, type, lvn->global_memory_version_,
-                            kNoValue);
-  }
-
-  static uint16_t BumpMemoryVersion(GlobalValueNumbering* gvn, uint16_t old_version,
-                                    uint16_t store_ref_set_id, uint16_t stored_value) {
-    return gvn->LookupValue(kAliasingArrayBumpVersionOp, old_version,
-                            store_ref_set_id, stored_value);
-  }
-
-  static uint16_t LookupGlobalValue(GlobalValueNumbering* gvn,
-                                    uint16_t type, uint16_t location, uint16_t memory_version) {
-    return gvn->LookupValue(kAliasingArrayOp, type, location, memory_version);
-  }
-
-  static uint16_t LookupMergeValue(GlobalValueNumbering* gvn,
-                                   const LocalValueNumbering* lvn,
-                                   uint16_t type, uint16_t location) {
-    // If the location is non-aliasing in lvn, use the non-aliasing value.
-    uint16_t array = gvn->GetArrayLocationBase(location);
-    if (lvn->IsNonAliasingArray(array, type)) {
-      uint16_t index = gvn->GetArrayLocationIndex(location);
-      return NonAliasingArrayVersions::LookupMergeValue(gvn, lvn, array, index);
-    }
-    return AliasingValuesMergeGet<AliasingArrayVersions>(
-        gvn, lvn, &lvn->aliasing_array_value_map_, type, location);
-  }
-
-  static bool HasNewBaseVersion(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
-                                const LocalValueNumbering* lvn,
-                                uint16_t type ATTRIBUTE_UNUSED) {
-    return lvn->global_memory_version_ == lvn->merge_new_memory_version_;
-  }
-
-  static uint16_t LookupMergeBlockValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
-                                        uint16_t type) {
-    return gvn->LookupValue(kMergeBlockAliasingArrayVersionBumpOp, type, kNoValue, lvn_id);
-  }
-
-  static uint16_t LookupMergeLocationValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
-                                           uint16_t type, uint16_t location) {
-    return gvn->LookupValue(kMergeBlockAliasingArrayMergeLocationOp, type, location, lvn_id);
-  }
-};
-
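-// Returns the AliasingValues for the given key, creating a new empty entry in the map if needed.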
-template <typename Map>
-LocalValueNumbering::AliasingValues* LocalValueNumbering::GetAliasingValues(
-    Map* map, const typename Map::key_type& key) {
-  auto lb = map->lower_bound(key);
-  if (lb == map->end() || map->key_comp()(key, lb->first)) {
-    lb = map->PutBefore(lb, key, AliasingValues(this));
-  }
-  return &lb->second;
-}
-
-template <typename Versions, typename KeyType>
-void LocalValueNumbering::UpdateAliasingValuesLoadVersion(const KeyType& key,
-                                                          AliasingValues* values) {
-  if (values->last_load_memory_version == kNoValue) {
-    // Get the start version that accounts for aliasing with unresolved fields of the same
-    // type and make it unique for the field by including the field_id.
-    uint16_t memory_version = values->memory_version_before_stores;
-    if (memory_version == kNoValue) {
-      memory_version = Versions::StartMemoryVersion(gvn_, this, key);
-    }
-    if (!values->store_loc_set.empty()) {
-      uint16_t ref_set_id = gvn_->GetRefSetId(values->store_loc_set);
-      memory_version = Versions::BumpMemoryVersion(gvn_, memory_version, ref_set_id,
-                                                   values->last_stored_value);
-    }
-    values->last_load_memory_version = memory_version;
-  }
-}
-
-template <typename Versions, typename Map>
-uint16_t LocalValueNumbering::AliasingValuesMergeGet(GlobalValueNumbering* gvn,
-                                                     const LocalValueNumbering* lvn,
-                                                     Map* map, const typename Map::key_type& key,
-                                                     uint16_t location) {
-  // Retrieve the value name that we would get from
-  //   const_cast<LocalValueNumbering*>(lvn)->HandleAliasingValuesGet(map, key, location)
-  // but don't modify the map.
-  uint16_t value_name;
-  auto it = map->find(key);
-  if (it == map->end()) {
-    uint16_t start_version = Versions::StartMemoryVersion(gvn, lvn, key);
-    value_name = Versions::LookupGlobalValue(gvn, key, location, start_version);
-  } else if (it->second.store_loc_set.count(location) != 0u) {
-    value_name = it->second.last_stored_value;
-  } else {
-    auto load_it = it->second.load_value_map.find(location);
-    if (load_it != it->second.load_value_map.end()) {
-      value_name = load_it->second;
-    } else {
-      value_name = Versions::LookupGlobalValue(gvn, key, location,
-                                               it->second.last_load_memory_version);
-    }
-  }
-  return value_name;
-}
-
-template <typename Versions, typename Map>
-uint16_t LocalValueNumbering::HandleAliasingValuesGet(Map* map, const typename Map::key_type& key,
-                                                      uint16_t location) {
-  // Retrieve the value name for IGET/SGET/AGET, update the map with new value if any.
-  uint16_t res;
-  AliasingValues* values = GetAliasingValues(map, key);
-  if (values->store_loc_set.count(location) != 0u) {
-    res = values->last_stored_value;
-  } else {
-    UpdateAliasingValuesLoadVersion<Versions>(key, values);
-    auto lb = values->load_value_map.lower_bound(location);
-    if (lb != values->load_value_map.end() && lb->first == location) {
-      res = lb->second;
-    } else {
-      res = Versions::LookupGlobalValue(gvn_, key, location, values->last_load_memory_version);
-      values->load_value_map.PutBefore(lb, location, res);
-    }
-  }
-  return res;
-}
-
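-// Record the value name stored by an IPUT/SPUT/APUT; returns false if the store is redundant
-// because the same value is already known to be in that location.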
-template <typename Versions, typename Map>
-bool LocalValueNumbering::HandleAliasingValuesPut(Map* map, const typename Map::key_type& key,
-                                                  uint16_t location, uint16_t value) {
-  AliasingValues* values = GetAliasingValues(map, key);
-  auto load_values_it = values->load_value_map.find(location);
-  if (load_values_it != values->load_value_map.end() && load_values_it->second == value) {
-    // This insn can be eliminated; it stores the same value that's already in the field.
-    return false;
-  }
-  if (value == values->last_stored_value) {
-    auto store_loc_lb = values->store_loc_set.lower_bound(location);
-    if (store_loc_lb != values->store_loc_set.end() && *store_loc_lb == location) {
-      // This insn can be eliminated; it stores the same value that's already in the field.
-      return false;
-    }
-    values->store_loc_set.emplace_hint(store_loc_lb, location);
-  } else {
-    UpdateAliasingValuesLoadVersion<Versions>(key, values);
-    values->memory_version_before_stores = values->last_load_memory_version;
-    values->last_stored_value = value;
-    values->store_loc_set.clear();
-    values->store_loc_set.insert(location);
-  }
-  // Clear the last load memory version and remove all potentially overwritten values.
-  values->last_load_memory_version = kNoValue;
-  auto it = values->load_value_map.begin(), end = values->load_value_map.end();
-  while (it != end) {
-    if (it->second == value) {
-      ++it;
-    } else {
-      it = values->load_value_map.erase(it);
-    }
-  }
-  return true;
-}
-
-template <typename K>
-void LocalValueNumbering::CopyAliasingValuesMap(ScopedArenaSafeMap<K, AliasingValues>* dest,
-                                                const ScopedArenaSafeMap<K, AliasingValues>& src) {
-  // We need each new AliasingValues (or rather its map members) to be constructed
-  // with our allocator, rather than the allocator of the source.
-  for (const auto& entry : src) {
-    auto it = dest->PutBefore(dest->end(), entry.first, AliasingValues(this));
-    it->second = entry.second;  // Map assignments preserve current allocator.
-  }
-}
-
-LocalValueNumbering::LocalValueNumbering(GlobalValueNumbering* gvn, uint16_t id,
-                                         ScopedArenaAllocator* allocator)
-    : gvn_(gvn),
-      id_(id),
-      sreg_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-      sreg_wide_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-      sfield_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-      non_aliasing_ifield_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-      aliasing_ifield_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-      non_aliasing_array_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-      aliasing_array_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-      global_memory_version_(0u),
-      non_aliasing_refs_(std::less<uint16_t>(), allocator->Adapter()),
-      escaped_refs_(std::less<uint16_t>(), allocator->Adapter()),
-      escaped_ifield_clobber_set_(EscapedIFieldClobberKeyComparator(), allocator->Adapter()),
-      escaped_array_clobber_set_(EscapedArrayClobberKeyComparator(), allocator->Adapter()),
-      range_checked_(RangeCheckKeyComparator(), allocator->Adapter()),
-      null_checked_(std::less<uint16_t>(), allocator->Adapter()),
-      div_zero_checked_(std::less<uint16_t>(), allocator->Adapter()),
-      merge_names_(allocator->Adapter()),
-      merge_map_(std::less<ScopedArenaVector<BasicBlockId>>(), allocator->Adapter()),
-      merge_new_memory_version_(kNoValue) {
-  std::fill_n(unresolved_sfield_version_, arraysize(unresolved_sfield_version_), 0u);
-  std::fill_n(unresolved_ifield_version_, arraysize(unresolved_ifield_version_), 0u);
-}
-
-bool LocalValueNumbering::Equals(const LocalValueNumbering& other) const {
-  DCHECK(gvn_ == other.gvn_);
-  // Compare the maps/sets and memory versions.
-  return sreg_value_map_ == other.sreg_value_map_ &&
-      sreg_wide_value_map_ == other.sreg_wide_value_map_ &&
-      sfield_value_map_ == other.sfield_value_map_ &&
-      non_aliasing_ifield_value_map_ == other.non_aliasing_ifield_value_map_ &&
-      aliasing_ifield_value_map_ == other.aliasing_ifield_value_map_ &&
-      non_aliasing_array_value_map_ == other.non_aliasing_array_value_map_ &&
-      aliasing_array_value_map_ == other.aliasing_array_value_map_ &&
-      SameMemoryVersion(other) &&
-      non_aliasing_refs_ == other.non_aliasing_refs_ &&
-      escaped_refs_ == other.escaped_refs_ &&
-      escaped_ifield_clobber_set_ == other.escaped_ifield_clobber_set_ &&
-      escaped_array_clobber_set_ == other.escaped_array_clobber_set_ &&
-      range_checked_ == other.range_checked_ &&
-      null_checked_ == other.null_checked_ &&
-      div_zero_checked_ == other.div_zero_checked_;
-}
-
-void LocalValueNumbering::MergeOne(const LocalValueNumbering& other, MergeType merge_type) {
-  CopyLiveSregValues(&sreg_value_map_, other.sreg_value_map_);
-  CopyLiveSregValues(&sreg_wide_value_map_, other.sreg_wide_value_map_);
-
-  if (merge_type == kReturnMerge) {
-    // RETURN or PHI+RETURN. We need only sreg value maps.
-    return;
-  }
-
-  non_aliasing_ifield_value_map_ = other.non_aliasing_ifield_value_map_;
-  CopyAliasingValuesMap(&non_aliasing_array_value_map_, other.non_aliasing_array_value_map_);
-  non_aliasing_refs_ = other.non_aliasing_refs_;
-  range_checked_ = other.range_checked_;
-  null_checked_ = other.null_checked_;
-  div_zero_checked_ = other.div_zero_checked_;
-
-  const BasicBlock* pred_bb = gvn_->GetBasicBlock(other.Id());
-  if (GlobalValueNumbering::HasNullCheckLastInsn(pred_bb, Id())) {
-    int s_reg = pred_bb->last_mir_insn->ssa_rep->uses[0];
-    null_checked_.insert(other.GetOperandValue(s_reg));
-  }
-
-  if (merge_type == kCatchMerge) {
-    // Memory is clobbered. Use new memory version and don't merge aliasing locations.
-    global_memory_version_ = NewMemoryVersion(&merge_new_memory_version_);
-    std::fill_n(unresolved_sfield_version_, arraysize(unresolved_sfield_version_),
-                global_memory_version_);
-    std::fill_n(unresolved_ifield_version_, arraysize(unresolved_ifield_version_),
-                global_memory_version_);
-    PruneNonAliasingRefsForCatch();
-    return;
-  }
-
-  DCHECK(merge_type == kNormalMerge);
-  global_memory_version_ = other.global_memory_version_;
-  std::copy_n(other.unresolved_ifield_version_, arraysize(unresolved_ifield_version_),
-              unresolved_ifield_version_);
-  std::copy_n(other.unresolved_sfield_version_, arraysize(unresolved_sfield_version_),
-              unresolved_sfield_version_);
-  sfield_value_map_ = other.sfield_value_map_;
-  CopyAliasingValuesMap(&aliasing_ifield_value_map_, other.aliasing_ifield_value_map_);
-  CopyAliasingValuesMap(&aliasing_array_value_map_, other.aliasing_array_value_map_);
-  escaped_refs_ = other.escaped_refs_;
-  escaped_ifield_clobber_set_ = other.escaped_ifield_clobber_set_;
-  escaped_array_clobber_set_ = other.escaped_array_clobber_set_;
-}
-
-bool LocalValueNumbering::SameMemoryVersion(const LocalValueNumbering& other) const {
-  return
-      global_memory_version_ == other.global_memory_version_ &&
-      std::equal(unresolved_ifield_version_,
-                 unresolved_ifield_version_ + arraysize(unresolved_ifield_version_),
-                 other.unresolved_ifield_version_) &&
-      std::equal(unresolved_sfield_version_,
-                 unresolved_sfield_version_ + arraysize(unresolved_sfield_version_),
-                 other.unresolved_sfield_version_);
-}
-
-uint16_t LocalValueNumbering::NewMemoryVersion(uint16_t* new_version) {
-  if (*new_version == kNoValue) {
-    *new_version = gvn_->LookupValue(kMergeBlockMemoryVersionBumpOp, 0u, 0u, id_);
-  }
-  return *new_version;
-}
-
-void LocalValueNumbering::MergeMemoryVersions(bool clobbered_catch) {
-  DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
-  const LocalValueNumbering* cmp = gvn_->merge_lvns_[0];
-  // Check if the global version has changed.
-  bool new_global_version = clobbered_catch;
-  if (!new_global_version) {
-    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-      if (lvn->global_memory_version_ != cmp->global_memory_version_) {
-        // Use a new version for everything.
-        new_global_version = true;
-        break;
-      }
-    }
-  }
-  if (new_global_version) {
-    global_memory_version_ = NewMemoryVersion(&merge_new_memory_version_);
-    std::fill_n(unresolved_sfield_version_, arraysize(unresolved_sfield_version_),
-                merge_new_memory_version_);
-    std::fill_n(unresolved_ifield_version_, arraysize(unresolved_ifield_version_),
-                merge_new_memory_version_);
-  } else {
-    // Initialize with a copy of memory versions from the comparison LVN.
-    global_memory_version_ = cmp->global_memory_version_;
-    std::copy_n(cmp->unresolved_ifield_version_, arraysize(unresolved_ifield_version_),
-                unresolved_ifield_version_);
-    std::copy_n(cmp->unresolved_sfield_version_, arraysize(unresolved_sfield_version_),
-                unresolved_sfield_version_);
-    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-      if (lvn == cmp) {
-        continue;
-      }
-      for (size_t i = 0; i != kDexMemAccessTypeCount; ++i) {
-        if (lvn->unresolved_ifield_version_[i] != cmp->unresolved_ifield_version_[i]) {
-          unresolved_ifield_version_[i] = NewMemoryVersion(&merge_new_memory_version_);
-        }
-        if (lvn->unresolved_sfield_version_[i] != cmp->unresolved_sfield_version_[i]) {
-          unresolved_sfield_version_[i] = NewMemoryVersion(&merge_new_memory_version_);
-        }
-      }
-    }
-  }
-}
-
-void LocalValueNumbering::PruneNonAliasingRefsForCatch() {
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    const BasicBlock* bb = gvn_->GetBasicBlock(lvn->Id());
-    if (UNLIKELY(bb->taken == id_) || UNLIKELY(bb->fall_through == id_)) {
-      // Non-exceptional path to a catch handler means that the catch block was actually
-      // empty and all exceptional paths lead to the shared path after that empty block.
-      continue;
-    }
-    DCHECK_EQ(bb->taken, kNullBlock);
-    DCHECK_NE(bb->fall_through, kNullBlock);
-    const BasicBlock* fall_through_bb = gvn_->GetBasicBlock(bb->fall_through);
-    const MIR* mir = fall_through_bb->first_mir_insn;
-    DCHECK(mir != nullptr);
-    // Only INVOKEs can leak and clobber non-aliasing references if they throw.
-    if ((mir->dalvikInsn.FlagsOf() & Instruction::kInvoke) != 0) {
-      HandleInvokeArgs(mir, lvn);
-    }
-  }
-}
-
-
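-// Intersect a set across all merged LVNs, keeping only keys present in every predecessor.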
-template <typename Set, Set LocalValueNumbering::* set_ptr>
-void LocalValueNumbering::IntersectSets() {
-  DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
-
-  // Find the LVN with the least entries in the set.
-  const LocalValueNumbering* least_entries_lvn = gvn_->merge_lvns_[0];
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    if ((lvn->*set_ptr).size() < (least_entries_lvn->*set_ptr).size()) {
-      least_entries_lvn = lvn;
-    }
-  }
-
-  // For each key check if it's in all the LVNs.
-  for (const auto& key : least_entries_lvn->*set_ptr) {
-    bool checked = true;
-    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-      if (lvn != least_entries_lvn && (lvn->*set_ptr).count(key) == 0u) {
-        checked = false;
-        break;
-      }
-    }
-    if (checked) {
-      (this->*set_ptr).emplace_hint((this->*set_ptr).end(), key);
-    }
-  }
-}
-
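-// Copy from src only the entries whose virtual registers are live-in to this basic block.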
-void LocalValueNumbering::CopyLiveSregValues(SregValueMap* dest, const SregValueMap& src) {
-  auto dest_end = dest->end();
-  ArenaBitVector* live_in_v = gvn_->GetMirGraph()->GetBasicBlock(id_)->data_flow_info->live_in_v;
-  DCHECK(live_in_v != nullptr);
-  for (const auto& entry : src) {
-    bool live = live_in_v->IsBitSet(gvn_->GetMirGraph()->SRegToVReg(entry.first));
-    if (live) {
-      dest->PutBefore(dest_end, entry.first, entry.second);
-    }
-  }
-}
-
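-// Intersect an sreg value map across all merged LVNs, keeping only entries that are live-in and
-// have the same value name in every predecessor.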
-template <LocalValueNumbering::SregValueMap LocalValueNumbering::* map_ptr>
-void LocalValueNumbering::IntersectSregValueMaps() {
-  DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
-
-  // Find the LVN with the least entries in the set.
-  const LocalValueNumbering* least_entries_lvn = gvn_->merge_lvns_[0];
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    if ((lvn->*map_ptr).size() < (least_entries_lvn->*map_ptr).size()) {
-      least_entries_lvn = lvn;
-    }
-  }
-
-  // For each key check if it's in all the LVNs.
-  ArenaBitVector* live_in_v = gvn_->GetMirGraph()->GetBasicBlock(id_)->data_flow_info->live_in_v;
-  DCHECK(live_in_v != nullptr);
-  for (const auto& entry : least_entries_lvn->*map_ptr) {
-    bool live_and_same = live_in_v->IsBitSet(gvn_->GetMirGraph()->SRegToVReg(entry.first));
-    if (live_and_same) {
-      for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-        if (lvn != least_entries_lvn) {
-          auto it = (lvn->*map_ptr).find(entry.first);
-          if (it == (lvn->*map_ptr).end() || !(it->second == entry.second)) {
-            live_and_same = false;
-            break;
-          }
-        }
-      }
-    }
-    if (live_and_same) {
-      (this->*map_ptr).PutBefore((this->*map_ptr).end(), entry.first, entry.second);
-    }
-  }
-}
-
-// Intersect maps as sets. The value type must be equality-comparable.
-template <typename Map>
-void LocalValueNumbering::InPlaceIntersectMaps(Map* work_map, const Map& other_map) {
-  auto work_it = work_map->begin(), work_end = work_map->end();
-  auto cmp = work_map->value_comp();
-  for (const auto& entry : other_map) {
-    while (work_it != work_end &&
-        (cmp(*work_it, entry) ||
-         (!cmp(entry, *work_it) && !(work_it->second == entry.second)))) {
-      work_it = work_map->erase(work_it);
-    }
-    if (work_it == work_end) {
-      return;
-    }
-    ++work_it;
-  }
-}
-
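-// For each predecessor LVN, walk its set in order and call MergeFn for every entry that is not
-// already present in this LVN's set.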
-template <typename Set, Set LocalValueNumbering::*set_ptr, void (LocalValueNumbering::*MergeFn)(
-    const typename Set::value_type& entry, typename Set::iterator hint)>
-void LocalValueNumbering::MergeSets() {
-  auto cmp = (this->*set_ptr).value_comp();
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    auto my_it = (this->*set_ptr).begin(), my_end = (this->*set_ptr).end();
-    for (const auto& entry : lvn->*set_ptr) {
-      while (my_it != my_end && cmp(*my_it, entry)) {
-        ++my_it;
-      }
-      if (my_it != my_end && !cmp(entry, *my_it)) {
-        // Already handled.
-        ++my_it;
-      } else {
-        // Merge values for this field_id.
-        (this->*MergeFn)(entry, my_it);  // my_it remains valid across inserts to std::set/SafeMap.
-      }
-    }
-  }
-}
-
-void LocalValueNumbering::IntersectAliasingValueLocations(AliasingValues* work_values,
-                                                          const AliasingValues* values) {
-  auto cmp = work_values->load_value_map.key_comp();
-  auto work_it = work_values->load_value_map.begin(), work_end = work_values->load_value_map.end();
-  auto store_it = values->store_loc_set.begin(), store_end = values->store_loc_set.end();
-  auto load_it = values->load_value_map.begin(), load_end = values->load_value_map.end();
-  while (store_it != store_end || load_it != load_end) {
-    uint16_t loc;
-    if (store_it != store_end && (load_it == load_end || *store_it < load_it->first)) {
-      loc = *store_it;
-      ++store_it;
-    } else {
-      loc = load_it->first;
-      ++load_it;
-      DCHECK(store_it == store_end || cmp(loc, *store_it));
-    }
-    while (work_it != work_end && cmp(work_it->first, loc)) {
-      work_it = work_values->load_value_map.erase(work_it);
-    }
-    if (work_it != work_end && !cmp(loc, work_it->first)) {
-      // The location matches, keep it.
-      ++work_it;
-    }
-  }
-  while (work_it != work_end) {
-    work_it = work_values->load_value_map.erase(work_it);
-  }
-}
-
-void LocalValueNumbering::MergeEscapedRefs(const ValueNameSet::value_type& entry,
-                                           ValueNameSet::iterator hint) {
-  // See if the ref is either escaped or non-aliasing in each predecessor.
-  bool is_escaped = true;
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    if (lvn->non_aliasing_refs_.count(entry) == 0u &&
-        lvn->escaped_refs_.count(entry) == 0u) {
-      is_escaped = false;
-      break;
-    }
-  }
-  if (is_escaped) {
-    escaped_refs_.emplace_hint(hint, entry);
-  }
-}
-
-void LocalValueNumbering::MergeEscapedIFieldTypeClobberSets(
-    const EscapedIFieldClobberSet::value_type& entry, EscapedIFieldClobberSet::iterator hint) {
-  // Insert only type-clobber entries (field_id == kNoValue) of escaped refs.
-  if (entry.field_id == kNoValue && escaped_refs_.count(entry.base) != 0u) {
-    escaped_ifield_clobber_set_.emplace_hint(hint, entry);
-  }
-}
-
-void LocalValueNumbering::MergeEscapedIFieldClobberSets(
-    const EscapedIFieldClobberSet::value_type& entry, EscapedIFieldClobberSet::iterator hint) {
-  // Insert only those entries of escaped refs that are not overridden by a type clobber.
-  if (!(hint != escaped_ifield_clobber_set_.end() &&
-        hint->base == entry.base && hint->type == entry.type) &&
-      escaped_refs_.count(entry.base) != 0u) {
-    escaped_ifield_clobber_set_.emplace_hint(hint, entry);
-  }
-}
-
-void LocalValueNumbering::MergeEscapedArrayClobberSets(
-    const EscapedArrayClobberSet::value_type& entry, EscapedArrayClobberSet::iterator hint) {
-  if (escaped_refs_.count(entry.base) != 0u) {
-    escaped_array_clobber_set_.emplace_hint(hint, entry);
-  }
-}
-
-void LocalValueNumbering::MergeNullChecked() {
-  DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
-
-  // Find the LVN with the least entries in the set.
-  const LocalValueNumbering* least_entries_lvn = gvn_->merge_lvns_[0];
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    if (lvn->null_checked_.size() < least_entries_lvn->null_checked_.size()) {
-      least_entries_lvn = lvn;
-    }
-  }
-
-  // For each null-checked value name check if it's null-checked in all the LVNs.
-  for (const auto& value_name : least_entries_lvn->null_checked_) {
-    // Merge null_checked_ for this ref.
-    merge_names_.clear();
-    merge_names_.resize(gvn_->merge_lvns_.size(), value_name);
-    if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
-      null_checked_.insert(null_checked_.end(), value_name);
-    }
-  }
-
-  // Now check if the least_entries_lvn has a null-check as the last insn.
-  const BasicBlock* least_entries_bb = gvn_->GetBasicBlock(least_entries_lvn->Id());
-  if (gvn_->HasNullCheckLastInsn(least_entries_bb, id_)) {
-    int s_reg = least_entries_bb->last_mir_insn->ssa_rep->uses[0];
-    uint32_t value_name = least_entries_lvn->GetOperandValue(s_reg);
-    merge_names_.clear();
-    merge_names_.resize(gvn_->merge_lvns_.size(), value_name);
-    if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
-      null_checked_.insert(value_name);
-    }
-  }
-}
-
-void LocalValueNumbering::MergeDivZeroChecked() {
-  DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
-
-  // Find the LVN with the least entries in the set.
-  const LocalValueNumbering* least_entries_lvn = gvn_->merge_lvns_[0];
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    if (lvn->div_zero_checked_.size() < least_entries_lvn->div_zero_checked_.size()) {
-      least_entries_lvn = lvn;
-    }
-  }
-
-  // For each div-zero value name check if it's div-zero checked in all the LVNs.
-  for (const auto& value_name : least_entries_lvn->div_zero_checked_) {
-    // Merge null_checked_ for this ref.
-    merge_names_.clear();
-    merge_names_.resize(gvn_->merge_lvns_.size(), value_name);
-    if (gvn_->DivZeroCheckedInAllPredecessors(merge_names_)) {
-      div_zero_checked_.insert(div_zero_checked_.end(), value_name);
-    }
-  }
-}
-
-void LocalValueNumbering::MergeSFieldValues(const SFieldToValueMap::value_type& entry,
-                                            SFieldToValueMap::iterator hint) {
-  uint16_t field_id = entry.first;
-  merge_names_.clear();
-  uint16_t value_name = kNoValue;
-  bool same_values = true;
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    // Get the value name as in HandleSGet() but don't modify *lvn.
-    auto it = lvn->sfield_value_map_.find(field_id);
-    if (it != lvn->sfield_value_map_.end()) {
-      value_name = it->second;
-    } else {
-      uint16_t type = gvn_->GetSFieldType(field_id);
-      value_name = gvn_->LookupValue(kResolvedSFieldOp, field_id,
-                                     lvn->unresolved_sfield_version_[type],
-                                     lvn->global_memory_version_);
-    }
-
-    same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
-    merge_names_.push_back(value_name);
-  }
-  if (same_values) {
-    // value_name already contains the result.
-  } else {
-    auto lb = merge_map_.lower_bound(merge_names_);
-    if (lb != merge_map_.end() && !merge_map_.key_comp()(merge_names_, lb->first)) {
-      value_name = lb->second;
-    } else {
-      value_name = gvn_->LookupValue(kMergeBlockSFieldVersionBumpOp, field_id, id_, kNoValue);
-      merge_map_.PutBefore(lb, merge_names_, value_name);
-      if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
-        null_checked_.insert(value_name);
-      }
-    }
-  }
-  sfield_value_map_.PutBefore(hint, field_id, value_name);
-}
-
-void LocalValueNumbering::MergeNonAliasingIFieldValues(const IFieldLocToValueMap::value_type& entry,
-                                                       IFieldLocToValueMap::iterator hint) {
-  uint16_t field_loc = entry.first;
-  merge_names_.clear();
-  uint16_t value_name = kNoValue;
-  bool same_values = true;
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    // Get the value name as in HandleIGet() but don't modify *lvn.
-    auto it = lvn->non_aliasing_ifield_value_map_.find(field_loc);
-    if (it != lvn->non_aliasing_ifield_value_map_.end()) {
-      value_name = it->second;
-    } else {
-      value_name = gvn_->LookupValue(kNonAliasingIFieldInitialOp, field_loc, kNoValue, kNoValue);
-    }
-
-    same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
-    merge_names_.push_back(value_name);
-  }
-  if (same_values) {
-    // value_name already contains the result.
-  } else {
-    auto lb = merge_map_.lower_bound(merge_names_);
-    if (lb != merge_map_.end() && !merge_map_.key_comp()(merge_names_, lb->first)) {
-      value_name = lb->second;
-    } else {
-      value_name = gvn_->LookupValue(kMergeBlockNonAliasingIFieldVersionBumpOp, field_loc,
-                                     id_, kNoValue);
-      merge_map_.PutBefore(lb, merge_names_, value_name);
-      if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
-        null_checked_.insert(value_name);
-      }
-    }
-  }
-  non_aliasing_ifield_value_map_.PutBefore(hint, field_loc, value_name);
-}
-
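-// Merge the AliasingValues for the given key from all predecessors. If they agree on the memory
-// version and stores, copy those and intersect the load value maps; otherwise bump the memory
-// version and assign merged value names to the locations accessed in every predecessor.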
-template <typename Map, Map LocalValueNumbering::*map_ptr, typename Versions>
-void LocalValueNumbering::MergeAliasingValues(const typename Map::value_type& entry,
-                                              typename Map::iterator hint) {
-  const typename Map::key_type& key = entry.first;
-
-  auto it = (this->*map_ptr).PutBefore(hint, key, AliasingValues(this));
-  AliasingValues* my_values = &it->second;
-
-  const AliasingValues* cmp_values = nullptr;
-  bool same_version = !Versions::HasNewBaseVersion(gvn_, this, key);
-  uint16_t load_memory_version_for_same_version = kNoValue;
-  if (same_version) {
-    // Find the values for this key in the first predecessor LVN that has them.
-    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-      auto value = (lvn->*map_ptr).find(key);
-      if (value != (lvn->*map_ptr).end()) {
-        cmp_values = &value->second;
-        break;
-      }
-    }
-    DCHECK(cmp_values != nullptr);  // At least one predecessor LVN must have values for this key.
-
-    // Check if we have identical memory versions, i.e. the global memory version, unresolved
-    // field version and the values' memory_version_before_stores, last_stored_value
-    // and store_loc_set are identical.
-    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-      auto value = (lvn->*map_ptr).find(key);
-      if (value == (lvn->*map_ptr).end()) {
-        if (cmp_values->memory_version_before_stores != kNoValue) {
-          same_version = false;
-          break;
-        }
-      } else if (cmp_values->last_stored_value != value->second.last_stored_value ||
-          cmp_values->memory_version_before_stores != value->second.memory_version_before_stores ||
-          cmp_values->store_loc_set != value->second.store_loc_set) {
-        same_version = false;
-        break;
-      } else if (value->second.last_load_memory_version != kNoValue) {
-        DCHECK(load_memory_version_for_same_version == kNoValue ||
-               load_memory_version_for_same_version == value->second.last_load_memory_version);
-        load_memory_version_for_same_version = value->second.last_load_memory_version;
-      }
-    }
-  }
-
-  if (same_version) {
-    // Copy the identical values.
-    my_values->memory_version_before_stores = cmp_values->memory_version_before_stores;
-    my_values->last_stored_value = cmp_values->last_stored_value;
-    my_values->store_loc_set = cmp_values->store_loc_set;
-    my_values->last_load_memory_version = load_memory_version_for_same_version;
-    // Merge load values seen in all incoming arcs (i.e. an intersection).
-    if (!cmp_values->load_value_map.empty()) {
-      my_values->load_value_map = cmp_values->load_value_map;
-      for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-        auto value = (lvn->*map_ptr).find(key);
-        if (value == (lvn->*map_ptr).end() || value->second.load_value_map.empty()) {
-          my_values->load_value_map.clear();
-          break;
-        }
-        InPlaceIntersectMaps(&my_values->load_value_map, value->second.load_value_map);
-        if (my_values->load_value_map.empty()) {
-          break;
-        }
-      }
-    }
-  } else {
-    // Bump version number for the merge.
-    my_values->memory_version_before_stores = my_values->last_load_memory_version =
-        Versions::LookupMergeBlockValue(gvn_, id_, key);
-
-    // Calculate the locations that have been either read from or written to in each incoming LVN.
-    bool first_lvn = true;
-    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-      auto value = (lvn->*map_ptr).find(key);
-      if (value == (lvn->*map_ptr).end()) {
-        my_values->load_value_map.clear();
-        break;
-      }
-      if (first_lvn) {
-        first_lvn = false;
-        // Copy the first LVN's locations. Values will be overwritten later.
-        my_values->load_value_map = value->second.load_value_map;
-        for (uint16_t location : value->second.store_loc_set) {
-          my_values->load_value_map.Put(location, 0u);
-        }
-      } else {
-        IntersectAliasingValueLocations(my_values, &value->second);
-      }
-    }
-    // Calculate merged values for the intersection.
-    for (auto& load_value_entry : my_values->load_value_map) {
-      uint16_t location = load_value_entry.first;
-      merge_names_.clear();
-      uint16_t value_name = kNoValue;
-      bool same_values = true;
-      for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-        value_name = Versions::LookupMergeValue(gvn_, lvn, key, location);
-        same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
-        merge_names_.push_back(value_name);
-      }
-      if (same_values) {
-        // value_name already contains the result.
-      } else {
-        auto lb = merge_map_.lower_bound(merge_names_);
-        if (lb != merge_map_.end() && !merge_map_.key_comp()(merge_names_, lb->first)) {
-          value_name = lb->second;
-        } else {
-          // NOTE: In addition to the key and id_ which don't change on an LVN recalculation
-          // during GVN, we also add location which can actually change on recalculation, so the
-          // value_name below may change. This could lead to an infinite loop if the location
-          // value name always changed when the referenced value name changes. However, given that
-          // we assign unique value names for other merges, such as Phis, such a dependency is
-          // not possible in a well-formed SSA graph.
-          value_name = Versions::LookupMergeLocationValue(gvn_, id_, key, location);
-          merge_map_.PutBefore(lb, merge_names_, value_name);
-          if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
-            null_checked_.insert(value_name);
-          }
-        }
-      }
-      load_value_entry.second = value_name;
-    }
-  }
-}
-
-void LocalValueNumbering::Merge(MergeType merge_type) {
-  DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
-
-  // Always reserve space in merge_names_. Even if we don't use it in Merge(), we may need it
-  // in GetStartingVregValueNumberImpl() when merge_names_'s allocator is not the topmost one.
-  merge_names_.reserve(gvn_->merge_lvns_.size());
-
-  IntersectSregValueMaps<&LocalValueNumbering::sreg_value_map_>();
-  IntersectSregValueMaps<&LocalValueNumbering::sreg_wide_value_map_>();
-  if (merge_type == kReturnMerge) {
-    // RETURN or PHI+RETURN. We need only sreg value maps.
-    return;
-  }
-
-  MergeMemoryVersions(merge_type == kCatchMerge);
-
-  // Merge non-aliasing maps/sets.
-  IntersectSets<ValueNameSet, &LocalValueNumbering::non_aliasing_refs_>();
-  if (!non_aliasing_refs_.empty() && merge_type == kCatchMerge) {
-    PruneNonAliasingRefsForCatch();
-  }
-  if (!non_aliasing_refs_.empty()) {
-    MergeSets<IFieldLocToValueMap, &LocalValueNumbering::non_aliasing_ifield_value_map_,
-              &LocalValueNumbering::MergeNonAliasingIFieldValues>();
-    MergeSets<NonAliasingArrayValuesMap, &LocalValueNumbering::non_aliasing_array_value_map_,
-              &LocalValueNumbering::MergeAliasingValues<
-                  NonAliasingArrayValuesMap, &LocalValueNumbering::non_aliasing_array_value_map_,
-                  NonAliasingArrayVersions>>();
-  }
-
-  // We won't do anything complicated for range checks, just calculate the intersection.
-  IntersectSets<RangeCheckSet, &LocalValueNumbering::range_checked_>();
-
-  // Merge null_checked_. We may later insert more, such as merged object field values.
-  MergeNullChecked();
-
-  // Now merge the div_zero_checked_.
-  MergeDivZeroChecked();
-
-  if (merge_type == kCatchMerge) {
-    // Memory is clobbered. New memory version already created, don't merge aliasing locations.
-    return;
-  }
-
-  DCHECK(merge_type == kNormalMerge);
-
-  // Merge escaped refs and clobber sets.
-  MergeSets<ValueNameSet, &LocalValueNumbering::escaped_refs_,
-            &LocalValueNumbering::MergeEscapedRefs>();
-  if (!escaped_refs_.empty()) {
-    MergeSets<EscapedIFieldClobberSet, &LocalValueNumbering::escaped_ifield_clobber_set_,
-              &LocalValueNumbering::MergeEscapedIFieldTypeClobberSets>();
-    MergeSets<EscapedIFieldClobberSet, &LocalValueNumbering::escaped_ifield_clobber_set_,
-              &LocalValueNumbering::MergeEscapedIFieldClobberSets>();
-    MergeSets<EscapedArrayClobberSet, &LocalValueNumbering::escaped_array_clobber_set_,
-              &LocalValueNumbering::MergeEscapedArrayClobberSets>();
-  }
-
-  MergeSets<SFieldToValueMap, &LocalValueNumbering::sfield_value_map_,
-            &LocalValueNumbering::MergeSFieldValues>();
-  MergeSets<AliasingIFieldValuesMap, &LocalValueNumbering::aliasing_ifield_value_map_,
-            &LocalValueNumbering::MergeAliasingValues<
-                AliasingIFieldValuesMap, &LocalValueNumbering::aliasing_ifield_value_map_,
-                AliasingIFieldVersions>>();
-  MergeSets<AliasingArrayValuesMap, &LocalValueNumbering::aliasing_array_value_map_,
-            &LocalValueNumbering::MergeAliasingValues<
-                AliasingArrayValuesMap, &LocalValueNumbering::aliasing_array_value_map_,
-                AliasingArrayVersions>>();
-}
-
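-// For the entry block, mark the incoming "this" reference as null-checked for non-static methods
-// and record value names for wide arguments.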
-void LocalValueNumbering::PrepareEntryBlock() {
-  uint32_t vreg = gvn_->GetMirGraph()->GetFirstInVR();
-  CompilationUnit* cu = gvn_->GetCompilationUnit();
-  const char* shorty = cu->shorty;
-  ++shorty;  // Skip return value.
-  if ((cu->access_flags & kAccStatic) == 0) {
-    // If non-static method, mark "this" as non-null
-    uint16_t value_name = GetOperandValue(vreg);
-    ++vreg;
-    null_checked_.insert(value_name);
-  }
-  for ( ; *shorty != 0; ++shorty, ++vreg) {
-    if (*shorty == 'J' || *shorty == 'D') {
-      uint16_t value_name = GetOperandValueWide(vreg);
-      SetOperandValueWide(vreg, value_name);
-      ++vreg;
-    }
-  }
-}
-
-uint16_t LocalValueNumbering::MarkNonAliasingNonNull(MIR* mir) {
-  uint16_t res = GetOperandValue(mir->ssa_rep->defs[0]);
-  DCHECK(null_checked_.find(res) == null_checked_.end());
-  null_checked_.insert(res);
-  non_aliasing_refs_.insert(res);
-  return res;
-}
-
-bool LocalValueNumbering::IsNonAliasing(uint16_t reg) const {
-  return non_aliasing_refs_.find(reg) != non_aliasing_refs_.end();
-}
-
-bool LocalValueNumbering::IsNonAliasingIField(uint16_t reg, uint16_t field_id,
-                                              uint16_t type) const {
-  if (IsNonAliasing(reg)) {
-    return true;
-  }
-  if (escaped_refs_.find(reg) == escaped_refs_.end()) {
-    return false;
-  }
-  // Check for IPUTs to unresolved fields.
-  EscapedIFieldClobberKey key1 = { reg, type, kNoValue };
-  if (escaped_ifield_clobber_set_.find(key1) != escaped_ifield_clobber_set_.end()) {
-    return false;
-  }
-  // Check for aliased IPUTs to the same field.
-  EscapedIFieldClobberKey key2 = { reg, type, field_id };
-  return escaped_ifield_clobber_set_.find(key2) == escaped_ifield_clobber_set_.end();
-}
-
-bool LocalValueNumbering::IsNonAliasingArray(uint16_t reg, uint16_t type) const {
-  if (IsNonAliasing(reg)) {
-    return true;
-  }
-  if (escaped_refs_.count(reg) == 0u) {
-    return false;
-  }
-  // Check for aliased APUTs.
-  EscapedArrayClobberKey key = { reg, type };
-  return escaped_array_clobber_set_.find(key) == escaped_array_clobber_set_.end();
-}
-
-void LocalValueNumbering::HandleNullCheck(MIR* mir, uint16_t reg) {
-  auto lb = null_checked_.lower_bound(reg);
-  if (lb != null_checked_.end() && *lb == reg) {
-    if (LIKELY(gvn_->CanModify())) {
-      if (gvn_->GetCompilationUnit()->verbose) {
-        LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
-      }
-      mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
-    }
-  } else {
-    null_checked_.insert(lb, reg);
-  }
-}
-
-void LocalValueNumbering::HandleRangeCheck(MIR* mir, uint16_t array, uint16_t index) {
-  RangeCheckKey key = { array, index };
-  auto lb = range_checked_.lower_bound(key);
-  if (lb != range_checked_.end() && !RangeCheckKeyComparator()(key, *lb)) {
-    if (LIKELY(gvn_->CanModify())) {
-      if (gvn_->GetCompilationUnit()->verbose) {
-        LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
-      }
-      mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
-    }
-  } else {
-    // Mark range check completed.
-    range_checked_.insert(lb, key);
-  }
-}
-
-void LocalValueNumbering::HandleDivZeroCheck(MIR* mir, uint16_t reg) {
-  auto lb = div_zero_checked_.lower_bound(reg);
-  if (lb != div_zero_checked_.end() && *lb == reg) {
-    if (LIKELY(gvn_->CanModify())) {
-      if (gvn_->GetCompilationUnit()->verbose) {
-        LOG(INFO) << "Removing div zero check for 0x" << std::hex << mir->offset;
-      }
-      mir->optimization_flags |= MIR_IGNORE_DIV_ZERO_CHECK;
-    }
-  } else {
-    div_zero_checked_.insert(lb, reg);
-  }
-}
-
-void LocalValueNumbering::HandlePutObject(MIR* mir) {
-  // If we're storing a non-aliasing reference, stop tracking it as non-aliasing now.
-  uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
-  HandleEscapingRef(base);
-  if (gvn_->CanModify() && null_checked_.count(base) != 0u) {
-    if (gvn_->GetCompilationUnit()->verbose) {
-      LOG(INFO) << "Removing GC card mark value null check for 0x" << std::hex << mir->offset;
-    }
-    mir->optimization_flags |= MIR_STORE_NON_NULL_VALUE;
-  }
-}
-
-void LocalValueNumbering::HandleEscapingRef(uint16_t base) {
-  auto it = non_aliasing_refs_.find(base);
-  if (it != non_aliasing_refs_.end()) {
-    non_aliasing_refs_.erase(it);
-    escaped_refs_.insert(base);
-  }
-}
-
-void LocalValueNumbering::HandleInvokeArgs(const MIR* mir, const LocalValueNumbering* mir_lvn) {
-  const int32_t* uses = mir->ssa_rep->uses;
-  const int32_t* uses_end = uses + mir->ssa_rep->num_uses;
-  while (uses != uses_end) {
-    uint16_t sreg = *uses;
-    ++uses;
-    // Avoid LookupValue() so that we don't store new values in the global value map.
-    auto local_it = mir_lvn->sreg_value_map_.find(sreg);
-    if (local_it != mir_lvn->sreg_value_map_.end()) {
-      non_aliasing_refs_.erase(local_it->second);
-    } else {
-      uint16_t value_name = gvn_->FindValue(kNoValue, sreg, kNoValue, kNoValue);
-      if (value_name != kNoValue) {
-        non_aliasing_refs_.erase(value_name);
-      }
-    }
-  }
-}
-
-uint16_t LocalValueNumbering::HandlePhi(MIR* mir) {
-  if (gvn_->merge_lvns_.empty()) {
-    // Running LVN without a full GVN?
-    return kNoValue;
-  }
-  // Determine if this Phi is merging wide regs.
-  RegLocation raw_dest = gvn_->GetMirGraph()->GetRawDest(mir);
-  if (raw_dest.high_word) {
-    // This is the high part of a wide reg. Ignore the Phi.
-    return kNoValue;
-  }
-  bool wide = raw_dest.wide;
-  // Iterate over *merge_lvns_ and skip incoming sregs for BBs without associated LVN.
-  merge_names_.clear();
-  uint16_t value_name = kNoValue;
-  bool same_values = true;
-  BasicBlockId* incoming = mir->meta.phi_incoming;
-  int32_t* uses = mir->ssa_rep->uses;
-  int16_t pos = 0;
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    DCHECK_LT(pos, mir->ssa_rep->num_uses);
-    while (incoming[pos] != lvn->Id()) {
-      ++pos;
-      DCHECK_LT(pos, mir->ssa_rep->num_uses);
-    }
-    int s_reg = uses[pos];
-    ++pos;
-    value_name = wide ? lvn->GetOperandValueWide(s_reg) : lvn->GetOperandValue(s_reg);
-
-    same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
-    merge_names_.push_back(value_name);
-  }
-  if (same_values) {
-    // value_name already contains the result.
-  } else {
-    auto lb = merge_map_.lower_bound(merge_names_);
-    if (lb != merge_map_.end() && !merge_map_.key_comp()(merge_names_, lb->first)) {
-      value_name = lb->second;
-    } else {
-      value_name = gvn_->LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
-      merge_map_.PutBefore(lb, merge_names_, value_name);
-      if (!wide && gvn_->NullCheckedInAllPredecessors(merge_names_)) {
-        null_checked_.insert(value_name);
-      }
-      if (gvn_->DivZeroCheckedInAllPredecessors(merge_names_)) {
-        div_zero_checked_.insert(value_name);
-      }
-    }
-  }
-  if (wide) {
-    SetOperandValueWide(mir->ssa_rep->defs[0], value_name);
-  } else {
-    SetOperandValue(mir->ssa_rep->defs[0], value_name);
-  }
-  return value_name;
-}
-
-uint16_t LocalValueNumbering::HandleConst(MIR* mir, uint32_t value) {
-  RegLocation raw_dest = gvn_->GetMirGraph()->GetRawDest(mir);
-  uint16_t res;
-  if (value == 0u && raw_dest.ref) {
-    res = GlobalValueNumbering::kNullValue;
-  } else {
-    Instruction::Code op = raw_dest.fp ? Instruction::CONST_HIGH16 : Instruction::CONST;
-    res = gvn_->LookupValue(op, Low16Bits(value), High16Bits(value), 0);
-  }
-  SetOperandValue(mir->ssa_rep->defs[0], res);
-  return res;
-}
-
-uint16_t LocalValueNumbering::HandleConstWide(MIR* mir, uint64_t value) {
-  RegLocation raw_dest = gvn_->GetMirGraph()->GetRawDest(mir);
-  Instruction::Code op = raw_dest.fp ? Instruction::CONST_HIGH16 : Instruction::CONST;
-  uint32_t low_word = Low32Bits(value);
-  uint32_t high_word = High32Bits(value);
-  uint16_t low_res = gvn_->LookupValue(op, Low16Bits(low_word), High16Bits(low_word), 1);
-  uint16_t high_res = gvn_->LookupValue(op, Low16Bits(high_word), High16Bits(high_word), 2);
-  uint16_t res = gvn_->LookupValue(op, low_res, high_res, 3);
-  SetOperandValueWide(mir->ssa_rep->defs[0], res);
-  return res;
-}
-
-uint16_t LocalValueNumbering::HandleAGet(MIR* mir, uint16_t opcode) {
-  uint16_t array = GetOperandValue(mir->ssa_rep->uses[0]);
-  HandleNullCheck(mir, array);
-  uint16_t index = GetOperandValue(mir->ssa_rep->uses[1]);
-  HandleRangeCheck(mir, array, index);
-  uint16_t type = AGetMemAccessType(static_cast<Instruction::Code>(opcode));
-  // Establish value number for loaded register.
-  uint16_t res;
-  if (IsNonAliasingArray(array, type)) {
-    res = HandleAliasingValuesGet<NonAliasingArrayVersions>(&non_aliasing_array_value_map_,
-                                                            array, index);
-  } else {
-    uint16_t location = gvn_->GetArrayLocation(array, index);
-    res = HandleAliasingValuesGet<AliasingArrayVersions>(&aliasing_array_value_map_,
-                                                         type, location);
-  }
-  if (opcode == Instruction::AGET_WIDE) {
-    SetOperandValueWide(mir->ssa_rep->defs[0], res);
-  } else {
-    SetOperandValue(mir->ssa_rep->defs[0], res);
-  }
-  return res;
-}
-
-void LocalValueNumbering::HandleAPut(MIR* mir, uint16_t opcode) {
-  int array_idx = (opcode == Instruction::APUT_WIDE) ? 2 : 1;
-  int index_idx = array_idx + 1;
-  uint16_t array = GetOperandValue(mir->ssa_rep->uses[array_idx]);
-  HandleNullCheck(mir, array);
-  uint16_t index = GetOperandValue(mir->ssa_rep->uses[index_idx]);
-  HandleRangeCheck(mir, array, index);
-
-  uint16_t type = APutMemAccessType(static_cast<Instruction::Code>(opcode));
-  uint16_t value = (opcode == Instruction::APUT_WIDE)
-                   ? GetOperandValueWide(mir->ssa_rep->uses[0])
-                   : GetOperandValue(mir->ssa_rep->uses[0]);
-  if (IsNonAliasing(array)) {
-    bool put_is_live = HandleAliasingValuesPut<NonAliasingArrayVersions>(
-        &non_aliasing_array_value_map_, array, index, value);
-    if (!put_is_live) {
-      // This APUT can be eliminated; it stores the same value that's already in the array element.
-      // TODO: Eliminate the APUT.
-      return;
-    }
-  } else {
-    uint16_t location = gvn_->GetArrayLocation(array, index);
-    bool put_is_live = HandleAliasingValuesPut<AliasingArrayVersions>(
-        &aliasing_array_value_map_, type, location, value);
-    if (!put_is_live) {
-      // This APUT can be eliminated; it stores the same value that's already in the array element.
-      // TODO: Eliminate the APUT.
-      return;
-    }
-
-    // Clobber all escaped array refs for this type.
-    for (uint16_t escaped_array : escaped_refs_) {
-      EscapedArrayClobberKey clobber_key = { escaped_array, type };
-      escaped_array_clobber_set_.insert(clobber_key);
-    }
-  }
-}
-
-uint16_t LocalValueNumbering::HandleIGet(MIR* mir, uint16_t opcode) {
-  uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
-  HandleNullCheck(mir, base);
-  const MirFieldInfo& field_info = gvn_->GetMirGraph()->GetIFieldLoweringInfo(mir);
-  uint16_t res;
-  if (!field_info.IsResolved() || field_info.IsVolatile()) {
-    // Unresolved fields may be volatile, so handle them as such to be safe.
-    HandleInvokeOrClInitOrAcquireOp(mir);  // Volatile GETs have acquire semantics.
-    // Volatile fields always get a new memory version; field id is irrelevant.
-    // Use result s_reg - will be unique.
-    res = gvn_->LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
-  } else {
-    uint16_t type = IGetMemAccessType(static_cast<Instruction::Code>(opcode));
-    uint16_t field_id = gvn_->GetIFieldId(mir);
-    if (IsNonAliasingIField(base, field_id, type)) {
-      uint16_t loc = gvn_->LookupValue(kNonAliasingIFieldLocOp, base, field_id, type);
-      auto lb = non_aliasing_ifield_value_map_.lower_bound(loc);
-      if (lb != non_aliasing_ifield_value_map_.end() && lb->first == loc) {
-        res = lb->second;
-      } else {
-        res = gvn_->LookupValue(kNonAliasingIFieldInitialOp, loc, kNoValue, kNoValue);
-        non_aliasing_ifield_value_map_.PutBefore(lb, loc, res);
-      }
-    } else {
-      res = HandleAliasingValuesGet<AliasingIFieldVersions>(&aliasing_ifield_value_map_,
-                                                            field_id, base);
-    }
-  }
-  if (opcode == Instruction::IGET_WIDE) {
-    SetOperandValueWide(mir->ssa_rep->defs[0], res);
-  } else {
-    SetOperandValue(mir->ssa_rep->defs[0], res);
-  }
-  return res;
-}
-
-void LocalValueNumbering::HandleIPut(MIR* mir, uint16_t opcode) {
-  int base_reg = (opcode == Instruction::IPUT_WIDE) ? 2 : 1;
-  uint16_t base = GetOperandValue(mir->ssa_rep->uses[base_reg]);
-  HandleNullCheck(mir, base);
-  uint16_t type = IPutMemAccessType(static_cast<Instruction::Code>(opcode));
-  const MirFieldInfo& field_info = gvn_->GetMirGraph()->GetIFieldLoweringInfo(mir);
-  if (!field_info.IsResolved()) {
-    // Unresolved fields always alias with everything of the same type.
-    // Use mir->offset as modifier; without elaborate inlining, it will be unique.
-    unresolved_ifield_version_[type] =
-        gvn_->LookupValue(kUnresolvedIFieldOp, kNoValue, kNoValue, mir->offset);
-
-    // For simplicity, treat base as escaped now.
-    HandleEscapingRef(base);
-
-    // Clobber all fields of escaped references of the same type.
-    for (uint16_t escaped_ref : escaped_refs_) {
-      EscapedIFieldClobberKey clobber_key = { escaped_ref, type, kNoValue };
-      escaped_ifield_clobber_set_.insert(clobber_key);
-    }
-
-    // Aliasing fields of the same type may have been overwritten.
-    auto it = aliasing_ifield_value_map_.begin(), end = aliasing_ifield_value_map_.end();
-    while (it != end) {
-      if (gvn_->GetIFieldType(it->first) != type) {
-        ++it;
-      } else {
-        it = aliasing_ifield_value_map_.erase(it);
-      }
-    }
-  } else if (field_info.IsVolatile()) {
-    // Nothing to do; resolved volatile fields always get a new memory version anyway and
-    // can't alias with resolved non-volatile fields.
-  } else {
-    uint16_t field_id = gvn_->GetIFieldId(mir);
-    uint16_t value = (opcode == Instruction::IPUT_WIDE)
-                     ? GetOperandValueWide(mir->ssa_rep->uses[0])
-                     : GetOperandValue(mir->ssa_rep->uses[0]);
-    if (IsNonAliasing(base)) {
-      uint16_t loc = gvn_->LookupValue(kNonAliasingIFieldLocOp, base, field_id, type);
-      auto lb = non_aliasing_ifield_value_map_.lower_bound(loc);
-      if (lb != non_aliasing_ifield_value_map_.end() && lb->first == loc) {
-        if (lb->second == value) {
-          // This IPUT can be eliminated; it stores the same value that's already in the field.
-          // TODO: Eliminate the IPUT.
-          return;
-        }
-        lb->second = value;  // Overwrite.
-      } else {
-        non_aliasing_ifield_value_map_.PutBefore(lb, loc, value);
-      }
-    } else {
-      bool put_is_live = HandleAliasingValuesPut<AliasingIFieldVersions>(
-          &aliasing_ifield_value_map_, field_id, base, value);
-      if (!put_is_live) {
-        // This IPUT can be eliminated; it stores the same value that's already in the field.
-        // TODO: Eliminate the IPUT.
-        return;
-      }
-
-      // Clobber all fields of escaped references for this field.
-      for (uint16_t escaped_ref : escaped_refs_) {
-        EscapedIFieldClobberKey clobber_key = { escaped_ref, type, field_id };
-        escaped_ifield_clobber_set_.insert(clobber_key);
-      }
-    }
-  }
-}
-
-uint16_t LocalValueNumbering::HandleSGet(MIR* mir, uint16_t opcode) {
-  const MirSFieldLoweringInfo& field_info = gvn_->GetMirGraph()->GetSFieldLoweringInfo(mir);
-  if (!field_info.IsResolved() || field_info.IsVolatile() ||
-      (!field_info.IsClassInitialized() &&
-       (mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0)) {
-    // Volatile SGETs (and unresolved fields are potentially volatile) have acquire semantics,
-    // and class initialization can call arbitrary functions, so we need to wipe aliasing values.
-    HandleInvokeOrClInitOrAcquireOp(mir);
-  }
-  uint16_t res;
-  if (!field_info.IsResolved() || field_info.IsVolatile()) {
-    // Unresolved fields may be volatile, so handle them as such to be safe.
-    // Volatile fields always get a new memory version; field id is irrelevant.
-    // Use result s_reg - will be unique.
-    res = gvn_->LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
-  } else {
-    uint16_t type = SGetMemAccessType(static_cast<Instruction::Code>(opcode));
-    uint16_t field_id = gvn_->GetSFieldId(mir);
-    auto lb = sfield_value_map_.lower_bound(field_id);
-    if (lb != sfield_value_map_.end() && lb->first == field_id) {
-      res = lb->second;
-    } else {
-      // Resolved non-volatile static fields can alias with non-resolved fields of the same type,
-      // so we need to use unresolved_sfield_version_[type] in addition to global_memory_version_
-      // to determine the version of the field.
-      res = gvn_->LookupValue(kResolvedSFieldOp, field_id,
-                              unresolved_sfield_version_[type], global_memory_version_);
-      sfield_value_map_.PutBefore(lb, field_id, res);
-    }
-  }
-  if (opcode == Instruction::SGET_WIDE) {
-    SetOperandValueWide(mir->ssa_rep->defs[0], res);
-  } else {
-    SetOperandValue(mir->ssa_rep->defs[0], res);
-  }
-  return res;
-}
-
-void LocalValueNumbering::HandleSPut(MIR* mir, uint16_t opcode) {
-  const MirSFieldLoweringInfo& field_info = gvn_->GetMirGraph()->GetSFieldLoweringInfo(mir);
-  if (!field_info.IsClassInitialized() &&
-      (mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0) {
-    // Class initialization can call arbitrary functions, so we need to wipe aliasing values.
-    HandleInvokeOrClInitOrAcquireOp(mir);
-  }
-  uint16_t type = SPutMemAccessType(static_cast<Instruction::Code>(opcode));
-  if (!field_info.IsResolved()) {
-    // Unresolved fields always alias with everything of the same type.
-    // Use mir->offset as modifier; without elaborate inlining, it will be unique.
-    unresolved_sfield_version_[type] =
-        gvn_->LookupValue(kUnresolvedSFieldOp, kNoValue, kNoValue, mir->offset);
-    RemoveSFieldsForType(type);
-  } else if (field_info.IsVolatile()) {
-    // Nothing to do; resolved volatile fields always get a new memory version anyway and
-    // can't alias with resolved non-volatile fields.
-  } else {
-    uint16_t field_id = gvn_->GetSFieldId(mir);
-    uint16_t value = (opcode == Instruction::SPUT_WIDE)
-                     ? GetOperandValueWide(mir->ssa_rep->uses[0])
-                     : GetOperandValue(mir->ssa_rep->uses[0]);
-    // Resolved non-volatile static fields can alias with non-resolved fields of the same type,
-    // so we need to use unresolved_sfield_version_[type] in addition to global_memory_version_
-    // to determine the version of the field.
-    auto lb = sfield_value_map_.lower_bound(field_id);
-    if (lb != sfield_value_map_.end() && lb->first == field_id) {
-      if (lb->second == value) {
-        // This SPUT can be eliminated; it stores the same value that's already in the field.
-        // TODO: Eliminate the SPUT.
-        return;
-      }
-      lb->second = value;  // Overwrite.
-    } else {
-      sfield_value_map_.PutBefore(lb, field_id, value);
-    }
-  }
-}
-
-void LocalValueNumbering::RemoveSFieldsForType(uint16_t type) {
-  // Erase all static fields of this type from the sfield_value_map_.
-  for (auto it = sfield_value_map_.begin(), end = sfield_value_map_.end(); it != end; ) {
-    if (gvn_->GetSFieldType(it->first) == type) {
-      it = sfield_value_map_.erase(it);
-    } else {
-      ++it;
-    }
-  }
-}
-
-void LocalValueNumbering::HandleInvokeOrClInitOrAcquireOp(MIR* mir) {
-  // Use mir->offset as modifier; without elaborate inlining, it will be unique.
-  global_memory_version_ =
-      gvn_->LookupValue(kInvokeMemoryVersionBumpOp, 0u, 0u, mir->offset);
-  // All static fields, as well as instance fields and array elements of aliasing references
-  // (including escaped references), may have been modified.
-  sfield_value_map_.clear();
-  aliasing_ifield_value_map_.clear();
-  aliasing_array_value_map_.clear();
-  escaped_refs_.clear();
-  escaped_ifield_clobber_set_.clear();
-  escaped_array_clobber_set_.clear();
-}
-
-uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
-  uint16_t res = kNoValue;
-  uint16_t opcode = mir->dalvikInsn.opcode;
-  switch (opcode) {
-    case Instruction::NOP:
-    case Instruction::RETURN_VOID:
-    case Instruction::RETURN:
-    case Instruction::RETURN_OBJECT:
-    case Instruction::RETURN_WIDE:
-    case Instruction::GOTO:
-    case Instruction::GOTO_16:
-    case Instruction::GOTO_32:
-    case Instruction::THROW:
-    case Instruction::FILL_ARRAY_DATA:
-    case Instruction::PACKED_SWITCH:
-    case Instruction::SPARSE_SWITCH:
-    case Instruction::IF_EQ:
-    case Instruction::IF_NE:
-    case Instruction::IF_LT:
-    case Instruction::IF_GE:
-    case Instruction::IF_GT:
-    case Instruction::IF_LE:
-    case Instruction::IF_EQZ:
-    case Instruction::IF_NEZ:
-    case Instruction::IF_LTZ:
-    case Instruction::IF_GEZ:
-    case Instruction::IF_GTZ:
-    case Instruction::IF_LEZ:
-    case kMirOpFusedCmplFloat:
-    case kMirOpFusedCmpgFloat:
-    case kMirOpFusedCmplDouble:
-    case kMirOpFusedCmpgDouble:
-    case kMirOpFusedCmpLong:
-      // Nothing defined - take no action.
-      break;
-
-    case Instruction::MONITOR_ENTER:
-      HandleNullCheck(mir, GetOperandValue(mir->ssa_rep->uses[0]));
-      HandleInvokeOrClInitOrAcquireOp(mir);  // Acquire operation.
-      break;
-
-    case Instruction::MONITOR_EXIT:
-      HandleNullCheck(mir, GetOperandValue(mir->ssa_rep->uses[0]));
-      // If we're running GVN and CanModify(), an uneliminated null check indicates a bytecode error.
-      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 &&
-          gvn_->work_lvn_ != nullptr && gvn_->CanModify()) {
-        LOG(WARNING) << "Bytecode error: MONITOR_EXIT is still null checked at 0x" << std::hex
-            << mir->offset << " in " << PrettyMethod(gvn_->cu_->method_idx, *gvn_->cu_->dex_file);
-      }
-      break;
-
-    case Instruction::FILLED_NEW_ARRAY:
-    case Instruction::FILLED_NEW_ARRAY_RANGE:
-      // Nothing defined but the result will be unique and non-null.
-      if (mir->next != nullptr && mir->next->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
-        uint16_t array = MarkNonAliasingNonNull(mir->next);
-        // Do not SetOperandValue(), we'll do that when we process the MOVE_RESULT_OBJECT.
-        if (kLocalValueNumberingEnableFilledNewArrayTracking && mir->ssa_rep->num_uses != 0u) {
-          AliasingValues* values = GetAliasingValues(&non_aliasing_array_value_map_, array);
-          // Clear the value if we got a merged version in a loop.
-          *values = AliasingValues(this);
-          for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
-            DCHECK_EQ(High16Bits(i), 0u);
-            uint16_t index = gvn_->LookupValue(Instruction::CONST, i, 0u, 0);
-            uint16_t value = GetOperandValue(mir->ssa_rep->uses[i]);
-            values->load_value_map.Put(index, value);
-            RangeCheckKey key = { array, index };
-            range_checked_.insert(key);
-          }
-        }
-        // The MOVE_RESULT_OBJECT will be processed next and we'll return the value name then.
-      }
-      // All args escaped (if references).
-      for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
-        uint16_t reg = GetOperandValue(mir->ssa_rep->uses[i]);
-        HandleEscapingRef(reg);
-      }
-      break;
-
-    case kMirOpNullCheck:
-      HandleNullCheck(mir, GetOperandValue(mir->ssa_rep->uses[0]));
-      break;
-
-    case Instruction::INVOKE_DIRECT:
-    case Instruction::INVOKE_DIRECT_RANGE:
-    case Instruction::INVOKE_VIRTUAL:
-    case Instruction::INVOKE_VIRTUAL_RANGE:
-    case Instruction::INVOKE_SUPER:
-    case Instruction::INVOKE_SUPER_RANGE:
-    case Instruction::INVOKE_INTERFACE:
-    case Instruction::INVOKE_INTERFACE_RANGE: {
-        // Nothing defined but handle the null check.
-        uint16_t reg = GetOperandValue(mir->ssa_rep->uses[0]);
-        HandleNullCheck(mir, reg);
-      }
-      FALLTHROUGH_INTENDED;
-    case Instruction::INVOKE_STATIC:
-    case Instruction::INVOKE_STATIC_RANGE:
-      // Make ref args aliasing.
-      HandleInvokeArgs(mir, this);
-      HandleInvokeOrClInitOrAcquireOp(mir);
-      break;
-
-    case Instruction::INSTANCE_OF: {
-        uint16_t operand = GetOperandValue(mir->ssa_rep->uses[0]);
-        uint16_t type = mir->dalvikInsn.vC;
-        res = gvn_->LookupValue(Instruction::INSTANCE_OF, operand, type, kNoValue);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
-      break;
-    case Instruction::CHECK_CAST:
-      if (gvn_->CanModify()) {
-        // Check if there was an instance-of operation on the same value and if we are
-        // in a block where its result is true. If so, we can eliminate the check-cast.
-        uint16_t operand = GetOperandValue(mir->ssa_rep->uses[0]);
-        uint16_t type = mir->dalvikInsn.vB;
-        uint16_t cond = gvn_->FindValue(Instruction::INSTANCE_OF, operand, type, kNoValue);
-        if (cond != kNoValue && gvn_->IsTrueInBlock(cond, Id())) {
-          if (gvn_->GetCompilationUnit()->verbose) {
-            LOG(INFO) << "Removing check-cast at 0x" << std::hex << mir->offset;
-          }
-          // Don't use kMirOpNop. Keep the check-cast as it defines the type of the register.
-          mir->optimization_flags |= MIR_IGNORE_CHECK_CAST;
-        }
-      }
-      break;
-
-    case Instruction::MOVE_RESULT:
-    case Instruction::MOVE_RESULT_OBJECT:
-      // 1 result, treat as unique each time, use result s_reg - will be unique.
-      res = GetOperandValue(mir->ssa_rep->defs[0]);
-      SetOperandValue(mir->ssa_rep->defs[0], res);
-      break;
-    case Instruction::MOVE_EXCEPTION:
-    case Instruction::NEW_INSTANCE:
-    case Instruction::NEW_ARRAY:
-      // 1 result, treat as unique each time, use result s_reg - will be unique.
-      res = MarkNonAliasingNonNull(mir);
-      SetOperandValue(mir->ssa_rep->defs[0], res);
-      break;
-    case Instruction::CONST_CLASS:
-      DCHECK_EQ(Low16Bits(mir->dalvikInsn.vB), mir->dalvikInsn.vB);
-      res = gvn_->LookupValue(Instruction::CONST_CLASS, mir->dalvikInsn.vB, 0, 0);
-      SetOperandValue(mir->ssa_rep->defs[0], res);
-      null_checked_.insert(res);
-      non_aliasing_refs_.insert(res);
-      break;
-    case Instruction::CONST_STRING:
-    case Instruction::CONST_STRING_JUMBO:
-      // These strings are internalized, so assign value based on the string pool index.
-      res = gvn_->LookupValue(Instruction::CONST_STRING, Low16Bits(mir->dalvikInsn.vB),
-                              High16Bits(mir->dalvikInsn.vB), 0);
-      SetOperandValue(mir->ssa_rep->defs[0], res);
-      null_checked_.insert(res);  // May already be there.
-      // NOTE: Hacking the contents of an internalized string via reflection is possible
-      // but the behavior is undefined. Therefore, we consider the string constant and
-      // the reference non-aliasing.
-      // TUNING: We could keep this property even if the reference "escapes".
-      non_aliasing_refs_.insert(res);  // May already be there.
-      break;
-    case Instruction::MOVE_RESULT_WIDE:
-      // 1 wide result, treat as unique each time, use result s_reg - will be unique.
-      res = GetOperandValueWide(mir->ssa_rep->defs[0]);
-      SetOperandValueWide(mir->ssa_rep->defs[0], res);
-      break;
-
-    case kMirOpPhi:
-      res = HandlePhi(mir);
-      break;
-
-    case Instruction::MOVE:
-    case Instruction::MOVE_OBJECT:
-    case Instruction::MOVE_16:
-    case Instruction::MOVE_OBJECT_16:
-    case Instruction::MOVE_FROM16:
-    case Instruction::MOVE_OBJECT_FROM16:
-    case kMirOpCopy:
-      // Just copy value number of source to value number of result.
-      res = GetOperandValue(mir->ssa_rep->uses[0]);
-      SetOperandValue(mir->ssa_rep->defs[0], res);
-      break;
-
-    case Instruction::MOVE_WIDE:
-    case Instruction::MOVE_WIDE_16:
-    case Instruction::MOVE_WIDE_FROM16:
-      // Just copy value number of source to value number of result.
-      res = GetOperandValueWide(mir->ssa_rep->uses[0]);
-      SetOperandValueWide(mir->ssa_rep->defs[0], res);
-      break;
-
-    case Instruction::CONST_HIGH16:
-      res = HandleConst(mir, mir->dalvikInsn.vB << 16);
-      break;
-    case Instruction::CONST:
-    case Instruction::CONST_4:
-    case Instruction::CONST_16:
-      res = HandleConst(mir, mir->dalvikInsn.vB);
-      break;
-
-    case Instruction::CONST_WIDE_16:
-    case Instruction::CONST_WIDE_32:
-      res = HandleConstWide(
-          mir,
-          mir->dalvikInsn.vB +
-              ((mir->dalvikInsn.vB & 0x80000000) != 0 ? UINT64_C(0xffffffff00000000) : 0u));
-      break;
-
-    case Instruction::CONST_WIDE:
-      res = HandleConstWide(mir, mir->dalvikInsn.vB_wide);
-      break;
-
-    case Instruction::CONST_WIDE_HIGH16:
-      res = HandleConstWide(mir, static_cast<uint64_t>(mir->dalvikInsn.vB) << 48);
-      break;
-
-    case Instruction::ARRAY_LENGTH: {
-        // Handle the null check.
-        uint16_t reg = GetOperandValue(mir->ssa_rep->uses[0]);
-        HandleNullCheck(mir, reg);
-      }
-      FALLTHROUGH_INTENDED;
-    case Instruction::NEG_INT:
-    case Instruction::NOT_INT:
-    case Instruction::NEG_FLOAT:
-    case Instruction::INT_TO_BYTE:
-    case Instruction::INT_TO_SHORT:
-    case Instruction::INT_TO_CHAR:
-    case Instruction::INT_TO_FLOAT:
-    case Instruction::FLOAT_TO_INT: {
-        // res = op + 1 operand
-        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        res = gvn_->LookupValue(opcode, operand1, kNoValue, kNoValue);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::LONG_TO_FLOAT:
-    case Instruction::LONG_TO_INT:
-    case Instruction::DOUBLE_TO_FLOAT:
-    case Instruction::DOUBLE_TO_INT: {
-        // res = op + 1 wide operand
-        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        res = gvn_->LookupValue(opcode, operand1, kNoValue, kNoValue);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::DOUBLE_TO_LONG:
-    case Instruction::LONG_TO_DOUBLE:
-    case Instruction::NEG_LONG:
-    case Instruction::NOT_LONG:
-    case Instruction::NEG_DOUBLE: {
-        // wide res = op + 1 wide operand
-        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        res = gvn_->LookupValue(opcode, operand1, kNoValue, kNoValue);
-        SetOperandValueWide(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::FLOAT_TO_DOUBLE:
-    case Instruction::FLOAT_TO_LONG:
-    case Instruction::INT_TO_DOUBLE:
-    case Instruction::INT_TO_LONG: {
-        // wide res = op + 1 operand
-        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        res = gvn_->LookupValue(opcode, operand1, kNoValue, kNoValue);
-        SetOperandValueWide(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::CMPL_DOUBLE:
-    case Instruction::CMPG_DOUBLE:
-    case Instruction::CMP_LONG: {
-        // res = op + 2 wide operands
-        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
-        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::DIV_INT:
-    case Instruction::DIV_INT_2ADDR:
-    case Instruction::REM_INT:
-    case Instruction::REM_INT_2ADDR:
-      HandleDivZeroCheck(mir, GetOperandValue(mir->ssa_rep->uses[1]));
-      FALLTHROUGH_INTENDED;
-
-    case Instruction::CMPG_FLOAT:
-    case Instruction::CMPL_FLOAT:
-    case Instruction::ADD_INT:
-    case Instruction::ADD_INT_2ADDR:
-    case Instruction::MUL_INT:
-    case Instruction::MUL_INT_2ADDR:
-    case Instruction::AND_INT:
-    case Instruction::AND_INT_2ADDR:
-    case Instruction::OR_INT:
-    case Instruction::OR_INT_2ADDR:
-    case Instruction::XOR_INT:
-    case Instruction::XOR_INT_2ADDR:
-    case Instruction::SUB_INT:
-    case Instruction::SUB_INT_2ADDR:
-    case Instruction::SHL_INT:
-    case Instruction::SHL_INT_2ADDR:
-    case Instruction::SHR_INT:
-    case Instruction::SHR_INT_2ADDR:
-    case Instruction::USHR_INT:
-    case Instruction::USHR_INT_2ADDR: {
-        // res = op + 2 operands
-        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
-        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::DIV_LONG:
-    case Instruction::REM_LONG:
-    case Instruction::DIV_LONG_2ADDR:
-    case Instruction::REM_LONG_2ADDR:
-      HandleDivZeroCheck(mir, GetOperandValueWide(mir->ssa_rep->uses[2]));
-      FALLTHROUGH_INTENDED;
-
-    case Instruction::ADD_LONG:
-    case Instruction::SUB_LONG:
-    case Instruction::MUL_LONG:
-    case Instruction::AND_LONG:
-    case Instruction::OR_LONG:
-    case Instruction::XOR_LONG:
-    case Instruction::ADD_LONG_2ADDR:
-    case Instruction::SUB_LONG_2ADDR:
-    case Instruction::MUL_LONG_2ADDR:
-    case Instruction::AND_LONG_2ADDR:
-    case Instruction::OR_LONG_2ADDR:
-    case Instruction::XOR_LONG_2ADDR:
-    case Instruction::ADD_DOUBLE:
-    case Instruction::SUB_DOUBLE:
-    case Instruction::MUL_DOUBLE:
-    case Instruction::DIV_DOUBLE:
-    case Instruction::REM_DOUBLE:
-    case Instruction::ADD_DOUBLE_2ADDR:
-    case Instruction::SUB_DOUBLE_2ADDR:
-    case Instruction::MUL_DOUBLE_2ADDR:
-    case Instruction::DIV_DOUBLE_2ADDR:
-    case Instruction::REM_DOUBLE_2ADDR: {
-        // wide res = op + 2 wide operands
-        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
-        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
-        SetOperandValueWide(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::SHL_LONG:
-    case Instruction::SHR_LONG:
-    case Instruction::USHR_LONG:
-    case Instruction::SHL_LONG_2ADDR:
-    case Instruction::SHR_LONG_2ADDR:
-    case Instruction::USHR_LONG_2ADDR: {
-        // wide res = op + 1 wide operand + 1 operand
-        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[2]);
-        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
-        SetOperandValueWide(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::ADD_FLOAT:
-    case Instruction::SUB_FLOAT:
-    case Instruction::MUL_FLOAT:
-    case Instruction::DIV_FLOAT:
-    case Instruction::REM_FLOAT:
-    case Instruction::ADD_FLOAT_2ADDR:
-    case Instruction::SUB_FLOAT_2ADDR:
-    case Instruction::MUL_FLOAT_2ADDR:
-    case Instruction::DIV_FLOAT_2ADDR:
-    case Instruction::REM_FLOAT_2ADDR: {
-        // res = op + 2 operands
-        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
-        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::RSUB_INT:
-    case Instruction::ADD_INT_LIT16:
-    case Instruction::MUL_INT_LIT16:
-    case Instruction::DIV_INT_LIT16:
-    case Instruction::REM_INT_LIT16:
-    case Instruction::AND_INT_LIT16:
-    case Instruction::OR_INT_LIT16:
-    case Instruction::XOR_INT_LIT16:
-    case Instruction::ADD_INT_LIT8:
-    case Instruction::RSUB_INT_LIT8:
-    case Instruction::MUL_INT_LIT8:
-    case Instruction::DIV_INT_LIT8:
-    case Instruction::REM_INT_LIT8:
-    case Instruction::AND_INT_LIT8:
-    case Instruction::OR_INT_LIT8:
-    case Instruction::XOR_INT_LIT8:
-    case Instruction::SHL_INT_LIT8:
-    case Instruction::SHR_INT_LIT8:
-    case Instruction::USHR_INT_LIT8: {
-        // Same as res = op + 2 operands, except use vC as operand 2
-        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        uint16_t operand2 = gvn_->LookupValue(Instruction::CONST, mir->dalvikInsn.vC, 0, 0);
-        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::AGET_OBJECT:
-    case Instruction::AGET:
-    case Instruction::AGET_WIDE:
-    case Instruction::AGET_BOOLEAN:
-    case Instruction::AGET_BYTE:
-    case Instruction::AGET_CHAR:
-    case Instruction::AGET_SHORT:
-      res = HandleAGet(mir, opcode);
-      break;
-
-    case Instruction::APUT_OBJECT:
-      HandlePutObject(mir);
-      FALLTHROUGH_INTENDED;
-    case Instruction::APUT:
-    case Instruction::APUT_WIDE:
-    case Instruction::APUT_BYTE:
-    case Instruction::APUT_BOOLEAN:
-    case Instruction::APUT_SHORT:
-    case Instruction::APUT_CHAR:
-      HandleAPut(mir, opcode);
-      break;
-
-    case Instruction::IGET_OBJECT:
-    case Instruction::IGET:
-    case Instruction::IGET_WIDE:
-    case Instruction::IGET_BOOLEAN:
-    case Instruction::IGET_BYTE:
-    case Instruction::IGET_CHAR:
-    case Instruction::IGET_SHORT:
-      res = HandleIGet(mir, opcode);
-      break;
-
-    case Instruction::IPUT_OBJECT:
-      HandlePutObject(mir);
-      FALLTHROUGH_INTENDED;
-    case Instruction::IPUT:
-    case Instruction::IPUT_WIDE:
-    case Instruction::IPUT_BOOLEAN:
-    case Instruction::IPUT_BYTE:
-    case Instruction::IPUT_CHAR:
-    case Instruction::IPUT_SHORT:
-      HandleIPut(mir, opcode);
-      break;
-
-    case Instruction::SGET_OBJECT:
-    case Instruction::SGET:
-    case Instruction::SGET_WIDE:
-    case Instruction::SGET_BOOLEAN:
-    case Instruction::SGET_BYTE:
-    case Instruction::SGET_CHAR:
-    case Instruction::SGET_SHORT:
-      res = HandleSGet(mir, opcode);
-      break;
-
-    case Instruction::SPUT_OBJECT:
-      HandlePutObject(mir);
-      FALLTHROUGH_INTENDED;
-    case Instruction::SPUT:
-    case Instruction::SPUT_WIDE:
-    case Instruction::SPUT_BOOLEAN:
-    case Instruction::SPUT_BYTE:
-    case Instruction::SPUT_CHAR:
-    case Instruction::SPUT_SHORT:
-      HandleSPut(mir, opcode);
-      break;
-  }
-  return res;
-}
-
-uint16_t LocalValueNumbering::GetEndingVregValueNumberImpl(int v_reg, bool wide) const {
-  const BasicBlock* bb = gvn_->GetBasicBlock(Id());
-  DCHECK(bb != nullptr);
-  int s_reg = bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
-  if (s_reg == INVALID_SREG) {
-    return kNoValue;
-  }
-  if (gvn_->GetMirGraph()->GetRegLocation(s_reg).wide != wide) {
-    return kNoValue;
-  }
-  if (wide) {
-    int high_s_reg = bb->data_flow_info->vreg_to_ssa_map_exit[v_reg + 1];
-    if (high_s_reg != s_reg + 1) {
-      return kNoValue;  // High word has been overwritten.
-    }
-    return GetSregValueWide(s_reg);
-  } else {
-    return GetSregValue(s_reg);
-  }
-}
-
-uint16_t LocalValueNumbering::GetStartingVregValueNumberImpl(int v_reg, bool wide) const {
-  DCHECK_EQ(gvn_->mode_, GlobalValueNumbering::kModeGvnPostProcessing);
-  DCHECK(gvn_->CanModify());
-  const BasicBlock* bb = gvn_->GetBasicBlock(Id());
-  DCHECK(bb != nullptr);
-  DCHECK_NE(bb->predecessors.size(), 0u);
-  if (bb->predecessors.size() == 1u) {
-    return gvn_->GetLvn(bb->predecessors[0])->GetEndingVregValueNumberImpl(v_reg, wide);
-  }
-  merge_names_.clear();
-  uint16_t value_name = kNoValue;
-  bool same_values = true;
-  for (BasicBlockId pred_id : bb->predecessors) {
-    value_name = gvn_->GetLvn(pred_id)->GetEndingVregValueNumberImpl(v_reg, wide);
-    if (value_name == kNoValue) {
-      return kNoValue;
-    }
-    same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
-    merge_names_.push_back(value_name);
-  }
-  if (same_values) {
-    // value_name already contains the result.
-  } else {
-    auto lb = merge_map_.lower_bound(merge_names_);
-    if (lb != merge_map_.end() && !merge_map_.key_comp()(merge_names_, lb->first)) {
-      value_name = lb->second;
-    } else {
-      value_name = kNoValue;  // We never assigned a value name to this set of merged names.
-    }
-  }
-  return value_name;
-}
-
-}    // namespace art
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
deleted file mode 100644
index dff5e27..0000000
--- a/compiler/dex/local_value_numbering.h
+++ /dev/null
@@ -1,416 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_LOCAL_VALUE_NUMBERING_H_
-#define ART_COMPILER_DEX_LOCAL_VALUE_NUMBERING_H_
-
-#include <memory>
-
-#include "base/arena_object.h"
-#include "base/logging.h"
-#include "dex_instruction_utils.h"
-#include "global_value_numbering.h"
-
-namespace art {
-
-class DexFile;
-
-// Enable/disable tracking values stored in the FILLED_NEW_ARRAY result.
-static constexpr bool kLocalValueNumberingEnableFilledNewArrayTracking = true;
-
-class LocalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
- private:
-  static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
-
- public:
-  LocalValueNumbering(GlobalValueNumbering* gvn, BasicBlockId id, ScopedArenaAllocator* allocator);
-
-  BasicBlockId Id() const {
-    return id_;
-  }
-
-  bool Equals(const LocalValueNumbering& other) const;
-
-  bool IsValueNullChecked(uint16_t value_name) const {
-    return null_checked_.find(value_name) != null_checked_.end();
-  }
-
-  bool IsValueDivZeroChecked(uint16_t value_name) const {
-    return div_zero_checked_.find(value_name) != div_zero_checked_.end();
-  }
-
-  uint16_t GetSregValue(uint16_t s_reg) const {
-    DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
-    return GetSregValueImpl(s_reg, &sreg_value_map_);
-  }
-
-  uint16_t GetSregValueWide(uint16_t s_reg) const {
-    DCHECK(gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
-    return GetSregValueImpl(s_reg, &sreg_wide_value_map_);
-  }
-
-  // Get the starting value number for a given dalvik register.
-  uint16_t GetStartingVregValueNumber(int v_reg) const {
-    return GetStartingVregValueNumberImpl(v_reg, false);
-  }
-
-  // Get the starting value number for a given wide dalvik register.
-  uint16_t GetStartingVregValueNumberWide(int v_reg) const {
-    return GetStartingVregValueNumberImpl(v_reg, true);
-  }
-
-  enum MergeType {
-    kNormalMerge,
-    kCatchMerge,
-    kReturnMerge,  // RETURN or PHI+RETURN. Merge only sreg maps.
-  };
-
-  void MergeOne(const LocalValueNumbering& other, MergeType merge_type);
-  void Merge(MergeType merge_type);  // Merge gvn_->merge_lvns_.
-  void PrepareEntryBlock();
-
-  uint16_t GetValueNumber(MIR* mir);
-
- private:
-  // A set of value names.
-  typedef GlobalValueNumbering::ValueNameSet ValueNameSet;
-
-  // Key is s_reg, value is value name.
-  typedef ScopedArenaSafeMap<uint16_t, uint16_t> SregValueMap;
-
-  uint16_t GetEndingVregValueNumberImpl(int v_reg, bool wide) const;
-  uint16_t GetStartingVregValueNumberImpl(int v_reg, bool wide) const;
-
-  uint16_t GetSregValueImpl(int s_reg, const SregValueMap* map) const {
-    uint16_t res = kNoValue;
-    auto lb = map->find(s_reg);
-    if (lb != map->end()) {
-      res = lb->second;
-    } else {
-      res = gvn_->FindValue(kNoValue, s_reg, kNoValue, kNoValue);
-    }
-    return res;
-  }
-
-  void SetOperandValueImpl(uint16_t s_reg, uint16_t value, SregValueMap* map) {
-    DCHECK_EQ(map->count(s_reg), 0u);
-    map->Put(s_reg, value);
-  }
-
-  uint16_t GetOperandValueImpl(int s_reg, const SregValueMap* map) const {
-    uint16_t res = kNoValue;
-    auto lb = map->find(s_reg);
-    if (lb != map->end()) {
-      res = lb->second;
-    } else {
-      // Using the original value; s_reg refers to an input reg.
-      res = gvn_->LookupValue(kNoValue, s_reg, kNoValue, kNoValue);
-    }
-    return res;
-  }
-
-  void SetOperandValue(uint16_t s_reg, uint16_t value) {
-    DCHECK_EQ(sreg_wide_value_map_.count(s_reg), 0u);
-    DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
-    SetOperandValueImpl(s_reg, value, &sreg_value_map_);
-  }
-
-  uint16_t GetOperandValue(int s_reg) const {
-    DCHECK_EQ(sreg_wide_value_map_.count(s_reg), 0u);
-    DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
-    return GetOperandValueImpl(s_reg, &sreg_value_map_);
-  }
-
-  void SetOperandValueWide(uint16_t s_reg, uint16_t value) {
-    DCHECK_EQ(sreg_value_map_.count(s_reg), 0u);
-    DCHECK(gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
-    DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).high_word);
-    SetOperandValueImpl(s_reg, value, &sreg_wide_value_map_);
-  }
-
-  uint16_t GetOperandValueWide(int s_reg) const {
-    DCHECK_EQ(sreg_value_map_.count(s_reg), 0u);
-    DCHECK(gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
-    DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).high_word);
-    return GetOperandValueImpl(s_reg, &sreg_wide_value_map_);
-  }
-
-  struct RangeCheckKey {
-    uint16_t array;
-    uint16_t index;
-
-    // NOTE: Can't define this at namespace scope for a private struct.
-    bool operator==(const RangeCheckKey& other) const {
-      return array == other.array && index == other.index;
-    }
-  };
-
-  struct RangeCheckKeyComparator {
-    bool operator()(const RangeCheckKey& lhs, const RangeCheckKey& rhs) const {
-      if (lhs.array != rhs.array) {
-        return lhs.array < rhs.array;
-      }
-      return lhs.index < rhs.index;
-    }
-  };
-
-  typedef ScopedArenaSet<RangeCheckKey, RangeCheckKeyComparator> RangeCheckSet;
-
-  // Maps instance field "location" (derived from base, field_id and type) to value name.
-  typedef ScopedArenaSafeMap<uint16_t, uint16_t> IFieldLocToValueMap;
-
-  // Maps static field id to value name.
-  typedef ScopedArenaSafeMap<uint16_t, uint16_t> SFieldToValueMap;
-
-  struct EscapedIFieldClobberKey {
-    uint16_t base;      // Or array.
-    uint16_t type;
-    uint16_t field_id;  // None (kNoValue) for arrays and unresolved instance field stores.
-
-    // NOTE: Can't define this at namespace scope for a private struct.
-    bool operator==(const EscapedIFieldClobberKey& other) const {
-      return base == other.base && type == other.type && field_id == other.field_id;
-    }
-  };
-
-  struct EscapedIFieldClobberKeyComparator {
-    bool operator()(const EscapedIFieldClobberKey& lhs, const EscapedIFieldClobberKey& rhs) const {
-      // Compare base first. This makes sequential iteration respect the order of base.
-      if (lhs.base != rhs.base) {
-        return lhs.base < rhs.base;
-      }
-      // Compare type second. This makes the type-clobber entries (field_id == kNoValue) last
-      // for a given base and type, and makes it easy to prune unnecessary entries when merging
-      // escaped_ifield_clobber_set_ from multiple LVNs.
-      if (lhs.type != rhs.type) {
-        return lhs.type < rhs.type;
-      }
-      return lhs.field_id < rhs.field_id;
-    }
-  };
-
-  typedef ScopedArenaSet<EscapedIFieldClobberKey, EscapedIFieldClobberKeyComparator>
-      EscapedIFieldClobberSet;
-
-  struct EscapedArrayClobberKey {
-    uint16_t base;
-    uint16_t type;
-
-    // NOTE: Can't define this at namespace scope for a private struct.
-    bool operator==(const EscapedArrayClobberKey& other) const {
-      return base == other.base && type == other.type;
-    }
-  };
-
-  struct EscapedArrayClobberKeyComparator {
-    bool operator()(const EscapedArrayClobberKey& lhs, const EscapedArrayClobberKey& rhs) const {
-      // Compare base first. This makes sequential iteration respect the order of base.
-      if (lhs.base != rhs.base) {
-        return lhs.base < rhs.base;
-      }
-      return lhs.type < rhs.type;
-    }
-  };
-
-  // Clobber set for previously non-aliasing array refs that escaped.
-  typedef ScopedArenaSet<EscapedArrayClobberKey, EscapedArrayClobberKeyComparator>
-      EscapedArrayClobberSet;
-
-  // Known location values for an aliasing set. The set can be tied to one of:
-  //   1. Instance field. The locations are aliasing references used to access the field.
-  //   2. Non-aliasing array reference. The locations are indexes to the array.
-  //   3. Aliasing array type. The locations are (reference, index) pair ids assigned by GVN.
-  // In each case we keep track of the last stored value, if any, and the set of locations
-  // where it was stored. We also keep track of all values known for the current write state
-  // (load_value_map), which can be known either because they have been loaded since the last
-  // store or because they contained the last_stored_value before the store and thus could not
-  // have changed as a result.
-  struct AliasingValues {
-    explicit AliasingValues(LocalValueNumbering* lvn)
-        : memory_version_before_stores(kNoValue),
-          last_stored_value(kNoValue),
-          store_loc_set(std::less<uint16_t>(), lvn->null_checked_.get_allocator()),
-          last_load_memory_version(kNoValue),
-          load_value_map(std::less<uint16_t>(), lvn->null_checked_.get_allocator()) {
-    }
-
-    uint16_t memory_version_before_stores;  // kNoValue if start version for the field.
-    uint16_t last_stored_value;             // Last stored value name, kNoValue if none.
-    ValueNameSet store_loc_set;             // Where was last_stored_value stored.
-
-    // Maps refs (other than stored_to) to currently known values for this field. On write,
-    // anything that differs from the written value is removed as it may be overwritten.
-    uint16_t last_load_memory_version;    // kNoValue if not known.
-    ScopedArenaSafeMap<uint16_t, uint16_t> load_value_map;
-
-    // NOTE: Can't define this at namespace scope for a private struct.
-    bool operator==(const AliasingValues& other) const {
-      return memory_version_before_stores == other.memory_version_before_stores &&
-          last_load_memory_version == other.last_load_memory_version &&
-          last_stored_value == other.last_stored_value &&
-          store_loc_set == other.store_loc_set &&
-          load_value_map == other.load_value_map;
-    }
-  };
-
-  // Maps instance field id to AliasingValues, locations are object refs.
-  typedef ScopedArenaSafeMap<uint16_t, AliasingValues> AliasingIFieldValuesMap;
-
-  // Maps non-aliasing array reference to AliasingValues, locations are array indexes.
-  typedef ScopedArenaSafeMap<uint16_t, AliasingValues> NonAliasingArrayValuesMap;
-
-  // Maps aliasing array type to AliasingValues, locations are (array, index) pair ids.
-  typedef ScopedArenaSafeMap<uint16_t, AliasingValues> AliasingArrayValuesMap;
-
-  // Helper classes defining versions for updating and merging the AliasingValues maps above.
-  class AliasingIFieldVersions;
-  class NonAliasingArrayVersions;
-  class AliasingArrayVersions;
-
-  template <typename Map>
-  AliasingValues* GetAliasingValues(Map* map, const typename Map::key_type& key);
-
-  template <typename Versions, typename KeyType>
-  void UpdateAliasingValuesLoadVersion(const KeyType& key, AliasingValues* values);
-
-  template <typename Versions, typename Map>
-  static uint16_t AliasingValuesMergeGet(GlobalValueNumbering* gvn,
-                                         const LocalValueNumbering* lvn,
-                                         Map* map, const typename Map::key_type& key,
-                                         uint16_t location);
-
-  template <typename Versions, typename Map>
-  uint16_t HandleAliasingValuesGet(Map* map, const typename Map::key_type& key,
-                                   uint16_t location);
-
-  template <typename Versions, typename Map>
-  bool HandleAliasingValuesPut(Map* map, const typename Map::key_type& key,
-                               uint16_t location, uint16_t value);
-
-  template <typename K>
-  void CopyAliasingValuesMap(ScopedArenaSafeMap<K, AliasingValues>* dest,
-                             const ScopedArenaSafeMap<K, AliasingValues>& src);
-
-  uint16_t MarkNonAliasingNonNull(MIR* mir);
-  bool IsNonAliasing(uint16_t reg) const;
-  bool IsNonAliasingIField(uint16_t reg, uint16_t field_id, uint16_t type) const;
-  bool IsNonAliasingArray(uint16_t reg, uint16_t type) const;
-  void HandleNullCheck(MIR* mir, uint16_t reg);
-  void HandleRangeCheck(MIR* mir, uint16_t array, uint16_t index);
-  void HandleDivZeroCheck(MIR* mir, uint16_t reg);
-  void HandlePutObject(MIR* mir);
-  void HandleEscapingRef(uint16_t base);
-  void HandleInvokeArgs(const MIR* mir, const LocalValueNumbering* mir_lvn);
-  uint16_t HandlePhi(MIR* mir);
-  uint16_t HandleConst(MIR* mir, uint32_t value);
-  uint16_t HandleConstWide(MIR* mir, uint64_t value);
-  uint16_t HandleAGet(MIR* mir, uint16_t opcode);
-  void HandleAPut(MIR* mir, uint16_t opcode);
-  uint16_t HandleIGet(MIR* mir, uint16_t opcode);
-  void HandleIPut(MIR* mir, uint16_t opcode);
-  uint16_t HandleSGet(MIR* mir, uint16_t opcode);
-  void HandleSPut(MIR* mir, uint16_t opcode);
-  void RemoveSFieldsForType(uint16_t type);
-  void HandleInvokeOrClInitOrAcquireOp(MIR* mir);
-
-  bool SameMemoryVersion(const LocalValueNumbering& other) const;
-
-  uint16_t NewMemoryVersion(uint16_t* new_version);
-  void MergeMemoryVersions(bool clobbered_catch);
-
-  void PruneNonAliasingRefsForCatch();
-
-  template <typename Set, Set LocalValueNumbering::* set_ptr>
-  void IntersectSets();
-
-  void CopyLiveSregValues(SregValueMap* dest, const SregValueMap& src);
-
-  // Intersect SSA reg value maps as sets, ignore dead regs.
-  template <SregValueMap LocalValueNumbering::* map_ptr>
-  void IntersectSregValueMaps();
-
-  // Intersect maps as sets. The value type must be equality-comparable.
-  template <typename Map>
-  static void InPlaceIntersectMaps(Map* work_map, const Map& other_map);
-
-  template <typename Set, Set LocalValueNumbering::*set_ptr, void (LocalValueNumbering::*MergeFn)(
-      const typename Set::value_type& entry, typename Set::iterator hint)>
-  void MergeSets();
-
-  void IntersectAliasingValueLocations(AliasingValues* work_values, const AliasingValues* values);
-
-  void MergeEscapedRefs(const ValueNameSet::value_type& entry, ValueNameSet::iterator hint);
-  void MergeEscapedIFieldTypeClobberSets(const EscapedIFieldClobberSet::value_type& entry,
-                                         EscapedIFieldClobberSet::iterator hint);
-  void MergeEscapedIFieldClobberSets(const EscapedIFieldClobberSet::value_type& entry,
-                                     EscapedIFieldClobberSet::iterator hint);
-  void MergeEscapedArrayClobberSets(const EscapedArrayClobberSet::value_type& entry,
-                                    EscapedArrayClobberSet::iterator hint);
-  void MergeSFieldValues(const SFieldToValueMap::value_type& entry,
-                         SFieldToValueMap::iterator hint);
-  void MergeNonAliasingIFieldValues(const IFieldLocToValueMap::value_type& entry,
-                                    IFieldLocToValueMap::iterator hint);
-  void MergeNullChecked();
-  void MergeDivZeroChecked();
-
-  template <typename Map, Map LocalValueNumbering::*map_ptr, typename Versions>
-  void MergeAliasingValues(const typename Map::value_type& entry, typename Map::iterator hint);
-
-  GlobalValueNumbering* gvn_;
-
-  // We're using the block id as a 16-bit operand value for some lookups.
-  static_assert(sizeof(BasicBlockId) == sizeof(uint16_t), "BasicBlockId must be 16 bit");
-  BasicBlockId id_;
-
-  SregValueMap sreg_value_map_;
-  SregValueMap sreg_wide_value_map_;
-
-  SFieldToValueMap sfield_value_map_;
-  IFieldLocToValueMap non_aliasing_ifield_value_map_;
-  AliasingIFieldValuesMap aliasing_ifield_value_map_;
-  NonAliasingArrayValuesMap non_aliasing_array_value_map_;
-  AliasingArrayValuesMap aliasing_array_value_map_;
-
-  // Data for dealing with memory clobbering and store/load aliasing.
-  uint16_t global_memory_version_;
-  uint16_t unresolved_sfield_version_[kDexMemAccessTypeCount];
-  uint16_t unresolved_ifield_version_[kDexMemAccessTypeCount];
-  // Value names of references to objects that cannot be reached through a different value name.
-  ValueNameSet non_aliasing_refs_;
-  // Previously non-aliasing refs that escaped but can still be used for non-aliasing AGET/IGET.
-  ValueNameSet escaped_refs_;
-  // Blacklists for cases where escaped_refs_ can't be used.
-  EscapedIFieldClobberSet escaped_ifield_clobber_set_;
-  EscapedArrayClobberSet escaped_array_clobber_set_;
-
-  // Range check and null check elimination.
-  RangeCheckSet range_checked_;
-  ValueNameSet null_checked_;
-  ValueNameSet div_zero_checked_;
-
-  // Reuse one vector for all merges to avoid leaking too much memory on the ArenaStack.
-  mutable ScopedArenaVector<uint16_t> merge_names_;
-  // Map to identify when different locations merge the same values.
-  ScopedArenaSafeMap<ScopedArenaVector<uint16_t>, uint16_t> merge_map_;
-  // New memory version for merge, kNoValue if all memory versions matched.
-  uint16_t merge_new_memory_version_;
-
-  DISALLOW_COPY_AND_ASSIGN(LocalValueNumbering);
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_LOCAL_VALUE_NUMBERING_H_
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
deleted file mode 100644
index f98969e..0000000
--- a/compiler/dex/local_value_numbering_test.cc
+++ /dev/null
@@ -1,920 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "dex/mir_field_info.h"
-#include "global_value_numbering.h"
-#include "local_value_numbering.h"
-#include "gtest/gtest.h"
-
-namespace art {
-
-class LocalValueNumberingTest : public testing::Test {
- protected:
-  struct IFieldDef {
-    uint16_t field_idx;
-    uintptr_t declaring_dex_file;
-    uint16_t declaring_field_idx;
-    bool is_volatile;
-    DexMemAccessType type;
-  };
-
-  struct SFieldDef {
-    uint16_t field_idx;
-    uintptr_t declaring_dex_file;
-    uint16_t declaring_field_idx;
-    bool is_volatile;
-    DexMemAccessType type;
-  };
-
-  struct MIRDef {
-    static constexpr size_t kMaxSsaDefs = 2;
-    static constexpr size_t kMaxSsaUses = 4;
-
-    Instruction::Code opcode;
-    int64_t value;
-    uint32_t field_info;
-    size_t num_uses;
-    int32_t uses[kMaxSsaUses];
-    size_t num_defs;
-    int32_t defs[kMaxSsaDefs];
-  };
-
-#define DEF_CONST(opcode, reg, value) \
-    { opcode, value, 0u, 0, { }, 1, { reg } }
-#define DEF_CONST_WIDE(opcode, reg, value) \
-    { opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_CONST_STRING(opcode, reg, index) \
-    { opcode, index, 0u, 0, { }, 1, { reg } }
-#define DEF_IGET(opcode, reg, obj, field_info) \
-    { opcode, 0u, field_info, 1, { obj }, 1, { reg } }
-#define DEF_IGET_WIDE(opcode, reg, obj, field_info) \
-    { opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
-#define DEF_IPUT(opcode, reg, obj, field_info) \
-    { opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
-#define DEF_IPUT_WIDE(opcode, reg, obj, field_info) \
-    { opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
-#define DEF_SGET(opcode, reg, field_info) \
-    { opcode, 0u, field_info, 0, { }, 1, { reg } }
-#define DEF_SGET_WIDE(opcode, reg, field_info) \
-    { opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_SPUT(opcode, reg, field_info) \
-    { opcode, 0u, field_info, 1, { reg }, 0, { } }
-#define DEF_SPUT_WIDE(opcode, reg, field_info) \
-    { opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
-#define DEF_AGET(opcode, reg, obj, idx) \
-    { opcode, 0u, 0u, 2, { obj, idx }, 1, { reg } }
-#define DEF_AGET_WIDE(opcode, reg, obj, idx) \
-    { opcode, 0u, 0u, 2, { obj, idx }, 2, { reg, reg + 1 } }
-#define DEF_APUT(opcode, reg, obj, idx) \
-    { opcode, 0u, 0u, 3, { reg, obj, idx }, 0, { } }
-#define DEF_APUT_WIDE(opcode, reg, obj, idx) \
-    { opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }
-#define DEF_INVOKE1(opcode, reg) \
-    { opcode, 0u, 0u, 1, { reg }, 0, { } }
-#define DEF_UNIQUE_REF(opcode, reg) \
-    { opcode, 0u, 0u, 0, { }, 1, { reg } }  // CONST_CLASS, CONST_STRING, NEW_ARRAY, ...
-#define DEF_DIV_REM(opcode, result, dividend, divisor) \
-    { opcode, 0u, 0u, 2, { dividend, divisor }, 1, { result } }
-#define DEF_DIV_REM_WIDE(opcode, result, dividend, divisor) \
-    { opcode, 0u, 0u, 4, { dividend, dividend + 1, divisor, divisor + 1 }, 2, { result, result + 1 } }
-
-  void DoPrepareIFields(const IFieldDef* defs, size_t count) {
-    cu_.mir_graph->ifield_lowering_infos_.clear();
-    cu_.mir_graph->ifield_lowering_infos_.reserve(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const IFieldDef* def = &defs[i];
-      MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
-      if (def->declaring_dex_file != 0u) {
-        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
-        field_info.declaring_field_idx_ = def->declaring_field_idx;
-        field_info.flags_ &= ~(def->is_volatile ? 0u : MirSFieldLoweringInfo::kFlagIsVolatile);
-      }
-      cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
-    }
-  }
-
-  template <size_t count>
-  void PrepareIFields(const IFieldDef (&defs)[count]) {
-    DoPrepareIFields(defs, count);
-  }
-
-  void DoPrepareSFields(const SFieldDef* defs, size_t count) {
-    cu_.mir_graph->sfield_lowering_infos_.clear();
-    cu_.mir_graph->sfield_lowering_infos_.reserve(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const SFieldDef* def = &defs[i];
-      MirSFieldLoweringInfo field_info(def->field_idx, def->type);
-      // Mark even unresolved fields as initialized.
-      field_info.flags_ |= MirSFieldLoweringInfo::kFlagClassIsInitialized;
-      // NOTE: MirSFieldLoweringInfo::kFlagClassIsInDexCache isn't used by LVN.
-      if (def->declaring_dex_file != 0u) {
-        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
-        field_info.declaring_field_idx_ = def->declaring_field_idx;
-        field_info.flags_ &= ~(def->is_volatile ? 0u : MirSFieldLoweringInfo::kFlagIsVolatile);
-      }
-      cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
-    }
-  }
-
-  template <size_t count>
-  void PrepareSFields(const SFieldDef (&defs)[count]) {
-    DoPrepareSFields(defs, count);
-  }
-
-  void DoPrepareMIRs(const MIRDef* defs, size_t count) {
-    mir_count_ = count;
-    mirs_ = cu_.arena.AllocArray<MIR>(count, kArenaAllocMIR);
-    ssa_reps_.resize(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const MIRDef* def = &defs[i];
-      MIR* mir = &mirs_[i];
-      mir->dalvikInsn.opcode = def->opcode;
-      mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
-      mir->dalvikInsn.vB_wide = def->value;
-      if (IsInstructionIGetOrIPut(def->opcode)) {
-        ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.size());
-        mir->meta.ifield_lowering_info = def->field_info;
-        ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->field_info].MemAccessType(),
-                  IGetOrIPutMemAccessType(def->opcode));
-      } else if (IsInstructionSGetOrSPut(def->opcode)) {
-        ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.size());
-        mir->meta.sfield_lowering_info = def->field_info;
-        ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->field_info].MemAccessType(),
-                  SGetOrSPutMemAccessType(def->opcode));
-      }
-      mir->ssa_rep = &ssa_reps_[i];
-      mir->ssa_rep->num_uses = def->num_uses;
-      mir->ssa_rep->uses = const_cast<int32_t*>(def->uses);  // Not modified by LVN.
-      mir->ssa_rep->num_defs = def->num_defs;
-      mir->ssa_rep->defs = const_cast<int32_t*>(def->defs);  // Not modified by LVN.
-      mir->dalvikInsn.opcode = def->opcode;
-      mir->offset = i;  // LVN uses offset only for debug output
-      mir->optimization_flags = 0u;
-
-      if (i != 0u) {
-        mirs_[i - 1u].next = mir;
-      }
-    }
-    mirs_[count - 1u].next = nullptr;
-  }
-
-  template <size_t count>
-  void PrepareMIRs(const MIRDef (&defs)[count]) {
-    DoPrepareMIRs(defs, count);
-  }
-
-  void MakeSFieldUninitialized(uint32_t sfield_index) {
-    CHECK_LT(sfield_index, cu_.mir_graph->sfield_lowering_infos_.size());
-    cu_.mir_graph->sfield_lowering_infos_[sfield_index].flags_ &=
-        ~MirSFieldLoweringInfo::kFlagClassIsInitialized;
-  }
-
-  template <size_t count>
-  void MarkAsWideSRegs(const int32_t (&sregs)[count]) {
-    for (int32_t sreg : sregs) {
-      cu_.mir_graph->reg_location_[sreg].wide = true;
-      cu_.mir_graph->reg_location_[sreg + 1].wide = true;
-      cu_.mir_graph->reg_location_[sreg + 1].high_word = true;
-    }
-  }
-
-  void PerformLVN() {
-    cu_.mir_graph->temp_.gvn.ifield_ids =  GlobalValueNumbering::PrepareGvnFieldIds(
-        allocator_.get(), cu_.mir_graph->ifield_lowering_infos_);
-    cu_.mir_graph->temp_.gvn.sfield_ids =  GlobalValueNumbering::PrepareGvnFieldIds(
-        allocator_.get(), cu_.mir_graph->sfield_lowering_infos_);
-    gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get(),
-                                                           GlobalValueNumbering::kModeLvn));
-    lvn_.reset(new (allocator_.get()) LocalValueNumbering(gvn_.get(), 0u, allocator_.get()));
-    value_names_.resize(mir_count_);
-    for (size_t i = 0; i != mir_count_; ++i) {
-      value_names_[i] =  lvn_->GetValueNumber(&mirs_[i]);
-    }
-    EXPECT_TRUE(gvn_->Good());
-  }
-
-  LocalValueNumberingTest()
-      : pool_(),
-        cu_(&pool_, kRuntimeISA, nullptr, nullptr),
-        mir_count_(0u),
-        mirs_(nullptr),
-        ssa_reps_(),
-        allocator_(),
-        gvn_(),
-        lvn_(),
-        value_names_() {
-    cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
-    allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
-    // By default, the zero-initialized reg_location_[.] with ref == false tells LVN that
-    // 0 constants are integral, not references, and the values are all narrow.
-    // Nothing else is used by LVN/GVN. Tests can override the default values as needed.
-    cu_.mir_graph->reg_location_ = static_cast<RegLocation*>(cu_.arena.Alloc(
-        kMaxSsaRegs * sizeof(cu_.mir_graph->reg_location_[0]), kArenaAllocRegAlloc));
-    cu_.mir_graph->num_ssa_regs_ = kMaxSsaRegs;
-  }
-
-  static constexpr size_t kMaxSsaRegs = 16384u;
-
-  ArenaPool pool_;
-  CompilationUnit cu_;
-  size_t mir_count_;
-  MIR* mirs_;
-  std::vector<SSARepresentation> ssa_reps_;
-  std::unique_ptr<ScopedArenaAllocator> allocator_;
-  std::unique_ptr<GlobalValueNumbering> gvn_;
-  std::unique_ptr<LocalValueNumbering> lvn_;
-  std::vector<uint16_t> value_names_;
-};
-
-TEST_F(LocalValueNumberingTest, IGetIGetInvokeIGet) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
-      DEF_IGET(Instruction::IGET, 1u, 10u, 0u),
-      DEF_INVOKE1(Instruction::INVOKE_VIRTUAL, 11u),
-      DEF_IGET(Instruction::IGET, 2u, 10u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 4u);
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_NE(value_names_[0], value_names_[3]);
-  EXPECT_EQ(mirs_[0].optimization_flags, 0u);
-  EXPECT_EQ(mirs_[1].optimization_flags, MIR_IGNORE_NULL_CHECK);
-  EXPECT_EQ(mirs_[2].optimization_flags, 0u);
-  EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
-}
-
-TEST_F(LocalValueNumberingTest, IGetIPutIGetIGetIGet) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessObject },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_IGET(Instruction::IGET_OBJECT, 0u, 10u, 0u),
-      DEF_IPUT(Instruction::IPUT_OBJECT, 1u, 11u, 0u),  // May alias.
-      DEF_IGET(Instruction::IGET_OBJECT, 2u, 10u, 0u),
-      DEF_IGET(Instruction::IGET, 3u,  0u, 1u),
-      DEF_IGET(Instruction::IGET, 4u,  2u, 1u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 5u);
-  EXPECT_NE(value_names_[0], value_names_[2]);
-  EXPECT_NE(value_names_[3], value_names_[4]);
-  for (size_t i = 0; i != arraysize(mirs); ++i) {
-    EXPECT_EQ((i == 2u) ? MIR_IGNORE_NULL_CHECK : 0,
-              mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, UniquePreserve1) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 10u),
-      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
-      DEF_IPUT(Instruction::IPUT, 1u, 11u, 0u),  // No aliasing since 10u is unique.
-      DEF_IGET(Instruction::IGET, 2u, 10u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 4u);
-  EXPECT_EQ(value_names_[1], value_names_[3]);
-  for (size_t i = 0; i != arraysize(mirs); ++i) {
-    EXPECT_EQ((i == 1u || i == 3u) ? MIR_IGNORE_NULL_CHECK : 0,
-              mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, UniquePreserve2) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 11u),
-      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
-      DEF_IPUT(Instruction::IPUT, 1u, 11u, 0u),  // No aliasing since 11u is unique.
-      DEF_IGET(Instruction::IGET, 2u, 10u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 4u);
-  EXPECT_EQ(value_names_[1], value_names_[3]);
-  for (size_t i = 0; i != arraysize(mirs); ++i) {
-    EXPECT_EQ((i == 2u || i == 3u) ? MIR_IGNORE_NULL_CHECK : 0,
-              mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, UniquePreserveAndEscape) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 10u),
-      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
-      DEF_INVOKE1(Instruction::INVOKE_VIRTUAL, 11u),  // 10u still unique.
-      DEF_IGET(Instruction::IGET, 2u, 10u, 0u),
-      DEF_INVOKE1(Instruction::INVOKE_VIRTUAL, 10u),  // 10u not unique anymore.
-      DEF_IGET(Instruction::IGET, 3u, 10u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 6u);
-  EXPECT_EQ(value_names_[1], value_names_[3]);
-  EXPECT_NE(value_names_[1], value_names_[5]);
-  for (size_t i = 0; i != arraysize(mirs); ++i) {
-    EXPECT_EQ((i == 1u || i == 3u || i == 4u || i == 5u) ? MIR_IGNORE_NULL_CHECK : 0,
-              mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, Volatile) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, true, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_IGET(Instruction::IGET, 0u, 10u, 1u),  // Volatile.
-      DEF_IGET(Instruction::IGET, 1u,  0u, 0u),  // Non-volatile.
-      DEF_IGET(Instruction::IGET, 2u, 10u, 1u),  // Volatile.
-      DEF_IGET(Instruction::IGET, 3u,  2u, 1u),  // Non-volatile.
-      DEF_IGET(Instruction::IGET, 4u,  0u, 0u),  // Non-volatile.
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 5u);
-  EXPECT_NE(value_names_[0], value_names_[2]);  // A volatile load always gets a new value name.
-  EXPECT_NE(value_names_[1], value_names_[3]);  // Different base objects because the volatile loads differ.
-  EXPECT_NE(value_names_[1], value_names_[4]);  // Not guaranteed to be the same after "acquire".
-
-  for (size_t i = 0; i != arraysize(mirs); ++i) {
-    EXPECT_EQ((i == 2u || i == 4u) ? MIR_IGNORE_NULL_CHECK : 0,
-              mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, UnresolvedIField) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },  // Resolved field #1.
-      { 2u, 1u, 2u, false, kDexMemAccessWide },  // Resolved field #2.
-      { 3u, 0u, 0u, false, kDexMemAccessWord },  // Unresolved field.
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 30u),
-      DEF_IGET(Instruction::IGET, 1u, 30u, 0u),             // Resolved field #1, unique object.
-      DEF_IGET(Instruction::IGET, 2u, 31u, 0u),             // Resolved field #1.
-      DEF_IGET_WIDE(Instruction::IGET_WIDE, 3u, 31u, 1u),   // Resolved field #2.
-      DEF_IGET(Instruction::IGET, 5u, 32u, 2u),             // Unresolved IGET can be "acquire".
-      DEF_IGET(Instruction::IGET, 6u, 30u, 0u),             // Resolved field #1, unique object.
-      DEF_IGET(Instruction::IGET, 7u, 31u, 0u),             // Resolved field #1.
-      DEF_IGET_WIDE(Instruction::IGET_WIDE, 8u, 31u, 1u),   // Resolved field #2.
-      DEF_IPUT(Instruction::IPUT, 10u, 32u, 2u),            // IPUT clobbers field #1 (#2 is wide).
-      DEF_IGET(Instruction::IGET, 11u, 30u, 0u),            // Resolved field #1, unique object.
-      DEF_IGET(Instruction::IGET, 12u, 31u, 0u),            // Resolved field #1, new value name.
-      DEF_IGET_WIDE(Instruction::IGET_WIDE, 13u, 31u, 1u),  // Resolved field #2.
-      DEF_IGET_WIDE(Instruction::IGET_WIDE, 15u, 30u, 1u),  // Resolved field #2, unique object.
-      DEF_IPUT(Instruction::IPUT, 17u, 30u, 2u),            // IPUT clobbers field #1 (#2 is wide).
-      DEF_IGET(Instruction::IGET, 18u, 30u, 0u),            // Resolved field #1, unique object.
-      DEF_IGET_WIDE(Instruction::IGET_WIDE, 19u, 30u, 1u),  // Resolved field #2, unique object.
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 3, 8, 13, 15, 19 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 16u);
-  // Unresolved field is potentially volatile, so we need to adhere to the volatile semantics.
-  EXPECT_EQ(value_names_[1], value_names_[5]);    // Unique object.
-  EXPECT_NE(value_names_[2], value_names_[6]);    // Not guaranteed to be the same after "acquire".
-  EXPECT_NE(value_names_[3], value_names_[7]);    // Not guaranteed to be the same after "acquire".
-  EXPECT_EQ(value_names_[1], value_names_[9]);    // Unique object.
-  EXPECT_NE(value_names_[6], value_names_[10]);   // This aliased with unresolved IPUT.
-  EXPECT_EQ(value_names_[7], value_names_[11]);   // Still the same after "release".
-  EXPECT_EQ(value_names_[12], value_names_[15]);  // Still the same after "release".
-  EXPECT_NE(value_names_[1], value_names_[14]);   // This aliased with unresolved IPUT.
-  EXPECT_EQ(mirs_[0].optimization_flags, 0u);
-  EXPECT_EQ(mirs_[1].optimization_flags, MIR_IGNORE_NULL_CHECK);
-  EXPECT_EQ(mirs_[2].optimization_flags, 0u);
-  EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
-  EXPECT_EQ(mirs_[4].optimization_flags, 0u);
-  for (size_t i = 5u; i != mir_count_; ++i) {
-    EXPECT_EQ((i == 1u || i == 3u || i >= 5u) ? MIR_IGNORE_NULL_CHECK : 0,
-              mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, UnresolvedSField) {
-  static const SFieldDef sfields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },  // Resolved field #1.
-      { 2u, 1u, 2u, false, kDexMemAccessWide },  // Resolved field #2.
-      { 3u, 0u, 0u, false, kDexMemAccessWord },  // Unresolved field.
-  };
-  static const MIRDef mirs[] = {
-      DEF_SGET(Instruction::SGET, 0u, 0u),            // Resolved field #1.
-      DEF_SGET_WIDE(Instruction::SGET_WIDE, 1u, 1u),  // Resolved field #2.
-      DEF_SGET(Instruction::SGET, 3u, 2u),            // Unresolved SGET can be "acquire".
-      DEF_SGET(Instruction::SGET, 4u, 0u),            // Resolved field #1.
-      DEF_SGET_WIDE(Instruction::SGET_WIDE, 5u, 1u),  // Resolved field #2.
-      DEF_SPUT(Instruction::SPUT, 7u, 2u),            // SPUT clobbers field #1 (#2 is wide).
-      DEF_SGET(Instruction::SGET, 8u, 0u),            // Resolved field #1.
-      DEF_SGET_WIDE(Instruction::SGET_WIDE, 9u, 1u),  // Resolved field #2.
-  };
-
-  PrepareSFields(sfields);
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 1, 5, 9 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 8u);
-  // Unresolved field is potentially volatile, so we need to adhere to the volatile semantics.
-  EXPECT_NE(value_names_[0], value_names_[3]);  // Not guaranteed to be the same after "acquire".
-  EXPECT_NE(value_names_[1], value_names_[4]);  // Not guaranteed to be the same after "acquire".
-  EXPECT_NE(value_names_[3], value_names_[6]);  // This aliased with the unresolved SPUT.
-  EXPECT_EQ(value_names_[4], value_names_[7]);  // Still the same after "release".
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    EXPECT_EQ(0, mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, UninitializedSField) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },  // Resolved field #1.
-  };
-  static const SFieldDef sfields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },  // Resolved field #1.
-      { 2u, 1u, 2u, false, kDexMemAccessWord },  // Resolved field #2; uninitialized.
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 200u),
-      DEF_IGET(Instruction::IGET, 1u, 100u, 0u),
-      DEF_IGET(Instruction::IGET, 2u, 200u, 0u),
-      DEF_SGET(Instruction::SGET, 3u, 0u),
-      DEF_SGET(Instruction::SGET, 4u, 1u),            // Can call <clinit>().
-      DEF_IGET(Instruction::IGET, 5u, 100u, 0u),      // Differs from 1u.
-      DEF_IGET(Instruction::IGET, 6u, 200u, 0u),      // Same as 2u.
-      DEF_SGET(Instruction::SGET, 7u, 0u),            // Differs from 3u.
-  };
-
-  PrepareIFields(ifields);
-  PrepareSFields(sfields);
-  MakeSFieldUninitialized(1u);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 8u);
-  EXPECT_NE(value_names_[1], value_names_[5]);
-  EXPECT_EQ(value_names_[2], value_names_[6]);
-  EXPECT_NE(value_names_[3], value_names_[7]);
-}
-
-TEST_F(LocalValueNumberingTest, ConstString) {
-  static const MIRDef mirs[] = {
-      DEF_CONST_STRING(Instruction::CONST_STRING, 0u, 0u),
-      DEF_CONST_STRING(Instruction::CONST_STRING, 1u, 0u),
-      DEF_CONST_STRING(Instruction::CONST_STRING, 2u, 2u),
-      DEF_CONST_STRING(Instruction::CONST_STRING, 3u, 0u),
-      DEF_INVOKE1(Instruction::INVOKE_DIRECT, 2u),
-      DEF_CONST_STRING(Instruction::CONST_STRING, 4u, 2u),
-  };
-
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 6u);
-  EXPECT_EQ(value_names_[1], value_names_[0]);
-  EXPECT_NE(value_names_[2], value_names_[0]);
-  EXPECT_EQ(value_names_[3], value_names_[0]);
-  EXPECT_EQ(value_names_[5], value_names_[2]);
-}
-
-TEST_F(LocalValueNumberingTest, SameValueInDifferentMemoryLocations) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-  };
-  static const SFieldDef sfields[] = {
-      { 3u, 1u, 3u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_ARRAY, 201u),
-      DEF_IGET(Instruction::IGET, 0u, 100u, 0u),
-      DEF_IPUT(Instruction::IPUT, 0u, 100u, 1u),
-      DEF_IPUT(Instruction::IPUT, 0u, 101u, 1u),
-      DEF_APUT(Instruction::APUT, 0u, 200u, 300u),
-      DEF_APUT(Instruction::APUT, 0u, 200u, 301u),
-      DEF_APUT(Instruction::APUT, 0u, 201u, 300u),
-      DEF_APUT(Instruction::APUT, 0u, 201u, 301u),
-      DEF_SPUT(Instruction::SPUT, 0u, 0u),
-      DEF_IGET(Instruction::IGET, 9u, 100u, 0u),
-      DEF_IGET(Instruction::IGET, 10u, 100u, 1u),
-      DEF_IGET(Instruction::IGET, 11u, 101u, 1u),
-      DEF_AGET(Instruction::AGET, 12u, 200u, 300u),
-      DEF_AGET(Instruction::AGET, 13u, 200u, 301u),
-      DEF_AGET(Instruction::AGET, 14u, 201u, 300u),
-      DEF_AGET(Instruction::AGET, 15u, 201u, 301u),
-      DEF_SGET(Instruction::SGET, 16u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareSFields(sfields);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 17u);
-  for (size_t i = 9; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(value_names_[1], value_names_[i]) << i;
-  }
-  for (size_t i = 0; i != arraysize(mirs); ++i) {
-    int expected_flags =
-        ((i == 2u || (i >= 5u && i <= 7u) || (i >= 9u && i <= 15u)) ? MIR_IGNORE_NULL_CHECK : 0) |
-        ((i >= 12u && i <= 15u) ? MIR_IGNORE_RANGE_CHECK : 0);
-    EXPECT_EQ(expected_flags, mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, UniqueArrayAliasing) {
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_ARRAY, 20u),
-      DEF_AGET(Instruction::AGET, 1u, 20u, 40u),
-      DEF_APUT(Instruction::APUT, 2u, 20u, 41u),  // May alias with index for sreg 40u.
-      DEF_AGET(Instruction::AGET, 3u, 20u, 40u),
-  };
-
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 4u);
-  EXPECT_NE(value_names_[1], value_names_[3]);
-  for (size_t i = 0; i != arraysize(mirs); ++i) {
-    int expected_flags =
-        ((i >= 1u) ? MIR_IGNORE_NULL_CHECK : 0) |
-        ((i == 3u) ? MIR_IGNORE_RANGE_CHECK : 0);
-    EXPECT_EQ(expected_flags, mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, EscapingRefs) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },    // Field #1.
-      { 2u, 1u, 2u, false, kDexMemAccessWord },    // Field #2.
-      { 3u, 1u, 3u, false, kDexMemAccessObject },  // For storing escaping refs.
-      { 4u, 1u, 4u, false, kDexMemAccessWide },    // Wide.
-      { 5u, 0u, 0u, false, kDexMemAccessWord },    // Unresolved field, int.
-      { 6u, 0u, 0u, false, kDexMemAccessWide },    // Unresolved field, wide.
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 20u),
-      DEF_IGET(Instruction::IGET, 1u, 20u, 0u),
-      DEF_IGET(Instruction::IGET, 2u, 20u, 1u),
-      DEF_IPUT(Instruction::IPUT_OBJECT, 20u, 30u, 2u),      // Ref escapes.
-      DEF_IGET(Instruction::IGET, 4u, 20u, 0u),
-      DEF_IGET(Instruction::IGET, 5u, 20u, 1u),
-      DEF_IPUT(Instruction::IPUT, 6u, 31u, 0u),              // May alias with field #1.
-      DEF_IGET(Instruction::IGET, 7u, 20u, 0u),              // New value.
-      DEF_IGET(Instruction::IGET, 8u, 20u, 1u),              // Still the same.
-      DEF_IPUT_WIDE(Instruction::IPUT_WIDE, 9u, 31u, 3u),    // No aliasing, different type.
-      DEF_IGET(Instruction::IGET, 11u, 20u, 0u),
-      DEF_IGET(Instruction::IGET, 12u, 20u, 1u),
-      DEF_IPUT_WIDE(Instruction::IPUT_WIDE, 13u, 31u, 5u),   // No aliasing, different type.
-      DEF_IGET(Instruction::IGET, 15u, 20u, 0u),
-      DEF_IGET(Instruction::IGET, 16u, 20u, 1u),
-      DEF_IPUT(Instruction::IPUT, 17u, 31u, 4u),             // Aliasing, same type.
-      DEF_IGET(Instruction::IGET, 18u, 20u, 0u),
-      DEF_IGET(Instruction::IGET, 19u, 20u, 1u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 9, 13 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 18u);
-  EXPECT_EQ(value_names_[1], value_names_[4]);
-  EXPECT_EQ(value_names_[2], value_names_[5]);
-  EXPECT_NE(value_names_[4], value_names_[7]);  // New value.
-  EXPECT_EQ(value_names_[5], value_names_[8]);
-  EXPECT_EQ(value_names_[7], value_names_[10]);
-  EXPECT_EQ(value_names_[8], value_names_[11]);
-  EXPECT_EQ(value_names_[10], value_names_[13]);
-  EXPECT_EQ(value_names_[11], value_names_[14]);
-  EXPECT_NE(value_names_[13], value_names_[16]);  // New value.
-  EXPECT_NE(value_names_[14], value_names_[17]);  // New value.
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    int expected =
-        ((i != 0u && i != 3u && i != 6u) ? MIR_IGNORE_NULL_CHECK : 0) |
-        ((i == 3u) ? MIR_STORE_NON_NULL_VALUE : 0);
-    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, EscapingArrayRefs) {
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_ARRAY, 20u),
-      DEF_AGET(Instruction::AGET, 1u, 20u, 40u),
-      DEF_AGET(Instruction::AGET, 2u, 20u, 41u),
-      DEF_APUT(Instruction::APUT_OBJECT, 20u, 30u, 42u),    // Array ref escapes.
-      DEF_AGET(Instruction::AGET, 4u, 20u, 40u),
-      DEF_AGET(Instruction::AGET, 5u, 20u, 41u),
-      DEF_APUT_WIDE(Instruction::APUT_WIDE, 6u, 31u, 43u),  // No aliasing, different type.
-      DEF_AGET(Instruction::AGET, 8u, 20u, 40u),
-      DEF_AGET(Instruction::AGET, 9u, 20u, 41u),
-      DEF_APUT(Instruction::APUT, 10u, 32u, 40u),           // May alias with all elements.
-      DEF_AGET(Instruction::AGET, 11u, 20u, 40u),           // New value (same index name).
-      DEF_AGET(Instruction::AGET, 12u, 20u, 41u),           // New value (different index name).
-  };
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 6 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 12u);
-  EXPECT_EQ(value_names_[1], value_names_[4]);
-  EXPECT_EQ(value_names_[2], value_names_[5]);
-  EXPECT_EQ(value_names_[4], value_names_[7]);
-  EXPECT_EQ(value_names_[5], value_names_[8]);
-  EXPECT_NE(value_names_[7], value_names_[10]);  // New value.
-  EXPECT_NE(value_names_[8], value_names_[11]);  // New value.
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    int expected =
-        ((i != 0u && i != 3u && i != 6u && i != 9u) ? MIR_IGNORE_NULL_CHECK : 0u) |
-        ((i >= 4 && i != 6u && i != 9u) ? MIR_IGNORE_RANGE_CHECK : 0u) |
-        ((i == 3u) ? MIR_STORE_NON_NULL_VALUE : 0);
-    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, StoringSameValueKeepsMemoryVersion) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-  };
-  static const SFieldDef sfields[] = {
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_IGET(Instruction::IGET, 0u, 30u, 0u),
-      DEF_IGET(Instruction::IGET, 1u, 31u, 0u),
-      DEF_IPUT(Instruction::IPUT, 1u, 31u, 0u),            // Store the same value.
-      DEF_IGET(Instruction::IGET, 3u, 30u, 0u),
-      DEF_AGET(Instruction::AGET, 4u, 32u, 40u),
-      DEF_AGET(Instruction::AGET, 5u, 33u, 40u),
-      DEF_APUT(Instruction::APUT, 5u, 33u, 40u),           // Store the same value.
-      DEF_AGET(Instruction::AGET, 7u, 32u, 40u),
-      DEF_SGET(Instruction::SGET, 8u, 0u),
-      DEF_SPUT(Instruction::SPUT, 8u, 0u),                 // Store the same value.
-      DEF_SGET(Instruction::SGET, 10u, 0u),
-      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 50u),      // Test with unique references.
-      { Instruction::FILLED_NEW_ARRAY, 0, 0u, 2, { 12u, 13u }, 0, { } },
-      DEF_UNIQUE_REF(Instruction::MOVE_RESULT_OBJECT, 51u),
-      DEF_IGET(Instruction::IGET, 14u, 50u, 0u),
-      DEF_IGET(Instruction::IGET, 15u, 50u, 1u),
-      DEF_IPUT(Instruction::IPUT, 15u, 50u, 1u),           // Store the same value.
-      DEF_IGET(Instruction::IGET, 17u, 50u, 0u),
-      DEF_AGET(Instruction::AGET, 18u, 51u, 40u),
-      DEF_AGET(Instruction::AGET, 19u, 51u, 41u),
-      DEF_APUT(Instruction::APUT, 19u, 51u, 41u),          // Store the same value.
-      DEF_AGET(Instruction::AGET, 21u, 51u, 40u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareSFields(sfields);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 22u);
-  EXPECT_NE(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[3]);
-  EXPECT_NE(value_names_[4], value_names_[5]);
-  EXPECT_EQ(value_names_[4], value_names_[7]);
-  EXPECT_EQ(value_names_[8], value_names_[10]);
-  EXPECT_NE(value_names_[14], value_names_[15]);
-  EXPECT_EQ(value_names_[14], value_names_[17]);
-  EXPECT_NE(value_names_[18], value_names_[19]);
-  EXPECT_EQ(value_names_[18], value_names_[21]);
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    int expected =
-        ((i == 2u || i == 3u || i == 6u || i == 7u || (i >= 14u)) ? MIR_IGNORE_NULL_CHECK : 0u) |
-        ((i == 6u || i == 7u || i >= 20u) ? MIR_IGNORE_RANGE_CHECK : 0u);
-    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, FilledNewArrayTracking) {
-  if (!kLocalValueNumberingEnableFilledNewArrayTracking) {
-    // Feature disabled.
-    return;
-  }
-  static const MIRDef mirs[] = {
-      DEF_CONST(Instruction::CONST, 0u, 100),
-      DEF_CONST(Instruction::CONST, 1u, 200),
-      { Instruction::FILLED_NEW_ARRAY, 0, 0u, 2, { 0u, 1u }, 0, { } },
-      DEF_UNIQUE_REF(Instruction::MOVE_RESULT_OBJECT, 10u),
-      DEF_CONST(Instruction::CONST, 20u, 0),
-      DEF_CONST(Instruction::CONST, 21u, 1),
-      DEF_AGET(Instruction::AGET, 6u, 10u, 20u),
-      DEF_AGET(Instruction::AGET, 7u, 10u, 21u),
-  };
-
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 8u);
-  EXPECT_EQ(value_names_[0], value_names_[6]);
-  EXPECT_EQ(value_names_[1], value_names_[7]);
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    int expected = (i == 6u || i == 7u) ? (MIR_IGNORE_NULL_CHECK | MIR_IGNORE_RANGE_CHECK) : 0u;
-    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, ClInitOnSget) {
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },
-      { 1u, 2u, 1u, false, kDexMemAccessObject },
-  };
-  static const MIRDef mirs[] = {
-      DEF_SGET(Instruction::SGET_OBJECT, 0u, 0u),
-      DEF_AGET(Instruction::AGET, 1u, 0u, 100u),
-      DEF_SGET(Instruction::SGET_OBJECT, 2u, 1u),
-      DEF_SGET(Instruction::SGET_OBJECT, 3u, 0u),
-      DEF_AGET(Instruction::AGET, 4u, 3u, 100u),
-  };
-
-  PrepareSFields(sfields);
-  MakeSFieldUninitialized(1u);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 5u);
-  EXPECT_NE(value_names_[0], value_names_[3]);
-}
-
-TEST_F(LocalValueNumberingTest, DivZeroCheck) {
-  static const MIRDef mirs[] = {
-      DEF_DIV_REM(Instruction::DIV_INT, 1u, 10u, 20u),
-      DEF_DIV_REM(Instruction::DIV_INT, 2u, 20u, 20u),
-      DEF_DIV_REM(Instruction::DIV_INT_2ADDR, 3u, 10u, 1u),
-      DEF_DIV_REM(Instruction::REM_INT, 4u, 30u, 20u),
-      DEF_DIV_REM_WIDE(Instruction::REM_LONG, 5u, 12u, 14u),
-      DEF_DIV_REM_WIDE(Instruction::DIV_LONG_2ADDR, 7u, 16u, 14u),
-  };
-
-  static const bool expected_ignore_div_zero_check[] = {
-      false, true, false, true, false, true,
-  };
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 5, 7, 12, 14, 16 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformLVN();
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    int expected = expected_ignore_div_zero_check[i] ? MIR_IGNORE_DIV_ZERO_CHECK : 0u;
-    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
-  }
-}
-
-static constexpr int64_t shift_minus_1(size_t by) {
-  return static_cast<int64_t>(static_cast<uint64_t>(INT64_C(-1)) << by);
-}
-
-TEST_F(LocalValueNumberingTest, ConstWide) {
-  static const MIRDef mirs[] = {
-      // Core reg constants.
-      DEF_CONST(Instruction::CONST_WIDE_16, 0u, 0),
-      DEF_CONST(Instruction::CONST_WIDE_16, 2u, 1),
-      DEF_CONST(Instruction::CONST_WIDE_16, 4u, -1),
-      DEF_CONST(Instruction::CONST_WIDE_32, 6u, 1 << 16),
-      DEF_CONST(Instruction::CONST_WIDE_32, 8u, shift_minus_1(16)),
-      DEF_CONST(Instruction::CONST_WIDE_32, 10u, (1 << 16) + 1),
-      DEF_CONST(Instruction::CONST_WIDE_32, 12u, (1 << 16) - 1),
-      DEF_CONST(Instruction::CONST_WIDE_32, 14u, -(1 << 16) + 1),
-      DEF_CONST(Instruction::CONST_WIDE_32, 16u, -(1 << 16) - 1),
-      DEF_CONST(Instruction::CONST_WIDE, 18u, INT64_C(1) << 32),
-      DEF_CONST(Instruction::CONST_WIDE, 20u, shift_minus_1(32)),
-      DEF_CONST(Instruction::CONST_WIDE, 22u, (INT64_C(1) << 32) + 1),
-      DEF_CONST(Instruction::CONST_WIDE, 24u, (INT64_C(1) << 32) - 1),
-      DEF_CONST(Instruction::CONST_WIDE, 26u, shift_minus_1(32) + 1),
-      DEF_CONST(Instruction::CONST_WIDE, 28u, shift_minus_1(32) - 1),
-      DEF_CONST(Instruction::CONST_WIDE_HIGH16, 30u, 1),       // Effectively 1 << 48.
-      DEF_CONST(Instruction::CONST_WIDE_HIGH16, 32u, 0xffff),  // Effectively -1 << 48.
-      DEF_CONST(Instruction::CONST_WIDE, 34u, (INT64_C(1) << 48) + 1),
-      DEF_CONST(Instruction::CONST_WIDE, 36u, (INT64_C(1) << 48) - 1),
-      DEF_CONST(Instruction::CONST_WIDE, 38u, shift_minus_1(48) + 1),
-      DEF_CONST(Instruction::CONST_WIDE, 40u, shift_minus_1(48) - 1),
-      // FP reg constants.
-      DEF_CONST(Instruction::CONST_WIDE_16, 42u, 0),
-      DEF_CONST(Instruction::CONST_WIDE_16, 44u, 1),
-      DEF_CONST(Instruction::CONST_WIDE_16, 46u, -1),
-      DEF_CONST(Instruction::CONST_WIDE_32, 48u, 1 << 16),
-      DEF_CONST(Instruction::CONST_WIDE_32, 50u, shift_minus_1(16)),
-      DEF_CONST(Instruction::CONST_WIDE_32, 52u, (1 << 16) + 1),
-      DEF_CONST(Instruction::CONST_WIDE_32, 54u, (1 << 16) - 1),
-      DEF_CONST(Instruction::CONST_WIDE_32, 56u, -(1 << 16) + 1),
-      DEF_CONST(Instruction::CONST_WIDE_32, 58u, -(1 << 16) - 1),
-      DEF_CONST(Instruction::CONST_WIDE, 60u, INT64_C(1) << 32),
-      DEF_CONST(Instruction::CONST_WIDE, 62u, shift_minus_1(32)),
-      DEF_CONST(Instruction::CONST_WIDE, 64u, (INT64_C(1) << 32) + 1),
-      DEF_CONST(Instruction::CONST_WIDE, 66u, (INT64_C(1) << 32) - 1),
-      DEF_CONST(Instruction::CONST_WIDE, 68u, shift_minus_1(32) + 1),
-      DEF_CONST(Instruction::CONST_WIDE, 70u, shift_minus_1(32) - 1),
-      DEF_CONST(Instruction::CONST_WIDE_HIGH16, 72u, 1),       // Effectively 1 << 48.
-      DEF_CONST(Instruction::CONST_WIDE_HIGH16, 74u, 0xffff),  // Effectively -1 << 48.
-      DEF_CONST(Instruction::CONST_WIDE, 76u, (INT64_C(1) << 48) + 1),
-      DEF_CONST(Instruction::CONST_WIDE, 78u, (INT64_C(1) << 48) - 1),
-      DEF_CONST(Instruction::CONST_WIDE, 80u, shift_minus_1(48) + 1),
-      DEF_CONST(Instruction::CONST_WIDE, 82u, shift_minus_1(48) - 1),
-  };
-
-  PrepareMIRs(mirs);
-  for (size_t i = 0; i != arraysize(mirs); ++i) {
-    const int32_t wide_sregs[] = { mirs_[i].ssa_rep->defs[0] };
-    MarkAsWideSRegs(wide_sregs);
-  }
-  for (size_t i = arraysize(mirs) / 2u; i != arraysize(mirs); ++i) {
-    cu_.mir_graph->reg_location_[mirs_[i].ssa_rep->defs[0]].fp = true;
-  }
-  PerformLVN();
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    for (size_t j = i + 1u; j != mir_count_; ++j) {
-      EXPECT_NE(value_names_[i], value_names_[j]) << i << " " << j;
-    }
-  }
-}
-
-TEST_F(LocalValueNumberingTest, Const) {
-  static const MIRDef mirs[] = {
-      // Core reg constants.
-      DEF_CONST(Instruction::CONST_4, 0u, 0),
-      DEF_CONST(Instruction::CONST_4, 1u, 1),
-      DEF_CONST(Instruction::CONST_4, 2u, -1),
-      DEF_CONST(Instruction::CONST_16, 3u, 1 << 4),
-      DEF_CONST(Instruction::CONST_16, 4u, shift_minus_1(4)),
-      DEF_CONST(Instruction::CONST_16, 5u, (1 << 4) + 1),
-      DEF_CONST(Instruction::CONST_16, 6u, (1 << 4) - 1),
-      DEF_CONST(Instruction::CONST_16, 7u, -(1 << 4) + 1),
-      DEF_CONST(Instruction::CONST_16, 8u, -(1 << 4) - 1),
-      DEF_CONST(Instruction::CONST_HIGH16, 9u, 1),       // Effectively 1 << 16.
-      DEF_CONST(Instruction::CONST_HIGH16, 10u, 0xffff),  // Effectively -1 << 16.
-      DEF_CONST(Instruction::CONST, 11u, (1 << 16) + 1),
-      DEF_CONST(Instruction::CONST, 12u, (1 << 16) - 1),
-      DEF_CONST(Instruction::CONST, 13u, shift_minus_1(16) + 1),
-      DEF_CONST(Instruction::CONST, 14u, shift_minus_1(16) - 1),
-      // FP reg constants.
-      DEF_CONST(Instruction::CONST_4, 15u, 0),
-      DEF_CONST(Instruction::CONST_4, 16u, 1),
-      DEF_CONST(Instruction::CONST_4, 17u, -1),
-      DEF_CONST(Instruction::CONST_16, 18u, 1 << 4),
-      DEF_CONST(Instruction::CONST_16, 19u, shift_minus_1(4)),
-      DEF_CONST(Instruction::CONST_16, 20u, (1 << 4) + 1),
-      DEF_CONST(Instruction::CONST_16, 21u, (1 << 4) - 1),
-      DEF_CONST(Instruction::CONST_16, 22u, -(1 << 4) + 1),
-      DEF_CONST(Instruction::CONST_16, 23u, -(1 << 4) - 1),
-      DEF_CONST(Instruction::CONST_HIGH16, 24u, 1),       // Effectively 1 << 16.
-      DEF_CONST(Instruction::CONST_HIGH16, 25u, 0xffff),  // Effectively -1 << 16.
-      DEF_CONST(Instruction::CONST, 26u, (1 << 16) + 1),
-      DEF_CONST(Instruction::CONST, 27u, (1 << 16) - 1),
-      DEF_CONST(Instruction::CONST, 28u, shift_minus_1(16) + 1),
-      DEF_CONST(Instruction::CONST, 29u, shift_minus_1(16) - 1),
-      // null reference constant.
-      DEF_CONST(Instruction::CONST_4, 30u, 0),
-  };
-
-  PrepareMIRs(mirs);
-  static_assert((arraysize(mirs) & 1) != 0, "missing null or unmatched fp/core");
-  cu_.mir_graph->reg_location_[arraysize(mirs) - 1].ref = true;
-  for (size_t i = arraysize(mirs) / 2u; i != arraysize(mirs) - 1; ++i) {
-    cu_.mir_graph->reg_location_[mirs_[i].ssa_rep->defs[0]].fp = true;
-  }
-  PerformLVN();
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    for (size_t j = i + 1u; j != mir_count_; ++j) {
-      EXPECT_NE(value_names_[i], value_names_[j]) << i << " " << j;
-    }
-  }
-}
-
-}  // namespace art
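For readers scanning the deleted test above, it may help to see what one of its DEF_* macros expands to. The snippet below is illustrative only; MirDefSketch and kIgetExample are hypothetical stand-ins for the test's protected MIRDef struct, and 0x52 is the Dalvik IGET opcode listed in the attribute table in mir_analysis.cc further down.

#include <cstdint>
#include <cstddef>

// Simplified stand-in for the test's MIRDef (the real one stores an Instruction::Code).
struct MirDefSketch {
  int opcode;
  int64_t value;
  uint32_t field_info;
  size_t num_uses;
  int32_t uses[4];
  size_t num_defs;
  int32_t defs[2];
};

// DEF_IGET(Instruction::IGET, 2u, 10u, 0u) expands to this aggregate: one use
// (the object reference in sreg 10), one def (sreg 2), ifield_lowering_infos_[0].
static const MirDefSketch kIgetExample = { /* IGET */ 0x52, 0, 0u, 1, { 10 }, 1, { 2 } };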
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
deleted file mode 100644
index 18ce563..0000000
--- a/compiler/dex/mir_analysis.cc
+++ /dev/null
@@ -1,1433 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <algorithm>
-#include <memory>
-
-#include "base/logging.h"
-#include "base/scoped_arena_containers.h"
-#include "dataflow_iterator-inl.h"
-#include "compiler_ir.h"
-#include "dex_flags.h"
-#include "dex_instruction-inl.h"
-#include "dex/mir_field_info.h"
-#include "dex/verified_method.h"
-#include "dex/quick/dex_file_method_inliner.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
-#include "driver/dex_compilation_unit.h"
-#include "scoped_thread_state_change.h"
-#include "utils.h"
-
-namespace art {
-
-enum InstructionAnalysisAttributeOps : uint8_t {
-  kUninterestingOp = 0,
-  kArithmeticOp,
-  kFpOp,
-  kSingleOp,
-  kDoubleOp,
-  kIntOp,
-  kLongOp,
-  kBranchOp,
-  kInvokeOp,
-  kArrayOp,
-  kHeavyweightOp,
-  kSimpleConstOp,
-  kMoveOp,
-  kSwitch
-};
-
-enum InstructionAnalysisAttributeMasks : uint16_t {
-  kAnNone = 1 << kUninterestingOp,
-  kAnMath = 1 << kArithmeticOp,
-  kAnFp = 1 << kFpOp,
-  kAnLong = 1 << kLongOp,
-  kAnInt = 1 << kIntOp,
-  kAnSingle = 1 << kSingleOp,
-  kAnDouble = 1 << kDoubleOp,
-  kAnFloatMath = 1 << kFpOp,
-  kAnBranch = 1 << kBranchOp,
-  kAnInvoke = 1 << kInvokeOp,
-  kAnArrayOp = 1 << kArrayOp,
-  kAnHeavyWeight = 1 << kHeavyweightOp,
-  kAnSimpleConst = 1 << kSimpleConstOp,
-  kAnMove = 1 << kMoveOp,
-  kAnSwitch = 1 << kSwitch,
-  kAnComputational = kAnMath | kAnArrayOp | kAnMove | kAnSimpleConst,
-};
-
-// Instruction characteristics used to statically identify computation-intensive methods.
-static const uint16_t kAnalysisAttributes[kMirOpLast] = {
-  // 00 NOP
-  kAnNone,
-
-  // 01 MOVE vA, vB
-  kAnMove,
-
-  // 02 MOVE_FROM16 vAA, vBBBB
-  kAnMove,
-
-  // 03 MOVE_16 vAAAA, vBBBB
-  kAnMove,
-
-  // 04 MOVE_WIDE vA, vB
-  kAnMove,
-
-  // 05 MOVE_WIDE_FROM16 vAA, vBBBB
-  kAnMove,
-
-  // 06 MOVE_WIDE_16 vAAAA, vBBBB
-  kAnMove,
-
-  // 07 MOVE_OBJECT vA, vB
-  kAnMove,
-
-  // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
-  kAnMove,
-
-  // 09 MOVE_OBJECT_16 vAAAA, vBBBB
-  kAnMove,
-
-  // 0A MOVE_RESULT vAA
-  kAnMove,
-
-  // 0B MOVE_RESULT_WIDE vAA
-  kAnMove,
-
-  // 0C MOVE_RESULT_OBJECT vAA
-  kAnMove,
-
-  // 0D MOVE_EXCEPTION vAA
-  kAnMove,
-
-  // 0E RETURN_VOID
-  kAnBranch,
-
-  // 0F RETURN vAA
-  kAnBranch,
-
-  // 10 RETURN_WIDE vAA
-  kAnBranch,
-
-  // 11 RETURN_OBJECT vAA
-  kAnBranch,
-
-  // 12 CONST_4 vA, #+B
-  kAnSimpleConst,
-
-  // 13 CONST_16 vAA, #+BBBB
-  kAnSimpleConst,
-
-  // 14 CONST vAA, #+BBBBBBBB
-  kAnSimpleConst,
-
-  // 15 CONST_HIGH16 VAA, #+BBBB0000
-  kAnSimpleConst,
-
-  // 16 CONST_WIDE_16 vAA, #+BBBB
-  kAnSimpleConst,
-
-  // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
-  kAnSimpleConst,
-
-  // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
-  kAnSimpleConst,
-
-  // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
-  kAnSimpleConst,
-
-  // 1A CONST_STRING vAA, string@BBBB
-  kAnNone,
-
-  // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
-  kAnNone,
-
-  // 1C CONST_CLASS vAA, type@BBBB
-  kAnNone,
-
-  // 1D MONITOR_ENTER vAA
-  kAnNone,
-
-  // 1E MONITOR_EXIT vAA
-  kAnNone,
-
-  // 1F CHECK_CAST vAA, type@BBBB
-  kAnNone,
-
-  // 20 INSTANCE_OF vA, vB, type@CCCC
-  kAnNone,
-
-  // 21 ARRAY_LENGTH vA, vB
-  kAnArrayOp,
-
-  // 22 NEW_INSTANCE vAA, type@BBBB
-  kAnHeavyWeight,
-
-  // 23 NEW_ARRAY vA, vB, type@CCCC
-  kAnHeavyWeight,
-
-  // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
-  kAnHeavyWeight,
-
-  // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
-  kAnHeavyWeight,
-
-  // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
-  kAnNone,
-
-  // 27 THROW vAA
-  kAnHeavyWeight | kAnBranch,
-
-  // 28 GOTO
-  kAnBranch,
-
-  // 29 GOTO_16
-  kAnBranch,
-
-  // 2A GOTO_32
-  kAnBranch,
-
-  // 2B PACKED_SWITCH vAA, +BBBBBBBB
-  kAnSwitch,
-
-  // 2C SPARSE_SWITCH vAA, +BBBBBBBB
-  kAnSwitch,
-
-  // 2D CMPL_FLOAT vAA, vBB, vCC
-  kAnMath | kAnFp | kAnSingle,
-
-  // 2E CMPG_FLOAT vAA, vBB, vCC
-  kAnMath | kAnFp | kAnSingle,
-
-  // 2F CMPL_DOUBLE vAA, vBB, vCC
-  kAnMath | kAnFp | kAnDouble,
-
-  // 30 CMPG_DOUBLE vAA, vBB, vCC
-  kAnMath | kAnFp | kAnDouble,
-
-  // 31 CMP_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // 32 IF_EQ vA, vB, +CCCC
-  kAnMath | kAnBranch | kAnInt,
-
-  // 33 IF_NE vA, vB, +CCCC
-  kAnMath | kAnBranch | kAnInt,
-
-  // 34 IF_LT vA, vB, +CCCC
-  kAnMath | kAnBranch | kAnInt,
-
-  // 35 IF_GE vA, vB, +CCCC
-  kAnMath | kAnBranch | kAnInt,
-
-  // 36 IF_GT vA, vB, +CCCC
-  kAnMath | kAnBranch | kAnInt,
-
-  // 37 IF_LE vA, vB, +CCCC
-  kAnMath | kAnBranch | kAnInt,
-
-  // 38 IF_EQZ vAA, +BBBB
-  kAnMath | kAnBranch | kAnInt,
-
-  // 39 IF_NEZ vAA, +BBBB
-  kAnMath | kAnBranch | kAnInt,
-
-  // 3A IF_LTZ vAA, +BBBB
-  kAnMath | kAnBranch | kAnInt,
-
-  // 3B IF_GEZ vAA, +BBBB
-  kAnMath | kAnBranch | kAnInt,
-
-  // 3C IF_GTZ vAA, +BBBB
-  kAnMath | kAnBranch | kAnInt,
-
-  // 3D IF_LEZ vAA, +BBBB
-  kAnMath | kAnBranch | kAnInt,
-
-  // 3E UNUSED_3E
-  kAnNone,
-
-  // 3F UNUSED_3F
-  kAnNone,
-
-  // 40 UNUSED_40
-  kAnNone,
-
-  // 41 UNUSED_41
-  kAnNone,
-
-  // 42 UNUSED_42
-  kAnNone,
-
-  // 43 UNUSED_43
-  kAnNone,
-
-  // 44 AGET vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 45 AGET_WIDE vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 46 AGET_OBJECT vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 47 AGET_BOOLEAN vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 48 AGET_BYTE vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 49 AGET_CHAR vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 4A AGET_SHORT vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 4B APUT vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 4C APUT_WIDE vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 4D APUT_OBJECT vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 4E APUT_BOOLEAN vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 4F APUT_BYTE vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 50 APUT_CHAR vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 51 APUT_SHORT vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 52 IGET vA, vB, field@CCCC
-  kAnNone,
-
-  // 53 IGET_WIDE vA, vB, field@CCCC
-  kAnNone,
-
-  // 54 IGET_OBJECT vA, vB, field@CCCC
-  kAnNone,
-
-  // 55 IGET_BOOLEAN vA, vB, field@CCCC
-  kAnNone,
-
-  // 56 IGET_BYTE vA, vB, field@CCCC
-  kAnNone,
-
-  // 57 IGET_CHAR vA, vB, field@CCCC
-  kAnNone,
-
-  // 58 IGET_SHORT vA, vB, field@CCCC
-  kAnNone,
-
-  // 59 IPUT vA, vB, field@CCCC
-  kAnNone,
-
-  // 5A IPUT_WIDE vA, vB, field@CCCC
-  kAnNone,
-
-  // 5B IPUT_OBJECT vA, vB, field@CCCC
-  kAnNone,
-
-  // 5C IPUT_BOOLEAN vA, vB, field@CCCC
-  kAnNone,
-
-  // 5D IPUT_BYTE vA, vB, field@CCCC
-  kAnNone,
-
-  // 5E IPUT_CHAR vA, vB, field@CCCC
-  kAnNone,
-
-  // 5F IPUT_SHORT vA, vB, field@CCCC
-  kAnNone,
-
-  // 60 SGET vAA, field@BBBB
-  kAnNone,
-
-  // 61 SGET_WIDE vAA, field@BBBB
-  kAnNone,
-
-  // 62 SGET_OBJECT vAA, field@BBBB
-  kAnNone,
-
-  // 63 SGET_BOOLEAN vAA, field@BBBB
-  kAnNone,
-
-  // 64 SGET_BYTE vAA, field@BBBB
-  kAnNone,
-
-  // 65 SGET_CHAR vAA, field@BBBB
-  kAnNone,
-
-  // 66 SGET_SHORT vAA, field@BBBB
-  kAnNone,
-
-  // 67 SPUT vAA, field@BBBB
-  kAnNone,
-
-  // 68 SPUT_WIDE vAA, field@BBBB
-  kAnNone,
-
-  // 69 SPUT_OBJECT vAA, field@BBBB
-  kAnNone,
-
-  // 6A SPUT_BOOLEAN vAA, field@BBBB
-  kAnNone,
-
-  // 6B SPUT_BYTE vAA, field@BBBB
-  kAnNone,
-
-  // 6C SPUT_CHAR vAA, field@BBBB
-  kAnNone,
-
-  // 6D SPUT_SHORT vAA, field@BBBB
-  kAnNone,
-
-  // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 73 RETURN_VOID_NO_BARRIER
-  kAnBranch,
-
-  // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 79 UNUSED_79
-  kAnNone,
-
-  // 7A UNUSED_7A
-  kAnNone,
-
-  // 7B NEG_INT vA, vB
-  kAnMath | kAnInt,
-
-  // 7C NOT_INT vA, vB
-  kAnMath | kAnInt,
-
-  // 7D NEG_LONG vA, vB
-  kAnMath | kAnLong,
-
-  // 7E NOT_LONG vA, vB
-  kAnMath | kAnLong,
-
-  // 7F NEG_FLOAT vA, vB
-  kAnMath | kAnFp | kAnSingle,
-
-  // 80 NEG_DOUBLE vA, vB
-  kAnMath | kAnFp | kAnDouble,
-
-  // 81 INT_TO_LONG vA, vB
-  kAnMath | kAnInt | kAnLong,
-
-  // 82 INT_TO_FLOAT vA, vB
-  kAnMath | kAnFp | kAnInt | kAnSingle,
-
-  // 83 INT_TO_DOUBLE vA, vB
-  kAnMath | kAnFp | kAnInt | kAnDouble,
-
-  // 84 LONG_TO_INT vA, vB
-  kAnMath | kAnInt | kAnLong,
-
-  // 85 LONG_TO_FLOAT vA, vB
-  kAnMath | kAnFp | kAnLong | kAnSingle,
-
-  // 86 LONG_TO_DOUBLE vA, vB
-  kAnMath | kAnFp | kAnLong | kAnDouble,
-
-  // 87 FLOAT_TO_INT vA, vB
-  kAnMath | kAnFp | kAnInt | kAnSingle,
-
-  // 88 FLOAT_TO_LONG vA, vB
-  kAnMath | kAnFp | kAnLong | kAnSingle,
-
-  // 89 FLOAT_TO_DOUBLE vA, vB
-  kAnMath | kAnFp | kAnSingle | kAnDouble,
-
-  // 8A DOUBLE_TO_INT vA, vB
-  kAnMath | kAnFp | kAnInt | kAnDouble,
-
-  // 8B DOUBLE_TO_LONG vA, vB
-  kAnMath | kAnFp | kAnLong | kAnDouble,
-
-  // 8C DOUBLE_TO_FLOAT vA, vB
-  kAnMath | kAnFp | kAnSingle | kAnDouble,
-
-  // 8D INT_TO_BYTE vA, vB
-  kAnMath | kAnInt,
-
-  // 8E INT_TO_CHAR vA, vB
-  kAnMath | kAnInt,
-
-  // 8F INT_TO_SHORT vA, vB
-  kAnMath | kAnInt,
-
-  // 90 ADD_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 91 SUB_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 92 MUL_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 93 DIV_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 94 REM_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 95 AND_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 96 OR_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 97 XOR_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 98 SHL_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 99 SHR_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 9A USHR_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 9B ADD_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // 9C SUB_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // 9D MUL_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // 9E DIV_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // 9F REM_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // A0 AND_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // A1 OR_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // A2 XOR_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // A3 SHL_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // A4 SHR_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // A5 USHR_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // A6 ADD_FLOAT vAA, vBB, vCC
-  kAnMath | kAnFp | kAnSingle,
-
-  // A7 SUB_FLOAT vAA, vBB, vCC
-  kAnMath | kAnFp | kAnSingle,
-
-  // A8 MUL_FLOAT vAA, vBB, vCC
-  kAnMath | kAnFp | kAnSingle,
-
-  // A9 DIV_FLOAT vAA, vBB, vCC
-  kAnMath | kAnFp | kAnSingle,
-
-  // AA REM_FLOAT vAA, vBB, vCC
-  kAnMath | kAnFp | kAnSingle,
-
-  // AB ADD_DOUBLE vAA, vBB, vCC
-  kAnMath | kAnFp | kAnDouble,
-
-  // AC SUB_DOUBLE vAA, vBB, vCC
-  kAnMath | kAnFp | kAnDouble,
-
-  // AD MUL_DOUBLE vAA, vBB, vCC
-  kAnMath | kAnFp | kAnDouble,
-
-  // AE DIV_DOUBLE vAA, vBB, vCC
-  kAnMath | kAnFp | kAnDouble,
-
-  // AF REM_DOUBLE vAA, vBB, vCC
-  kAnMath | kAnFp | kAnDouble,
-
-  // B0 ADD_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B1 SUB_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B2 MUL_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B3 DIV_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B4 REM_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B5 AND_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B6 OR_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B7 XOR_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B8 SHL_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B9 SHR_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // BA USHR_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // BB ADD_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // BC SUB_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // BD MUL_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // BE DIV_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // BF REM_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // C0 AND_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // C1 OR_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // C2 XOR_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // C3 SHL_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // C4 SHR_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // C5 USHR_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // C6 ADD_FLOAT_2ADDR vA, vB
-  kAnMath | kAnFp | kAnSingle,
-
-  // C7 SUB_FLOAT_2ADDR vA, vB
-  kAnMath | kAnFp | kAnSingle,
-
-  // C8 MUL_FLOAT_2ADDR vA, vB
-  kAnMath | kAnFp | kAnSingle,
-
-  // C9 DIV_FLOAT_2ADDR vA, vB
-  kAnMath | kAnFp | kAnSingle,
-
-  // CA REM_FLOAT_2ADDR vA, vB
-  kAnMath | kAnFp | kAnSingle,
-
-  // CB ADD_DOUBLE_2ADDR vA, vB
-  kAnMath | kAnFp | kAnDouble,
-
-  // CC SUB_DOUBLE_2ADDR vA, vB
-  kAnMath | kAnFp | kAnDouble,
-
-  // CD MUL_DOUBLE_2ADDR vA, vB
-  kAnMath | kAnFp | kAnDouble,
-
-  // CE DIV_DOUBLE_2ADDR vA, vB
-  kAnMath | kAnFp | kAnDouble,
-
-  // CF REM_DOUBLE_2ADDR vA, vB
-  kAnMath | kAnFp | kAnDouble,
-
-  // D0 ADD_INT_LIT16 vA, vB, #+CCCC
-  kAnMath | kAnInt,
-
-  // D1 RSUB_INT vA, vB, #+CCCC
-  kAnMath | kAnInt,
-
-  // D2 MUL_INT_LIT16 vA, vB, #+CCCC
-  kAnMath | kAnInt,
-
-  // D3 DIV_INT_LIT16 vA, vB, #+CCCC
-  kAnMath | kAnInt,
-
-  // D4 REM_INT_LIT16 vA, vB, #+CCCC
-  kAnMath | kAnInt,
-
-  // D5 AND_INT_LIT16 vA, vB, #+CCCC
-  kAnMath | kAnInt,
-
-  // D6 OR_INT_LIT16 vA, vB, #+CCCC
-  kAnMath | kAnInt,
-
-  // D7 XOR_INT_LIT16 vA, vB, #+CCCC
-  kAnMath | kAnInt,
-
-  // D8 ADD_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // DA MUL_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // DB DIV_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // DC REM_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // DD AND_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // DE OR_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // DF XOR_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // E0 SHL_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // E1 SHR_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // E2 USHR_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // E3 IGET_QUICK
-  kAnNone,
-
-  // E4 IGET_WIDE_QUICK
-  kAnNone,
-
-  // E5 IGET_OBJECT_QUICK
-  kAnNone,
-
-  // E6 IPUT_QUICK
-  kAnNone,
-
-  // E7 IPUT_WIDE_QUICK
-  kAnNone,
-
-  // E8 IPUT_OBJECT_QUICK
-  kAnNone,
-
-  // E9 INVOKE_VIRTUAL_QUICK
-  kAnInvoke | kAnHeavyWeight,
-
-  // EA INVOKE_VIRTUAL_RANGE_QUICK
-  kAnInvoke | kAnHeavyWeight,
-
-  // EB IPUT_BOOLEAN_QUICK
-  kAnNone,
-
-  // EC IPUT_BYTE_QUICK
-  kAnNone,
-
-  // ED IPUT_CHAR_QUICK
-  kAnNone,
-
-  // EE IPUT_SHORT_QUICK
-  kAnNone,
-
-  // EF IGET_BOOLEAN_QUICK
-  kAnNone,
-
-  // F0 IGET_BYTE_QUICK
-  kAnNone,
-
-  // F1 IGET_CHAR_QUICK
-  kAnNone,
-
-  // F2 IGET_SHORT_QUICK
-  kAnNone,
-
-  // F3 UNUSED_F3
-  kAnNone,
-
-  // F4 UNUSED_F4
-  kAnNone,
-
-  // F5 UNUSED_F5
-  kAnNone,
-
-  // F6 UNUSED_F6
-  kAnNone,
-
-  // F7 UNUSED_F7
-  kAnNone,
-
-  // F8 UNUSED_F8
-  kAnNone,
-
-  // F9 UNUSED_F9
-  kAnNone,
-
-  // FA UNUSED_FA
-  kAnNone,
-
-  // FB UNUSED_FB
-  kAnNone,
-
-  // FC UNUSED_FC
-  kAnNone,
-
-  // FD UNUSED_FD
-  kAnNone,
-
-  // FE UNUSED_FE
-  kAnNone,
-
-  // FF UNUSED_FF
-  kAnNone,
-
-  // Beginning of extended MIR opcodes
-  // 100 MIR_PHI
-  kAnNone,
-
-  // 101 MIR_COPY
-  kAnNone,
-
-  // 102 MIR_FUSED_CMPL_FLOAT
-  kAnNone,
-
-  // 103 MIR_FUSED_CMPG_FLOAT
-  kAnNone,
-
-  // 104 MIR_FUSED_CMPL_DOUBLE
-  kAnNone,
-
-  // 105 MIR_FUSED_CMPG_DOUBLE
-  kAnNone,
-
-  // 106 MIR_FUSED_CMP_LONG
-  kAnNone,
-
-  // 107 MIR_NOP
-  kAnNone,
-
-  // 108 MIR_NULL_CHECK
-  kAnNone,
-
-  // 109 MIR_RANGE_CHECK
-  kAnNone,
-
-  // 10A MIR_DIV_ZERO_CHECK
-  kAnNone,
-
-  // 10B MIR_CHECK
-  kAnNone,
-
-  // 10C MIR_CHECKPART2
-  kAnNone,
-
-  // 10D MIR_SELECT
-  kAnNone,
-
-  // 10E MirOpConstVector
-  kAnNone,
-
-  // 10F MirOpMoveVector
-  kAnNone,
-
-  // 110 MirOpPackedMultiply
-  kAnNone,
-
-  // 111 MirOpPackedAddition
-  kAnNone,
-
-  // 112 MirOpPackedSubtract
-  kAnNone,
-
-  // 113 MirOpPackedShiftLeft
-  kAnNone,
-
-  // 114 MirOpPackedSignedShiftRight
-  kAnNone,
-
-  // 115 MirOpPackedUnsignedShiftRight
-  kAnNone,
-
-  // 116 MirOpPackedAnd
-  kAnNone,
-
-  // 117 MirOpPackedOr
-  kAnNone,
-
-  // 118 MirOpPackedXor
-  kAnNone,
-
-  // 119 MirOpPackedAddReduce
-  kAnNone,
-
-  // 11A MirOpPackedReduce
-  kAnNone,
-
-  // 11B MirOpPackedSet
-  kAnNone,
-
-  // 11C MirOpReserveVectorRegisters
-  kAnNone,
-
-  // 11D MirOpReturnVectorRegisters
-  kAnNone,
-
-  // 11E MirOpMemBarrier
-  kAnNone,
-
-  // 11F MirOpPackedArrayGet
-  kAnArrayOp,
-
-  // 120 MirOpPackedArrayPut
-  kAnArrayOp,
-};
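The table above feeds the per-block scan in AnalyzeBlock() below: an instruction breaks a "computational block" only when it is not a branch and carries none of the arithmetic/array/move/const attributes collected in kAnComputational, while branches are tallied separately. A self-contained sketch of that mask test follows; it is illustrative only, and the sketch:: constants and KeepsBlockComputational() are simplified stand-ins for the kAn* masks and the inline check inside AnalyzeBlock().

#include <cstdint>

namespace sketch {

constexpr uint16_t kBranch = 1u << 0;
constexpr uint16_t kMath = 1u << 1;
constexpr uint16_t kArrayOp = 1u << 2;
constexpr uint16_t kMove = 1u << 3;
constexpr uint16_t kSimpleConst = 1u << 4;
constexpr uint16_t kComputational = kMath | kArrayOp | kMove | kSimpleConst;

// Branch instructions are counted elsewhere and never break the block; any other
// instruction must carry at least one computational attribute to keep it.
constexpr bool KeepsBlockComputational(uint16_t flags) {
  return (flags & kBranch) != 0 || (flags & kComputational) != 0;
}

static_assert(KeepsBlockComputational(kMath), "ADD_INT-style flags keep the block computational");
static_assert(KeepsBlockComputational(kBranch), "branches are tallied separately");
static_assert(!KeepsBlockComputational(0), "an attribute-free op breaks the block");

}  // namespace sketch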
-
-struct MethodStats {
-  int dex_instructions;
-  int math_ops;
-  int fp_ops;
-  int array_ops;
-  int branch_ops;
-  int heavyweight_ops;
-  bool has_computational_loop;
-  bool has_switch;
-  float math_ratio;
-  float fp_ratio;
-  float array_ratio;
-  float branch_ratio;
-  float heavyweight_ratio;
-};
-
-void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
-  if (bb->visited || (bb->block_type != kDalvikByteCode)) {
-    return;
-  }
-  bool computational_block = true;
-  bool has_math = false;
-  /*
-   * For the purposes of this scan, we want to treat the set of basic blocks broken
-   * by an exception edge as a single basic block.  We'll scan forward along the fallthrough
-   * edges until we reach an explicit branch or return.
-   */
-  BasicBlock* ending_bb = bb;
-  if (ending_bb->last_mir_insn != nullptr) {
-    uint32_t ending_flags = kAnalysisAttributes[ending_bb->last_mir_insn->dalvikInsn.opcode];
-    while ((ending_flags & kAnBranch) == 0) {
-      ending_bb = GetBasicBlock(ending_bb->fall_through);
-      ending_flags = kAnalysisAttributes[ending_bb->last_mir_insn->dalvikInsn.opcode];
-    }
-  }
-  /*
-   * Ideally, we'd weight the operations by loop nesting level, but to do so we'd
-   * first need to do some expensive loop detection - and the point of this is to make
-   * an informed guess before investing in computation.  However, we can cheaply detect
-   * many simple loop forms without having to do full dataflow analysis.
-   */
-  int loop_scale_factor = 1;
-  // Simple for and while loops
-  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->fall_through == NullBasicBlockId)) {
-    if ((GetBasicBlock(ending_bb->taken)->taken == bb->id) ||
-        (GetBasicBlock(ending_bb->taken)->fall_through == bb->id)) {
-      loop_scale_factor = 25;
-    }
-  }
-  // Simple do-while loop
-  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->taken == bb->id)) {
-    loop_scale_factor = 25;
-  }
-
-  BasicBlock* tbb = bb;
-  bool done = false;
-  while (!done) {
-    tbb->visited = true;
-    for (MIR* mir = tbb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
-        // Skip any MIR pseudo-op.
-        continue;
-      }
-      uint16_t flags = kAnalysisAttributes[mir->dalvikInsn.opcode];
-      stats->dex_instructions += loop_scale_factor;
-      if ((flags & kAnBranch) == 0) {
-        computational_block &= ((flags & kAnComputational) != 0);
-      } else {
-        stats->branch_ops += loop_scale_factor;
-      }
-      if ((flags & kAnMath) != 0) {
-        stats->math_ops += loop_scale_factor;
-        has_math = true;
-      }
-      if ((flags & kAnFp) != 0) {
-        stats->fp_ops += loop_scale_factor;
-      }
-      if ((flags & kAnArrayOp) != 0) {
-        stats->array_ops += loop_scale_factor;
-      }
-      if ((flags & kAnHeavyWeight) != 0) {
-        stats->heavyweight_ops += loop_scale_factor;
-      }
-      if ((flags & kAnSwitch) != 0) {
-        stats->has_switch = true;
-      }
-    }
-    if (tbb == ending_bb) {
-      done = true;
-    } else {
-      tbb = GetBasicBlock(tbb->fall_through);
-    }
-  }
-  if (has_math && computational_block && (loop_scale_factor > 1)) {
-    stats->has_computational_loop = true;
-  }
-}
-
-bool MIRGraph::ComputeSkipCompilation(MethodStats* stats, bool skip_default,
-                                      std::string* skip_message) {
-  float count = stats->dex_instructions;
-  stats->math_ratio = stats->math_ops / count;
-  stats->fp_ratio = stats->fp_ops / count;
-  stats->branch_ratio = stats->branch_ops / count;
-  stats->array_ratio = stats->array_ops / count;
-  stats->heavyweight_ratio = stats->heavyweight_ops / count;
-
-  if (cu_->enable_debug & (1 << kDebugShowFilterStats)) {
-    LOG(INFO) << "STATS " << stats->dex_instructions << ", math:"
-              << stats->math_ratio << ", fp:"
-              << stats->fp_ratio << ", br:"
-              << stats->branch_ratio << ", hw:"
-              << stats->heavyweight_ratio << ", arr:"
-              << stats->array_ratio << ", hot:"
-              << stats->has_computational_loop << ", "
-              << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-  }
-
-  // Computation intensive?
-  if (stats->has_computational_loop && (stats->heavyweight_ratio < 0.04)) {
-    return false;
-  }
-
-  // Complex, logic-intensive?
-  if (cu_->compiler_driver->GetCompilerOptions().IsSmallMethod(GetNumDalvikInsns()) &&
-      stats->branch_ratio > 0.3) {
-    return false;
-  }
-
-  // Significant floating point?
-  if (stats->fp_ratio > 0.05) {
-    return false;
-  }
-
-  // Significant generic math?
-  if (stats->math_ratio > 0.3) {
-    return false;
-  }
-
-  // If array-intensive, compiling is probably worthwhile.
-  if (stats->array_ratio > 0.1) {
-    return false;
-  }
-
-  // Switch operations benefit greatly from compilation, so go ahead and spend the cycles.
-  if (stats->has_switch) {
-    return false;
-  }
-
-  // If significant in size and high proportion of expensive operations, skip.
-  if (cu_->compiler_driver->GetCompilerOptions().IsSmallMethod(GetNumDalvikInsns()) &&
-      (stats->heavyweight_ratio > 0.3)) {
-    *skip_message = "Is a small method with heavyweight ratio " +
-                    std::to_string(stats->heavyweight_ratio);
-    return true;
-  }
-
-  return skip_default;
-}
-
- /*
-  * Will eventually want this to be a bit more sophisticated and happen at verification time.
-  */
-bool MIRGraph::SkipCompilation(std::string* skip_message) {
-  const CompilerOptions& compiler_options = cu_->compiler_driver->GetCompilerOptions();
-  CompilerOptions::CompilerFilter compiler_filter = compiler_options.GetCompilerFilter();
-  if (compiler_filter == CompilerOptions::kEverything) {
-    return false;
-  }
-
-  // Contains a pattern we don't want to compile?
-  if (PuntToInterpreter()) {
-    *skip_message = "Punt to interpreter set";
-    return true;
-  }
-
-  DCHECK(compiler_options.IsCompilationEnabled());
-
-  // Set up compilation cutoffs based on current filter mode.
-  size_t small_cutoff;
-  size_t default_cutoff;
-  switch (compiler_filter) {
-    case CompilerOptions::kBalanced:
-      small_cutoff = compiler_options.GetSmallMethodThreshold();
-      default_cutoff = compiler_options.GetLargeMethodThreshold();
-      break;
-    case CompilerOptions::kSpace:
-      small_cutoff = compiler_options.GetTinyMethodThreshold();
-      default_cutoff = compiler_options.GetSmallMethodThreshold();
-      break;
-    case CompilerOptions::kSpeed:
-    case CompilerOptions::kTime:
-      small_cutoff = compiler_options.GetHugeMethodThreshold();
-      default_cutoff = compiler_options.GetHugeMethodThreshold();
-      break;
-    default:
-      LOG(FATAL) << "Unexpected compiler_filter_: " << compiler_filter;
-      UNREACHABLE();
-  }
-
-  // If size < cutoff, assume we'll compile - but allow removal.
-  bool skip_compilation = (GetNumDalvikInsns() >= default_cutoff);
-  if (skip_compilation) {
-    *skip_message = "#Insns >= default_cutoff: " + std::to_string(GetNumDalvikInsns());
-  }
-
-  /*
-   * Filter 1: Huge methods are likely to be machine generated, but some aren't.
-   * If huge, assume we won't compile, but allow further analysis to turn it back on.
-   */
-  if (compiler_options.IsHugeMethod(GetNumDalvikInsns())) {
-    skip_compilation = true;
-    *skip_message = "Huge method: " + std::to_string(GetNumDalvikInsns());
-    // If we've got a huge number of basic blocks, don't bother with further analysis.
-    if (static_cast<size_t>(GetNumBlocks()) > (compiler_options.GetHugeMethodThreshold() / 2)) {
-      return true;
-    }
-  } else if (compiler_options.IsLargeMethod(GetNumDalvikInsns()) &&
-    /* If it's large and contains no branches, it's likely to be machine generated initialization */
-      (GetBranchCount() == 0)) {
-    *skip_message = "Large method with no branches";
-    return true;
-  } else if (compiler_filter == CompilerOptions::kSpeed) {
-    // If not huge, compile.
-    return false;
-  }
-
-  // Filter 2: Skip class initializers.
-  if (((cu_->access_flags & kAccConstructor) != 0) && ((cu_->access_flags & kAccStatic) != 0)) {
-    *skip_message = "Class initializer";
-    return true;
-  }
-
-  // Filter 3: if this method is a special pattern, go ahead and emit the canned pattern.
-  if (cu_->compiler_driver->GetMethodInlinerMap() != nullptr &&
-      cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
-          ->IsSpecial(cu_->method_idx)) {
-    return false;
-  }
-
-  // Filter 4: if small, just compile.
-  if (GetNumDalvikInsns() < small_cutoff) {
-    return false;
-  }
-
-  // Analyze graph for:
-  //  o floating point computation
-  //  o basic blocks contained in loop with heavy arithmetic.
-  //  o proportion of conditional branches.
-
-  MethodStats stats;
-  memset(&stats, 0, sizeof(stats));
-
-  ClearAllVisitedFlags();
-  AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    AnalyzeBlock(bb, &stats);
-  }
-
-  return ComputeSkipCompilation(&stats, skip_compilation, skip_message);
-}
-
-void MIRGraph::DoCacheFieldLoweringInfo() {
-  static constexpr uint32_t kFieldIndexFlagQuickened = 0x80000000;
-  // All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
-  const uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 2u;
-  ScopedArenaAllocator allocator(&cu_->arena_stack);
-  auto* field_idxs = allocator.AllocArray<uint32_t>(max_refs, kArenaAllocMisc);
-  DexMemAccessType* field_types = allocator.AllocArray<DexMemAccessType>(
-      max_refs, kArenaAllocMisc);
-  // Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
-  size_t ifield_pos = 0u;
-  size_t sfield_pos = max_refs;
-  AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    if (bb->block_type != kDalvikByteCode) {
-      continue;
-    }
-    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      // Get field index and try to find it among existing indexes. If found, it's usually among
-      // the last few added, so we'll start the search from ifield_pos/sfield_pos. Though this
-      // is a linear search, it actually performs much better than a map-based approach.
-      const bool is_iget_or_iput = IsInstructionIGetOrIPut(mir->dalvikInsn.opcode);
-      const bool is_iget_or_iput_quick = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode);
-      if (is_iget_or_iput || is_iget_or_iput_quick) {
-        uint32_t field_idx;
-        DexMemAccessType access_type;
-        if (is_iget_or_iput) {
-          field_idx = mir->dalvikInsn.vC;
-          access_type = IGetOrIPutMemAccessType(mir->dalvikInsn.opcode);
-        } else {
-          DCHECK(is_iget_or_iput_quick);
-          // Set kFieldIndexFlagQuickened so that we don't deduplicate against non-quickened field
-          // indexes.
-          field_idx = mir->offset | kFieldIndexFlagQuickened;
-          access_type = IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode);
-        }
-        size_t i = ifield_pos;
-        while (i != 0u && field_idxs[i - 1] != field_idx) {
-          --i;
-        }
-        if (i != 0u) {
-          mir->meta.ifield_lowering_info = i - 1;
-          DCHECK_EQ(field_types[i - 1], access_type);
-        } else {
-          mir->meta.ifield_lowering_info = ifield_pos;
-          field_idxs[ifield_pos] = field_idx;
-          field_types[ifield_pos] = access_type;
-          ++ifield_pos;
-        }
-      } else if (IsInstructionSGetOrSPut(mir->dalvikInsn.opcode)) {
-        auto field_idx = mir->dalvikInsn.vB;
-        size_t i = sfield_pos;
-        while (i != max_refs && field_idxs[i] != field_idx) {
-          ++i;
-        }
-        if (i != max_refs) {
-          mir->meta.sfield_lowering_info = max_refs - i - 1u;
-          DCHECK_EQ(field_types[i], SGetOrSPutMemAccessType(mir->dalvikInsn.opcode));
-        } else {
-          mir->meta.sfield_lowering_info = max_refs - sfield_pos;
-          --sfield_pos;
-          field_idxs[sfield_pos] = field_idx;
-          field_types[sfield_pos] = SGetOrSPutMemAccessType(mir->dalvikInsn.opcode);
-        }
-      }
-      DCHECK_LE(ifield_pos, sfield_pos);
-    }
-  }
-
-  if (ifield_pos != 0u) {
-    // Resolve instance field infos.
-    DCHECK_EQ(ifield_lowering_infos_.size(), 0u);
-    ifield_lowering_infos_.reserve(ifield_pos);
-    for (size_t pos = 0u; pos != ifield_pos; ++pos) {
-      const uint32_t field_idx = field_idxs[pos];
-      const bool is_quickened = (field_idx & kFieldIndexFlagQuickened) != 0;
-      const uint32_t masked_field_idx = field_idx & ~kFieldIndexFlagQuickened;
-      CHECK_LT(masked_field_idx, 1u << 16);
-      ifield_lowering_infos_.push_back(
-          MirIFieldLoweringInfo(masked_field_idx, field_types[pos], is_quickened));
-    }
-    ScopedObjectAccess soa(Thread::Current());
-    MirIFieldLoweringInfo::Resolve(soa,
-                                   cu_->compiler_driver,
-                                   GetCurrentDexCompilationUnit(),
-                                   ifield_lowering_infos_.data(),
-                                   ifield_pos);
-  }
-
-  if (sfield_pos != max_refs) {
-    // Resolve static field infos.
-    DCHECK_EQ(sfield_lowering_infos_.size(), 0u);
-    sfield_lowering_infos_.reserve(max_refs - sfield_pos);
-    for (size_t pos = max_refs; pos != sfield_pos;) {
-      --pos;
-      sfield_lowering_infos_.push_back(MirSFieldLoweringInfo(field_idxs[pos], field_types[pos]));
-    }
-    MirSFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
-                                   sfield_lowering_infos_.data(), max_refs - sfield_pos);
-  }
-}
-
-void MIRGraph::DoCacheMethodLoweringInfo() {
-  static constexpr uint16_t invoke_types[] = { kVirtual, kSuper, kDirect, kStatic, kInterface };
-  static constexpr uint32_t kMethodIdxFlagQuickened = 0x80000000;
-
-  // Embed the map value in the entry to avoid extra padding in 64-bit builds.
-  struct MapEntry {
-    // Map key: target_method_idx, invoke_type, devirt_target. Ordered to avoid padding.
-    const MethodReference* devirt_target;
-    uint32_t target_method_idx;
-    uint32_t vtable_idx;
-    uint16_t invoke_type;
-    // Map value.
-    uint32_t lowering_info_index;
-  };
-
-  struct MapEntryComparator {
-    bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
-      if (lhs.target_method_idx != rhs.target_method_idx) {
-        return lhs.target_method_idx < rhs.target_method_idx;
-      }
-      if (lhs.invoke_type != rhs.invoke_type) {
-        return lhs.invoke_type < rhs.invoke_type;
-      }
-      if (lhs.vtable_idx != rhs.vtable_idx) {
-        return lhs.vtable_idx < rhs.vtable_idx;
-      }
-      if (lhs.devirt_target != rhs.devirt_target) {
-        if (lhs.devirt_target == nullptr) {
-          return true;
-        }
-        if (rhs.devirt_target == nullptr) {
-          return false;
-        }
-        return devirt_cmp(*lhs.devirt_target, *rhs.devirt_target);
-      }
-      return false;
-    }
-    MethodReferenceComparator devirt_cmp;
-  };
-
-  ScopedArenaAllocator allocator(&cu_->arena_stack);
-
-  // All INVOKE instructions take 3 code units and there must also be a RETURN.
-  const uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 3u;
-
-  // Map invoke key (see MapEntry) to lowering info index and vice versa.
-  // The invoke_map and sequential entries are essentially equivalent to Boost.MultiIndex's
-  // multi_index_container with one ordered index and one sequential index.
-  ScopedArenaSet<MapEntry, MapEntryComparator> invoke_map(MapEntryComparator(),
-                                                          allocator.Adapter());
-  const MapEntry** sequential_entries =
-      allocator.AllocArray<const MapEntry*>(max_refs, kArenaAllocMisc);
-
-  // Find INVOKE insns and their devirtualization targets.
-  const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
-  AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    if (bb->block_type != kDalvikByteCode) {
-      continue;
-    }
-    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      const bool is_quick_invoke = IsInstructionQuickInvoke(mir->dalvikInsn.opcode);
-      const bool is_invoke = IsInstructionInvoke(mir->dalvikInsn.opcode);
-      if (is_quick_invoke || is_invoke) {
-        uint32_t vtable_index = 0;
-        uint32_t target_method_idx = 0;
-        uint32_t invoke_type_idx = 0;  // Default to virtual (in case of quickened).
-        DCHECK_EQ(invoke_types[invoke_type_idx], kVirtual);
-        if (is_quick_invoke) {
-          // We need to store the vtable index since we can't necessarily recreate it at resolve
-          // phase if the dequickening resolved to an interface method.
-          vtable_index = mir->dalvikInsn.vB;
-          // Fake up the method index by storing the mir offset so that we can read the dequicken
-          // info in resolve.
-          target_method_idx = mir->offset | kMethodIdxFlagQuickened;
-        } else {
-          DCHECK(is_invoke);
-          // Decode target method index and invoke type.
-          invoke_type_idx = InvokeInstructionType(mir->dalvikInsn.opcode);
-          target_method_idx = mir->dalvikInsn.vB;
-        }
-        // Find devirtualization target.
-        // TODO: The devirt map is ordered by the dex pc here. Is there a way to get INVOKEs
-        // ordered by dex pc as well? That would allow us to keep an iterator to devirt targets
-        // and increment it as needed instead of making O(log n) lookups.
-        const MethodReference* devirt_target = verified_method->GetDevirtTarget(mir->offset);
-        // Try to insert a new entry. If the insertion fails, we will have found an old one.
-        MapEntry entry = {
-            devirt_target,
-            target_method_idx,
-            vtable_index,
-            invoke_types[invoke_type_idx],
-            static_cast<uint32_t>(invoke_map.size())
-        };
-        auto it = invoke_map.insert(entry).first;  // Iterator to either the old or the new entry.
-        mir->meta.method_lowering_info = it->lowering_info_index;
-        // If we didn't actually insert, this will just overwrite an existing value with the same one.
-        sequential_entries[it->lowering_info_index] = &*it;
-      }
-    }
-  }
-  if (invoke_map.empty()) {
-    return;
-  }
-  // Prepare unique method infos, set method info indexes for their MIRs.
-  const size_t count = invoke_map.size();
-  method_lowering_infos_.reserve(count);
-  for (size_t pos = 0u; pos != count; ++pos) {
-    const MapEntry* entry = sequential_entries[pos];
-    const bool is_quick = (entry->target_method_idx & kMethodIdxFlagQuickened) != 0;
-    const uint32_t masked_method_idx = entry->target_method_idx & ~kMethodIdxFlagQuickened;
-    MirMethodLoweringInfo method_info(masked_method_idx,
-                                      static_cast<InvokeType>(entry->invoke_type), is_quick);
-    if (entry->devirt_target != nullptr) {
-      method_info.SetDevirtualizationTarget(*entry->devirt_target);
-    }
-    if (is_quick) {
-      method_info.SetVTableIndex(entry->vtable_idx);
-    }
-    method_lowering_infos_.push_back(method_info);
-  }
-  MirMethodLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
-                                 method_lowering_infos_.data(), count);
-}
-
-}  // namespace art
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
deleted file mode 100644
index a7ba061..0000000
--- a/compiler/dex/mir_dataflow.cc
+++ /dev/null
@@ -1,1453 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "local_value_numbering.h"
-#include "dataflow_iterator-inl.h"
-
-namespace art {
-
-/*
- * Main table containing data flow attributes for each bytecode. The
- * first kNumPackedOpcodes entries are for Dalvik bytecode
- * instructions, while extended opcodes at the MIR level are appended
- * afterwards.
- *
- * TODO - many optimization flags are incomplete - they will only limit the
- * scope of optimizations but will not cause mis-optimizations.
- */
-const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
-  // 00 NOP
-  DF_NOP,
-
-  // 01 MOVE vA, vB
-  DF_DA | DF_UB | DF_IS_MOVE,
-
-  // 02 MOVE_FROM16 vAA, vBBBB
-  DF_DA | DF_UB | DF_IS_MOVE,
-
-  // 03 MOVE_16 vAAAA, vBBBB
-  DF_DA | DF_UB | DF_IS_MOVE,
-
-  // 04 MOVE_WIDE vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
-
-  // 05 MOVE_WIDE_FROM16 vAA, vBBBB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
-
-  // 06 MOVE_WIDE_16 vAAAA, vBBBB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
-
-  // 07 MOVE_OBJECT vA, vB
-  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
-
-  // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
-  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
-
-  // 09 MOVE_OBJECT_16 vAAAA, vBBBB
-  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
-
-  // 0A MOVE_RESULT vAA
-  DF_DA,
-
-  // 0B MOVE_RESULT_WIDE vAA
-  DF_DA | DF_A_WIDE,
-
-  // 0C MOVE_RESULT_OBJECT vAA
-  DF_DA | DF_REF_A,
-
-  // 0D MOVE_EXCEPTION vAA
-  DF_DA | DF_REF_A | DF_NON_NULL_DST,
-
-  // 0E RETURN_VOID
-  DF_NOP,
-
-  // 0F RETURN vAA
-  DF_UA,
-
-  // 10 RETURN_WIDE vAA
-  DF_UA | DF_A_WIDE,
-
-  // 11 RETURN_OBJECT vAA
-  DF_UA | DF_REF_A,
-
-  // 12 CONST_4 vA, #+B
-  DF_DA | DF_SETS_CONST,
-
-  // 13 CONST_16 vAA, #+BBBB
-  DF_DA | DF_SETS_CONST,
-
-  // 14 CONST vAA, #+BBBBBBBB
-  DF_DA | DF_SETS_CONST,
-
-  // 15 CONST_HIGH16 VAA, #+BBBB0000
-  DF_DA | DF_SETS_CONST,
-
-  // 16 CONST_WIDE_16 vAA, #+BBBB
-  DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
-  // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
-  DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
-  // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
-  DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
-  // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
-  DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
-  // 1A CONST_STRING vAA, string@BBBB
-  DF_DA | DF_REF_A | DF_NON_NULL_DST,
-
-  // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
-  DF_DA | DF_REF_A | DF_NON_NULL_DST,
-
-  // 1C CONST_CLASS vAA, type@BBBB
-  DF_DA | DF_REF_A | DF_NON_NULL_DST,
-
-  // 1D MONITOR_ENTER vAA
-  DF_UA | DF_NULL_CHK_A | DF_REF_A,
-
-  // 1E MONITOR_EXIT vAA
-  DF_UA | DF_NULL_CHK_A | DF_REF_A,
-
-  // 1F CHK_CAST vAA, type@BBBB
-  DF_UA | DF_REF_A | DF_CHK_CAST | DF_UMS,
-
-  // 20 INSTANCE_OF vA, vB, type@CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
-
-  // 21 ARRAY_LENGTH vA, vB
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_CORE_A | DF_REF_B,
-
-  // 22 NEW_INSTANCE vAA, type@BBBB
-  DF_DA | DF_NON_NULL_DST | DF_REF_A | DF_UMS,
-
-  // 23 NEW_ARRAY vA, vB, type@CCCC
-  DF_DA | DF_UB | DF_NON_NULL_DST | DF_REF_A | DF_CORE_B | DF_UMS,
-
-  // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_NON_NULL_RET | DF_UMS,
-
-  // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
-  DF_FORMAT_3RC | DF_NON_NULL_RET | DF_UMS,
-
-  // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
-  DF_UA | DF_REF_A | DF_UMS,
-
-  // 27 THROW vAA
-  DF_UA | DF_REF_A | DF_UMS,
-
-  // 28 GOTO
-  DF_NOP,
-
-  // 29 GOTO_16
-  DF_NOP,
-
-  // 2A GOTO_32
-  DF_NOP,
-
-  // 2B PACKED_SWITCH vAA, +BBBBBBBB
-  DF_UA | DF_CORE_A,
-
-  // 2C SPARSE_SWITCH vAA, +BBBBBBBB
-  DF_UA | DF_CORE_A,
-
-  // 2D CMPL_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
-
-  // 2E CMPG_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
-
-  // 2F CMPL_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
-
-  // 30 CMPG_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
-
-  // 31 CMP_LONG vAA, vBB, vCC
-  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 32 IF_EQ vA, vB, +CCCC
-  DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
-  // 33 IF_NE vA, vB, +CCCC
-  DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
-  // 34 IF_LT vA, vB, +CCCC
-  DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
-  // 35 IF_GE vA, vB, +CCCC
-  DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
-  // 36 IF_GT vA, vB, +CCCC
-  DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
-  // 37 IF_LE vA, vB, +CCCC
-  DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
-  // 38 IF_EQZ vAA, +BBBB
-  DF_UA,
-
-  // 39 IF_NEZ vAA, +BBBB
-  DF_UA,
-
-  // 3A IF_LTZ vAA, +BBBB
-  DF_UA,
-
-  // 3B IF_GEZ vAA, +BBBB
-  DF_UA,
-
-  // 3C IF_GTZ vAA, +BBBB
-  DF_UA,
-
-  // 3D IF_LEZ vAA, +BBBB
-  DF_UA,
-
-  // 3E UNUSED_3E
-  DF_NOP,
-
-  // 3F UNUSED_3F
-  DF_NOP,
-
-  // 40 UNUSED_40
-  DF_NOP,
-
-  // 41 UNUSED_41
-  DF_NOP,
-
-  // 42 UNUSED_42
-  DF_NOP,
-
-  // 43 UNUSED_43
-  DF_NOP,
-
-  // 44 AGET vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 45 AGET_WIDE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 46 AGET_OBJECT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_A | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 47 AGET_BOOLEAN vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 48 AGET_BYTE vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 49 AGET_CHAR vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 4A AGET_SHORT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 4B APUT vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 4C APUT_WIDE vAA, vBB, vCC
-  DF_UA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 4D APUT_OBJECT vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_A | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 4E APUT_BOOLEAN vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 4F APUT_BYTE vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 50 APUT_CHAR vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 51 APUT_SHORT vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 52 IGET vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 53 IGET_WIDE vA, vB, field@CCCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 54 IGET_OBJECT vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 55 IGET_BOOLEAN vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 56 IGET_BYTE vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 57 IGET_CHAR vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 58 IGET_SHORT vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 59 IPUT vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 5A IPUT_WIDE vA, vB, field@CCCC
-  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 5B IPUT_OBJECT vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 5C IPUT_BOOLEAN vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 5D IPUT_BYTE vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 5E IPUT_CHAR vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 5F IPUT_SHORT vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 60 SGET vAA, field@BBBB
-  DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 61 SGET_WIDE vAA, field@BBBB
-  DF_DA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 62 SGET_OBJECT vAA, field@BBBB
-  DF_DA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 63 SGET_BOOLEAN vAA, field@BBBB
-  DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 64 SGET_BYTE vAA, field@BBBB
-  DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 65 SGET_CHAR vAA, field@BBBB
-  DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 66 SGET_SHORT vAA, field@BBBB
-  DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 67 SPUT vAA, field@BBBB
-  DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 68 SPUT_WIDE vAA, field@BBBB
-  DF_UA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 69 SPUT_OBJECT vAA, field@BBBB
-  DF_UA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 6A SPUT_BOOLEAN vAA, field@BBBB
-  DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 6B SPUT_BYTE vAA, field@BBBB
-  DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 6C SPUT_CHAR vAA, field@BBBB
-  DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 6D SPUT_SHORT vAA, field@BBBB
-  DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_CLINIT | DF_UMS,
-
-  // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 73 RETURN_VOID_NO_BARRIER
-  DF_NOP,
-
-  // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
-  DF_FORMAT_3RC | DF_CLINIT | DF_UMS,
-
-  // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 79 UNUSED_79
-  DF_NOP,
-
-  // 7A UNUSED_7A
-  DF_NOP,
-
-  // 7B NEG_INT vA, vB
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 7C NOT_INT vA, vB
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 7D NEG_LONG vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // 7E NOT_LONG vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // 7F NEG_FLOAT vA, vB
-  DF_DA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // 80 NEG_DOUBLE vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // 81 INT_TO_LONG vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 82 INT_TO_FLOAT vA, vB
-  DF_DA | DF_UB | DF_FP_A | DF_CORE_B,
-
-  // 83 INT_TO_DOUBLE vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_CORE_B,
-
-  // 84 LONG_TO_INT vA, vB
-  DF_DA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // 85 LONG_TO_FLOAT vA, vB
-  DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
-
-  // 86 LONG_TO_DOUBLE vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
-
-  // 87 FLOAT_TO_INT vA, vB
-  DF_DA | DF_UB | DF_FP_B | DF_CORE_A,
-
-  // 88 FLOAT_TO_LONG vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_FP_B | DF_CORE_A,
-
-  // 89 FLOAT_TO_DOUBLE vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_FP_B,
-
-  // 8A DOUBLE_TO_INT vA, vB
-  DF_DA | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
-
-  // 8B DOUBLE_TO_LONG vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
-
-  // 8C DOUBLE_TO_FLOAT vA, vB
-  DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // 8D INT_TO_BYTE vA, vB
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 8E INT_TO_CHAR vA, vB
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 8F INT_TO_SHORT vA, vB
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 90 ADD_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 91 SUB_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 92 MUL_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 93 DIV_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 94 REM_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 95 AND_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 96 OR_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 97 XOR_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 98 SHL_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 99 SHR_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9A USHR_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9B ADD_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9C SUB_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9D MUL_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9E DIV_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9F REM_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A0 AND_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A1 OR_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A2 XOR_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A3 SHL_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A4 SHR_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A5 USHR_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A6 ADD_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // A7 SUB_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // A8 MUL_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // A9 DIV_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AA REM_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AB ADD_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AC SUB_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AD MUL_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AE DIV_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AF REM_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // B0 ADD_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B1 SUB_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B2 MUL_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B3 DIV_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B4 REM_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B5 AND_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B6 OR_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B7 XOR_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B8 SHL_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B9 SHR_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // BA USHR_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // BB ADD_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // BC SUB_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // BD MUL_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // BE DIV_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // BF REM_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // C0 AND_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // C1 OR_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // C2 XOR_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // C3 SHL_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // C4 SHR_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // C5 USHR_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // C6 ADD_FLOAT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // C7 SUB_FLOAT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // C8 MUL_FLOAT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // C9 DIV_FLOAT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // CA REM_FLOAT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // CB ADD_DOUBLE_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // CC SUB_DOUBLE_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // CD MUL_DOUBLE_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // CE DIV_DOUBLE_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // CF REM_DOUBLE_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // D0 ADD_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D1 RSUB_INT vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D2 MUL_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D3 DIV_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D4 REM_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D5 AND_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D6 OR_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D7 XOR_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D8 ADD_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DA MUL_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DB DIV_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DC REM_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DD AND_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DE OR_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DF XOR_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // E0 SHL_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // E1 SHR_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // E2 USHR_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // E3 IGET_QUICK
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // E4 IGET_WIDE_QUICK
-  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // E5 IGET_OBJECT_QUICK
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // E6 IPUT_QUICK
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // E7 IPUT_WIDE_QUICK
-  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // E8 IPUT_OBJECT_QUICK
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // E9 INVOKE_VIRTUAL_QUICK
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // EA INVOKE_VIRTUAL_RANGE_QUICK
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // EB IPUT_BOOLEAN_QUICK vA, vB, index
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // EC IPUT_BYTE_QUICK vA, vB, index
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // ED IPUT_CHAR_QUICK vA, vB, index
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // EE IPUT_SHORT_QUICK vA, vB, index
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // EF IGET_BOOLEAN_QUICK vA, vB, index
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // F0 IGET_BYTE_QUICK vA, vB, index
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // F1 IGET_CHAR_QUICK vA, vB, index
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // F2 IGET_SHORT_QUICK vA, vB, index
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // F3 UNUSED_F3
-  DF_NOP,
-
-  // F4 UNUSED_F4
-  DF_NOP,
-
-  // F5 UNUSED_F5
-  DF_NOP,
-
-  // F6 UNUSED_F6
-  DF_NOP,
-
-  // F7 UNUSED_F7
-  DF_NOP,
-
-  // F8 UNUSED_F8
-  DF_NOP,
-
-  // F9 UNUSED_F9
-  DF_NOP,
-
-  // FA UNUSED_FA
-  DF_NOP,
-
-  // FB UNUSED_FB
-  DF_NOP,
-
-  // FC UNUSED_FC
-  DF_NOP,
-
-  // FD UNUSED_FD
-  DF_NOP,
-
-  // FE UNUSED_FE
-  DF_NOP,
-
-  // FF UNUSED_FF
-  DF_NOP,
-
-  // Beginning of extended MIR opcodes
-  // 100 MIR_PHI
-  DF_DA | DF_NULL_TRANSFER_N,
-
-  // 101 MIR_COPY
-  DF_DA | DF_UB | DF_IS_MOVE,
-
-  // 102 MIR_FUSED_CMPL_FLOAT
-  DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // 103 MIR_FUSED_CMPG_FLOAT
-  DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // 104 MIR_FUSED_CMPL_DOUBLE
-  DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // 105 MIR_FUSED_CMPG_DOUBLE
-  DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // 106 MIR_FUSED_CMP_LONG
-  DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // 107 MIR_NOP
-  DF_NOP,
-
-  // 108 MIR_NULL_CHECK
-  DF_UA | DF_REF_A | DF_NULL_CHK_A | DF_LVN,
-
-  // 109 MIR_RANGE_CHECK
-  0,
-
-  // 10A MIR_DIV_ZERO_CHECK
-  0,
-
-  // 10B MIR_CHECK
-  0,
-
-  // 10C MIR_CHECKPART2
-  0,
-
-  // 10D MIR_SELECT
-  DF_DA | DF_UB,
-
-  // 10E MirOpConstVector
-  0,
-
-  // 10F MirOpMoveVector
-  0,
-
-  // 110 MirOpPackedMultiply
-  0,
-