Remove Quick from tree.

So long, old friend.
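
This removes the Quick backend and its MIR-based optimization
framework: the MIR graph and pass driver, local and global value
numbering, the GVN-based dead code elimination, type inference and
the basic-block optimizations, plus the per-architecture Mir2Lir
code generators (arm, arm64, mips, x86) and their tests, build
rules and enum-operator-out headers. The dex-to-dex compiler, the
quick compiler callbacks and the DexFileMethodInliner are kept.
Compiler::Create() now falls through from kQuick to kOptimizing,
so requesting the Quick compiler yields the Optimizing compiler.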

Change-Id: I0241c798a34b92bf994fed83888da67d6e7f1891
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 33242f1..426c3ca 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -247,12 +247,6 @@
   runtime/reflection_test.cc \
   compiler/compiled_method_test.cc \
   compiler/debug/dwarf/dwarf_test.cc \
-  compiler/dex/gvn_dead_code_elimination_test.cc \
-  compiler/dex/global_value_numbering_test.cc \
-  compiler/dex/local_value_numbering_test.cc \
-  compiler/dex/mir_graph_test.cc \
-  compiler/dex/mir_optimization_test.cc \
-  compiler/dex/type_inference_test.cc \
   compiler/driver/compiled_method_storage_test.cc \
   compiler/driver/compiler_driver_test.cc \
   compiler/elf_writer_test.cc \
@@ -284,7 +278,6 @@
   compiler/utils/test_dex_file_builder_test.cc \
 
 COMPILER_GTEST_COMMON_SRC_FILES_all := \
-  compiler/dex/quick/quick_cfi_test.cc \
   compiler/jni/jni_cfi_test.cc \
   compiler/optimizing/codegen_test.cc \
   compiler/optimizing/constant_folding_test.cc \
@@ -374,7 +367,6 @@
 
 COMPILER_GTEST_HOST_SRC_FILES_x86 := \
   $(COMPILER_GTEST_COMMON_SRC_FILES_x86) \
-  compiler/dex/quick/x86/quick_assemble_x86_test.cc \
   compiler/utils/x86/assembler_x86_test.cc \
 
 COMPILER_GTEST_HOST_SRC_FILES_x86_64 := \
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 11ee6dd..f12f007 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -21,40 +21,12 @@
 LIBART_COMPILER_SRC_FILES := \
 	compiled_method.cc \
 	debug/elf_debug_writer.cc \
-	dex/global_value_numbering.cc \
-	dex/gvn_dead_code_elimination.cc \
-	dex/local_value_numbering.cc \
-	dex/type_inference.cc \
-	dex/quick/codegen_util.cc \
-	dex/quick/dex_file_method_inliner.cc \
-	dex/quick/dex_file_to_method_inliner_map.cc \
-	dex/quick/gen_common.cc \
-	dex/quick/gen_invoke.cc \
-	dex/quick/gen_loadstore.cc \
-	dex/quick/lazy_debug_frame_opcode_writer.cc \
-	dex/quick/local_optimizations.cc \
-	dex/quick/mir_to_lir.cc \
-	dex/quick/quick_compiler.cc \
-	dex/quick/ralloc_util.cc \
-	dex/quick/resource_mask.cc \
 	dex/dex_to_dex_compiler.cc \
-	dex/bb_optimizations.cc \
-	dex/compiler_ir.cc \
-	dex/mir_analysis.cc \
-	dex/mir_dataflow.cc \
-	dex/mir_field_info.cc \
-	dex/mir_graph.cc \
-	dex/mir_method_info.cc \
-	dex/mir_optimization.cc \
-	dex/post_opt_passes.cc \
-	dex/pass_driver_me_opts.cc \
-	dex/pass_driver_me_post_opt.cc \
-	dex/pass_manager.cc \
-	dex/ssa_transformation.cc \
 	dex/verified_method.cc \
 	dex/verification_results.cc \
-	dex/vreg_analysis.cc \
 	dex/quick_compiler_callbacks.cc \
+	dex/quick/dex_file_method_inliner.cc \
+	dex/quick/dex_file_to_method_inliner_map.cc \
 	driver/compiled_method_storage.cc \
 	driver/compiler_driver.cc \
 	driver/compiler_options.cc \
@@ -111,12 +83,6 @@
 	oat_writer.cc
 
 LIBART_COMPILER_SRC_FILES_arm := \
-	dex/quick/arm/assemble_arm.cc \
-	dex/quick/arm/call_arm.cc \
-	dex/quick/arm/fp_arm.cc \
-	dex/quick/arm/int_arm.cc \
-	dex/quick/arm/target_arm.cc \
-	dex/quick/arm/utility_arm.cc \
 	jni/quick/arm/calling_convention_arm.cc \
 	linker/arm/relative_patcher_arm_base.cc \
 	linker/arm/relative_patcher_thumb2.cc \
@@ -133,12 +99,6 @@
 # 32bit one.
 LIBART_COMPILER_SRC_FILES_arm64 := \
     $(LIBART_COMPILER_SRC_FILES_arm) \
-	dex/quick/arm64/assemble_arm64.cc \
-	dex/quick/arm64/call_arm64.cc \
-	dex/quick/arm64/fp_arm64.cc \
-	dex/quick/arm64/int_arm64.cc \
-	dex/quick/arm64/target_arm64.cc \
-	dex/quick/arm64/utility_arm64.cc \
 	jni/quick/arm64/calling_convention_arm64.cc \
 	linker/arm64/relative_patcher_arm64.cc \
 	optimizing/code_generator_arm64.cc \
@@ -150,12 +110,6 @@
 	utils/arm64/managed_register_arm64.cc \
 
 LIBART_COMPILER_SRC_FILES_mips := \
-	dex/quick/mips/assemble_mips.cc \
-	dex/quick/mips/call_mips.cc \
-	dex/quick/mips/fp_mips.cc \
-	dex/quick/mips/int_mips.cc \
-	dex/quick/mips/target_mips.cc \
-	dex/quick/mips/utility_mips.cc \
 	jni/quick/mips/calling_convention_mips.cc \
 	optimizing/code_generator_mips.cc \
 	optimizing/intrinsics_mips.cc \
@@ -172,12 +126,6 @@
 
 
 LIBART_COMPILER_SRC_FILES_x86 := \
-	dex/quick/x86/assemble_x86.cc \
-	dex/quick/x86/call_x86.cc \
-	dex/quick/x86/fp_x86.cc \
-	dex/quick/x86/int_x86.cc \
-	dex/quick/x86/target_x86.cc \
-	dex/quick/x86/utility_x86.cc \
 	jni/quick/x86/calling_convention_x86.cc \
 	linker/x86/relative_patcher_x86.cc \
 	linker/x86/relative_patcher_x86_base.cc \
@@ -200,26 +148,20 @@
 LIBART_COMPILER_CFLAGS :=
 
 LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
-  dex/quick/resource_mask.h \
   dex/compiler_enums.h \
   dex/dex_to_dex_compiler.h \
-  dex/global_value_numbering.h \
-  dex/pass_me.h \
   driver/compiler_driver.h \
   driver/compiler_options.h \
   image_writer.h \
   optimizing/locations.h
 
 LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm := \
-  dex/quick/arm/arm_lir.h \
   utils/arm/constants_arm.h
 
 LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm64 := \
-  $(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm) \
-  dex/quick/arm64/arm64_lir.h
+  $(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm)
 
 LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips := \
-  dex/quick/mips/mips_lir.h \
   utils/mips/assembler_mips.h
 
 LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips64 := \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 6075cd6..6483ef6 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -21,7 +21,6 @@
 #include "art_method.h"
 #include "class_linker.h"
 #include "compiled_method.h"
-#include "dex/pass_manager.h"
 #include "dex/quick_compiler_callbacks.h"
 #include "dex/quick/dex_file_to_method_inliner_map.h"
 #include "dex/verification_results.h"
diff --git a/compiler/compiler.cc b/compiler/compiler.cc
index 223affa..1626317 100644
--- a/compiler/compiler.cc
+++ b/compiler/compiler.cc
@@ -17,7 +17,6 @@
 #include "compiler.h"
 
 #include "base/logging.h"
-#include "dex/quick/quick_compiler_factory.h"
 #include "driver/compiler_driver.h"
 #include "optimizing/optimizing_compiler.h"
 #include "utils.h"
@@ -27,8 +26,7 @@
 Compiler* Compiler::Create(CompilerDriver* driver, Compiler::Kind kind) {
   switch (kind) {
     case kQuick:
-      return CreateQuickCompiler(driver);
-
+      // TODO: Remove Quick in options.
     case kOptimizing:
       return CreateOptimizingCompiler(driver);
 
diff --git a/compiler/dex/bb_optimizations.cc b/compiler/dex/bb_optimizations.cc
deleted file mode 100644
index 11a7e44..0000000
--- a/compiler/dex/bb_optimizations.cc
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "bb_optimizations.h"
-#include "dataflow_iterator.h"
-#include "dataflow_iterator-inl.h"
-
-namespace art {
-
-/*
- * Code Layout pass implementation start.
- */
-bool CodeLayout::Worker(PassDataHolder* data) const {
-  DCHECK(data != nullptr);
-  PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-  CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-  DCHECK(c_unit != nullptr);
-  BasicBlock* bb = pass_me_data_holder->bb;
-  DCHECK(bb != nullptr);
-  c_unit->mir_graph->LayoutBlocks(bb);
-  // No need of repeating, so just return false.
-  return false;
-}
-
-/*
- * BasicBlock Combine pass implementation start.
- */
-bool BBCombine::Worker(PassDataHolder* data) const {
-  DCHECK(data != nullptr);
-  PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-  CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-  DCHECK(c_unit != nullptr);
-  BasicBlock* bb = pass_me_data_holder->bb;
-  DCHECK(bb != nullptr);
-  c_unit->mir_graph->CombineBlocks(bb);
-
-  // No need of repeating, so just return false.
-  return false;
-}
-
-/*
- * MethodUseCount pass implementation start.
- */
-bool MethodUseCount::Gate(const PassDataHolder* data) const {
-  DCHECK(data != nullptr);
-  CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-  DCHECK(c_unit != nullptr);
-  // First initialize the data.
-  c_unit->mir_graph->InitializeMethodUses();
-
-  // Now check if the pass is to be ignored.
-  bool res = ((c_unit->disable_opt & (1 << kPromoteRegs)) == 0);
-
-  return res;
-}
-
-bool MethodUseCount::Worker(PassDataHolder* data) const {
-  DCHECK(data != nullptr);
-  PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-  CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-  DCHECK(c_unit != nullptr);
-  BasicBlock* bb = pass_me_data_holder->bb;
-  DCHECK(bb != nullptr);
-  c_unit->mir_graph->CountUses(bb);
-  // No need of repeating, so just return false.
-  return false;
-}
-
-}  // namespace art
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
deleted file mode 100644
index 02d5327..0000000
--- a/compiler/dex/bb_optimizations.h
+++ /dev/null
@@ -1,452 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_BB_OPTIMIZATIONS_H_
-#define ART_COMPILER_DEX_BB_OPTIMIZATIONS_H_
-
-#include "base/casts.h"
-#include "compiler_ir.h"
-#include "dex_flags.h"
-#include "pass_me.h"
-#include "mir_graph.h"
-
-namespace art {
-
-/**
- * @class String Change
- * @brief Converts calls to String.<init> to StringFactory instead.
- */
-class StringChange : public PassME {
- public:
-  StringChange() : PassME("StringChange", kNoNodes) {
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->StringChange();
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->HasInvokes();
-  }
-};
-
-/**
- * @class CacheFieldLoweringInfo
- * @brief Cache the lowering info for fields used by IGET/IPUT/SGET/SPUT insns.
- */
-class CacheFieldLoweringInfo : public PassME {
- public:
-  CacheFieldLoweringInfo() : PassME("CacheFieldLoweringInfo", kNoNodes) {
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->DoCacheFieldLoweringInfo();
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->HasFieldAccess();
-  }
-};
-
-/**
- * @class CacheMethodLoweringInfo
- * @brief Cache the lowering info for methods called by INVOKEs.
- */
-class CacheMethodLoweringInfo : public PassME {
- public:
-  CacheMethodLoweringInfo() : PassME("CacheMethodLoweringInfo", kNoNodes) {
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->DoCacheMethodLoweringInfo();
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->HasInvokes();
-  }
-};
-
-/**
- * @class SpecialMethodInliner
- * @brief Performs method inlining pass on special kinds of methods.
- * @details Special methods are methods that fall in one of the following categories:
- * empty, instance getter, instance setter, argument return, and constant return.
- */
-class SpecialMethodInliner : public PassME {
- public:
-  SpecialMethodInliner() : PassME("SpecialMethodInliner") {
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->InlineSpecialMethodsGate();
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->InlineSpecialMethodsStart();
-  }
-
-  bool Worker(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-    CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-    DCHECK(c_unit != nullptr);
-    BasicBlock* bb = pass_me_data_holder->bb;
-    DCHECK(bb != nullptr);
-    c_unit->mir_graph->InlineSpecialMethods(bb);
-    // No need of repeating, so just return false.
-    return false;
-  }
-
-  void End(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->InlineSpecialMethodsEnd();
-  }
-};
-
-/**
- * @class CodeLayout
- * @brief Perform the code layout pass.
- */
-class CodeLayout : public PassME {
- public:
-  CodeLayout() : PassME("CodeLayout", kAllNodes, kOptimizationBasicBlockChange, "2_post_layout_cfg") {
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->VerifyDataflow();
-    c_unit->mir_graph->ClearAllVisitedFlags();
-  }
-
-  bool Worker(PassDataHolder* data) const;
-};
-
-/**
- * @class NullCheckElimination
- * @brief Null check elimination pass.
- */
-class NullCheckElimination : public PassME {
- public:
-  NullCheckElimination()
-    : PassME("NCE", kRepeatingPreOrderDFSTraversal, "3_post_nce_cfg") {
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->EliminateNullChecksGate();
-  }
-
-  bool Worker(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-    CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-    DCHECK(c_unit != nullptr);
-    BasicBlock* bb = pass_me_data_holder->bb;
-    DCHECK(bb != nullptr);
-    return c_unit->mir_graph->EliminateNullChecks(bb);
-  }
-
-  void End(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->EliminateNullChecksEnd();
-  }
-};
-
-class ClassInitCheckElimination : public PassME {
- public:
-  ClassInitCheckElimination()
-    : PassME("ClInitCheckElimination", kRepeatingPreOrderDFSTraversal) {
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->EliminateClassInitChecksGate();
-  }
-
-  bool Worker(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-    CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-    DCHECK(c_unit != nullptr);
-    BasicBlock* bb = pass_me_data_holder->bb;
-    DCHECK(bb != nullptr);
-    return c_unit->mir_graph->EliminateClassInitChecks(bb);
-  }
-
-  void End(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->EliminateClassInitChecksEnd();
-  }
-};
-
-/**
- * @class GlobalValueNumberingPass
- * @brief Performs the global value numbering pass.
- */
-class GlobalValueNumberingPass : public PassME {
- public:
-  GlobalValueNumberingPass()
-    : PassME("GVN", kLoopRepeatingTopologicalSortTraversal, "4_post_gvn_cfg") {
-  }
-
-  bool Gate(const PassDataHolder* data) const OVERRIDE {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->ApplyGlobalValueNumberingGate();
-  }
-
-  bool Worker(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-    CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-    DCHECK(c_unit != nullptr);
-    BasicBlock* bb = pass_me_data_holder->bb;
-    DCHECK(bb != nullptr);
-    return c_unit->mir_graph->ApplyGlobalValueNumbering(bb);
-  }
-
-  void End(PassDataHolder* data) const OVERRIDE {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->ApplyGlobalValueNumberingEnd();
-  }
-};
-
-/**
- * @class DeadCodeEliminationPass
- * @brief Performs the GVN-based dead code elimination pass.
- */
-class DeadCodeEliminationPass : public PassME {
- public:
-  DeadCodeEliminationPass() : PassME("DCE", kPreOrderDFSTraversal, "4_post_dce_cfg") {
-  }
-
-  bool Gate(const PassDataHolder* data) const OVERRIDE {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->EliminateDeadCodeGate();
-  }
-
-  bool Worker(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-    CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-    DCHECK(c_unit != nullptr);
-    BasicBlock* bb = pass_me_data_holder->bb;
-    DCHECK(bb != nullptr);
-    return c_unit->mir_graph->EliminateDeadCode(bb);
-  }
-
-  void End(PassDataHolder* data) const OVERRIDE {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->EliminateDeadCodeEnd();
-  }
-};
-
-/**
- * @class GlobalValueNumberingCleanupPass
- * @brief Performs the cleanup after global value numbering pass and the dependent
- *        dead code elimination pass that needs the GVN data.
- */
-class GlobalValueNumberingCleanupPass : public PassME {
- public:
-  GlobalValueNumberingCleanupPass()
-    : PassME("GVNCleanup", kNoNodes, "") {
-  }
-
-  void Start(PassDataHolder* data) const OVERRIDE {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->GlobalValueNumberingCleanup();
-  }
-};
-
-/**
- * @class BBCombine
- * @brief Perform the basic block combination pass.
- */
-class BBCombine : public PassME {
- public:
-  BBCombine() : PassME("BBCombine", kPreOrderDFSTraversal, "5_post_bbcombine_cfg") {
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->HasTryCatchBlocks() ||
-        ((c_unit->disable_opt & (1 << kSuppressExceptionEdges)) != 0);
-  }
-
-  bool Worker(PassDataHolder* data) const;
-};
-
-/**
- * @class ConstantPropagation
- * @brief Perform a constant propagation pass.
- */
-class ConstantPropagation : public PassME {
- public:
-  ConstantPropagation() : PassME("ConstantPropagation") {
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->InitializeConstantPropagation();
-  }
-
-  bool Worker(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    BasicBlock* bb = down_cast<PassMEDataHolder*>(data)->bb;
-    DCHECK(bb != nullptr);
-    c_unit->mir_graph->DoConstantPropagation(bb);
-    // No need of repeating, so just return false.
-    return false;
-  }
-};
-
-/**
- * @class MethodUseCount
- * @brief Count the register uses of the method
- */
-class MethodUseCount : public PassME {
- public:
-  MethodUseCount() : PassME("UseCount") {
-  }
-
-  bool Worker(PassDataHolder* data) const;
-
-  bool Gate(const PassDataHolder* data) const;
-};
-
-/**
- * @class BasicBlock Optimizations
- * @brief Any simple BasicBlock optimization can be put here.
- */
-class BBOptimizations : public PassME {
- public:
-  BBOptimizations()
-      : PassME("BBOptimizations", kNoNodes, kOptimizationBasicBlockChange, "5_post_bbo_cfg") {
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return ((c_unit->disable_opt & (1 << kBBOpt)) == 0);
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->BasicBlockOptimizationStart();
-
-    /*
-     * This pass has a different ordering depending on the suppress exception,
-     * so do the pass here for now:
-     *   - Later, the Start should just change the ordering and we can move the extended
-     *     creation into the pass driver's main job with a new iterator
-     */
-    c_unit->mir_graph->BasicBlockOptimization();
-  }
-
-  void End(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->BasicBlockOptimizationEnd();
-    down_cast<PassMEDataHolder*>(data)->dirty = !c_unit->mir_graph->DfsOrdersUpToDate();
-  }
-};
-
-/**
- * @class SuspendCheckElimination
- * @brief Any simple BasicBlock optimization can be put here.
- */
-class SuspendCheckElimination : public PassME {
- public:
-  SuspendCheckElimination()
-    : PassME("SuspendCheckElimination", kTopologicalSortTraversal, "6_post_sce_cfg") {
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return c_unit->mir_graph->EliminateSuspendChecksGate();
-  }
-
-  bool Worker(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-    CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-    DCHECK(c_unit != nullptr);
-    BasicBlock* bb = pass_me_data_holder->bb;
-    DCHECK(bb != nullptr);
-    return c_unit->mir_graph->EliminateSuspendChecks(bb);
-  }
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_BB_OPTIMIZATIONS_H_
diff --git a/compiler/dex/compiler_ir.cc b/compiler/dex/compiler_ir.cc
deleted file mode 100644
index 6e1853b..0000000
--- a/compiler/dex/compiler_ir.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "compiler_ir.h"
-
-#include "arch/instruction_set_features.h"
-#include "base/dumpable.h"
-#include "dex_flags.h"
-#include "dex/quick/mir_to_lir.h"
-#include "driver/compiler_driver.h"
-#include "mir_graph.h"
-#include "utils.h"
-
-namespace art {
-
-CompilationUnit::CompilationUnit(ArenaPool* pool, InstructionSet isa, CompilerDriver* driver,
-                                 ClassLinker* linker)
-  : compiler_driver(driver),
-    class_linker(linker),
-    dex_file(nullptr),
-    class_loader(nullptr),
-    class_def_idx(0),
-    method_idx(0),
-    access_flags(0),
-    invoke_type(kDirect),
-    shorty(nullptr),
-    disable_opt(0),
-    enable_debug(0),
-    verbose(false),
-    instruction_set(isa),
-    target64(Is64BitInstructionSet(isa)),
-    arena(pool),
-    arena_stack(pool),
-    mir_graph(nullptr),
-    cg(nullptr),
-    timings("QuickCompiler", true, false),
-    print_pass(false) {
-}
-
-CompilationUnit::~CompilationUnit() {
-  overridden_pass_options.clear();
-}
-
-void CompilationUnit::StartTimingSplit(const char* label) {
-  if (compiler_driver->GetDumpPasses()) {
-    timings.StartTiming(label);
-  }
-}
-
-void CompilationUnit::NewTimingSplit(const char* label) {
-  if (compiler_driver->GetDumpPasses()) {
-    timings.EndTiming();
-    timings.StartTiming(label);
-  }
-}
-
-void CompilationUnit::EndTiming() {
-  if (compiler_driver->GetDumpPasses()) {
-    timings.EndTiming();
-    if (enable_debug & (1 << kDebugTimings)) {
-      LOG(INFO) << "TIMINGS " << PrettyMethod(method_idx, *dex_file);
-      LOG(INFO) << Dumpable<TimingLogger>(timings);
-    }
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
deleted file mode 100644
index 5203355..0000000
--- a/compiler/dex/compiler_ir.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_COMPILER_IR_H_
-#define ART_COMPILER_DEX_COMPILER_IR_H_
-
-#include "jni.h"
-#include <string>
-#include <vector>
-
-#include "arch/instruction_set.h"
-#include "base/arena_allocator.h"
-#include "base/scoped_arena_allocator.h"
-#include "base/timing_logger.h"
-#include "invoke_type.h"
-#include "safe_map.h"
-
-namespace art {
-
-class ClassLinker;
-class CompilerDriver;
-class DexFile;
-class Mir2Lir;
-class MIRGraph;
-
-constexpr size_t kOptionStringMaxLength = 2048;
-
-/**
- * Structure abstracting pass option values, which can be of type string or integer.
- */
-struct OptionContent {
-  OptionContent(const OptionContent& option) :
-    type(option.type), container(option.container, option.type) {}
-
-  explicit OptionContent(const char* value) :
-    type(kString), container(value) {}
-
-  explicit OptionContent(int value) :
-    type(kInteger), container(value) {}
-
-  explicit OptionContent(int64_t value) :
-    type(kInteger), container(value) {}
-
-  ~OptionContent() {
-    if (type == kString) {
-      container.StringDelete();
-    }
-  }
-
-  /**
-   * Allows for a transparent display of the option content.
-   */
-  friend std::ostream& operator<<(std::ostream& out, const OptionContent& option) {
-    if (option.type == kString) {
-      out << option.container.s;
-    } else {
-      out << option.container.i;
-    }
-
-    return out;
-  }
-
-  inline const char* GetString() const {
-    return container.s;
-  }
-
-  inline int64_t GetInteger() const {
-    return container.i;
-  }
-
-  /**
-   * @brief Used to compare a string option value to a given @p value.
-   * @details Will return whether the internal string option is equal to
-   * the parameter @p value. It will return false if the type of the
-   * object is not a string.
-   * @param value The string to compare to.
-   * @return Returns whether the internal string option is equal to the
-   * parameter @p value.
-  */
-  inline bool Equals(const char* value) const {
-    DCHECK(value != nullptr);
-    if (type != kString) {
-      return false;
-    }
-    return !strncmp(container.s, value, kOptionStringMaxLength);
-  }
-
-  /**
-   * @brief Used to compare an integer option value to a given @p value.
-   * @details Will return whether the internal integer option is equal to
-   * the parameter @p value. It will return false if the type of the
-   * object is not an integer.
-   * @param value The integer to compare to.
-   * @return Returns whether the internal integer option is equal to the
-   * parameter @p value.
-  */
-  inline bool Equals(int64_t value) const {
-    if (type != kInteger) {
-      return false;
-    }
-    return container.i == value;
-  }
-
-  /**
-   * Describes the type of parameters allowed as option values.
-   */
-  enum OptionType {
-    kString = 0,
-    kInteger
-  };
-
-  OptionType type;
-
- private:
-  /**
-   * Union containing the option value of either type.
-   */
-  union OptionContainer {
-    OptionContainer(const OptionContainer& c, OptionType t) {
-      if (t == kString) {
-        DCHECK(c.s != nullptr);
-        s = strndup(c.s, kOptionStringMaxLength);
-      } else {
-        i = c.i;
-      }
-    }
-
-    explicit OptionContainer(const char* value) {
-      DCHECK(value != nullptr);
-      s = strndup(value, kOptionStringMaxLength);
-    }
-
-    explicit OptionContainer(int64_t value) : i(value) {}
-    ~OptionContainer() {}
-
-    void StringDelete() {
-      if (s != nullptr) {
-        free(s);
-      }
-    }
-
-    char* s;
-    int64_t i;
-  };
-
-  OptionContainer container;
-};
-
-struct CompilationUnit {
-  CompilationUnit(ArenaPool* pool, InstructionSet isa, CompilerDriver* driver, ClassLinker* linker);
-  ~CompilationUnit();
-
-  void StartTimingSplit(const char* label);
-  void NewTimingSplit(const char* label);
-  void EndTiming();
-
-  /*
-   * Fields needed/generated by common frontend and generally used throughout
-   * the compiler.
-  */
-  CompilerDriver* const compiler_driver;
-  ClassLinker* const class_linker;        // Linker to resolve fields and methods.
-  const DexFile* dex_file;                // DexFile containing the method being compiled.
-  jobject class_loader;                   // compiling method's class loader.
-  uint16_t class_def_idx;                 // compiling method's defining class definition index.
-  uint32_t method_idx;                    // compiling method's index into method_ids of DexFile.
-  uint32_t access_flags;                  // compiling method's access flags.
-  InvokeType invoke_type;                 // compiling method's invocation type.
-  const char* shorty;                     // compiling method's shorty.
-  uint32_t disable_opt;                   // opt_control_vector flags.
-  uint32_t enable_debug;                  // debugControlVector flags.
-  bool verbose;
-  const InstructionSet instruction_set;
-  const bool target64;
-
-  // TODO: move memory management to mir_graph, or just switch to using standard containers.
-  ArenaAllocator arena;
-  ArenaStack arena_stack;  // Arenas for ScopedArenaAllocator.
-
-  std::unique_ptr<MIRGraph> mir_graph;   // MIR container.
-  std::unique_ptr<Mir2Lir> cg;           // Target-specific codegen.
-  TimingLogger timings;
-  bool print_pass;                 // Do we want to print a pass or not?
-
-  /**
-   * @brief Holds pass options for current pass being applied to compilation unit.
-   * @details This is updated for every pass to contain the overridden pass options
-   * that were specified by user. The pass itself will check this to see if the
-   * default settings have been changed. The key is simply the option string without
-   * the pass name.
-   */
-  SafeMap<const std::string, const OptionContent> overridden_pass_options;
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_COMPILER_IR_H_
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
deleted file mode 100644
index e9402e3..0000000
--- a/compiler/dex/dataflow_iterator-inl.h
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_DATAFLOW_ITERATOR_INL_H_
-#define ART_COMPILER_DEX_DATAFLOW_ITERATOR_INL_H_
-
-#include "dataflow_iterator.h"
-
-namespace art {
-
-// Single forward pass over the nodes.
-inline BasicBlock* DataflowIterator::ForwardSingleNext() {
-  BasicBlock* res = nullptr;
-
-  // Are we not yet at the end?
-  if (idx_ < end_idx_) {
-    // Get the next index.
-    BasicBlockId bb_id = (*block_id_list_)[idx_];
-    res = mir_graph_->GetBasicBlock(bb_id);
-    idx_++;
-  }
-
-  return res;
-}
-
-// Repeat full forward passes over all nodes until no change occurs during a complete pass.
-inline BasicBlock* DataflowIterator::ForwardRepeatNext() {
-  BasicBlock* res = nullptr;
-
-  // Are we at the end and have we changed something?
-  if ((idx_ >= end_idx_) && changed_ == true) {
-    // Reset the index.
-    idx_ = start_idx_;
-    repeats_++;
-    changed_ = false;
-  }
-
-  // Are we not yet at the end?
-  if (idx_ < end_idx_) {
-    // Get the BasicBlockId.
-    BasicBlockId bb_id = (*block_id_list_)[idx_];
-    res = mir_graph_->GetBasicBlock(bb_id);
-    idx_++;
-  }
-
-  return res;
-}
-
-// Single reverse pass over the nodes.
-inline BasicBlock* DataflowIterator::ReverseSingleNext() {
-  BasicBlock* res = nullptr;
-
-  // Are we not yet at the end?
-  if (idx_ >= 0) {
-    // Get the BasicBlockId.
-    BasicBlockId bb_id = (*block_id_list_)[idx_];
-    res = mir_graph_->GetBasicBlock(bb_id);
-    idx_--;
-  }
-
-  return res;
-}
-
-// Repeat full backwards passes over all nodes until no change occurs during a complete pass.
-inline BasicBlock* DataflowIterator::ReverseRepeatNext() {
-  BasicBlock* res = nullptr;
-
-  // Are we done and we changed something during the last iteration?
-  if ((idx_ < 0) && changed_) {
-    // Reset the index.
-    idx_ = start_idx_;
-    repeats_++;
-    changed_ = false;
-  }
-
-  // Are we not yet done?
-  if (idx_ >= 0) {
-    // Get the BasicBlockId.
-    BasicBlockId bb_id = (*block_id_list_)[idx_];
-    res = mir_graph_->GetBasicBlock(bb_id);
-    idx_--;
-  }
-
-  return res;
-}
-
-// AllNodes uses the existing block list, and should be considered unordered.
-inline BasicBlock* AllNodesIterator::Next(bool had_change) {
-  // Update changed: if had_changed is true, we remember it for the whole iteration.
-  changed_ |= had_change;
-
-  BasicBlock* res = nullptr;
-  while (idx_ != end_idx_) {
-    BasicBlock* bb = mir_graph_->GetBlockList()[idx_++];
-    DCHECK(bb != nullptr);
-    if (!bb->hidden) {
-      res = bb;
-      break;
-    }
-  }
-
-  return res;
-}
-
-inline BasicBlock* TopologicalSortIterator::Next(bool had_change) {
-  // Update changed: if had_changed is true, we remember it for the whole iteration.
-  changed_ |= had_change;
-
-  while (loop_head_stack_->size() != 0u &&
-      (*loop_ends_)[loop_head_stack_->back().first] == idx_) {
-    loop_head_stack_->pop_back();
-  }
-
-  if (idx_ == end_idx_) {
-    return nullptr;
-  }
-
-  // Get next block and return it.
-  BasicBlockId idx = idx_;
-  idx_ += 1;
-  BasicBlock* bb = mir_graph_->GetBasicBlock((*block_id_list_)[idx]);
-  DCHECK(bb != nullptr);
-  if ((*loop_ends_)[idx] != 0u) {
-    loop_head_stack_->push_back(std::make_pair(idx, false));  // Not recalculating.
-  }
-  return bb;
-}
-
-inline BasicBlock* LoopRepeatingTopologicalSortIterator::Next(bool had_change) {
-  if (idx_ != 0) {
-    // Mark last processed block visited.
-    BasicBlock* bb = mir_graph_->GetBasicBlock((*block_id_list_)[idx_ - 1]);
-    bb->visited = true;
-    if (had_change) {
-      // If we had a change we need to revisit the children.
-      ChildBlockIterator iter(bb, mir_graph_);
-      for (BasicBlock* child_bb = iter.Next(); child_bb != nullptr; child_bb = iter.Next()) {
-        child_bb->visited = false;
-      }
-    }
-  }
-
-  while (true) {
-    // Pop loops we have left and check if we need to recalculate one of them.
-    // NOTE: We need to do this even if idx_ == end_idx_.
-    while (loop_head_stack_->size() != 0u &&
-        (*loop_ends_)[loop_head_stack_->back().first] == idx_) {
-      auto top = loop_head_stack_->back();
-      uint16_t loop_head_idx = top.first;
-      bool recalculated = top.second;
-      loop_head_stack_->pop_back();
-      BasicBlock* loop_head = mir_graph_->GetBasicBlock((*block_id_list_)[loop_head_idx]);
-      DCHECK(loop_head != nullptr);
-      if (!recalculated || !loop_head->visited) {
-        // Recalculating this loop.
-        loop_head_stack_->push_back(std::make_pair(loop_head_idx, true));
-        idx_ = loop_head_idx + 1;
-        return loop_head;
-      }
-    }
-
-    if (idx_ == end_idx_) {
-      return nullptr;
-    }
-
-    // Get next block and return it if unvisited.
-    BasicBlockId idx = idx_;
-    idx_ += 1;
-    BasicBlock* bb = mir_graph_->GetBasicBlock((*block_id_list_)[idx]);
-    DCHECK(bb != nullptr);
-    if ((*loop_ends_)[idx] != 0u) {
-      // If bb->visited is false, the loop needs to be processed from scratch.
-      // Otherwise we mark it as recalculating; for a natural loop we will not
-      // need to recalculate any block in the loop anyway, and for unnatural
-      // loops we will recalculate the loop head only if one of its predecessors
-      // actually changes.
-      bool recalculating = bb->visited;
-      loop_head_stack_->push_back(std::make_pair(idx, recalculating));
-    }
-    if (!bb->visited) {
-      return bb;
-    }
-  }
-}
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_DATAFLOW_ITERATOR_INL_H_
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
deleted file mode 100644
index 097c2a4..0000000
--- a/compiler/dex/dataflow_iterator.h
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_DATAFLOW_ITERATOR_H_
-#define ART_COMPILER_DEX_DATAFLOW_ITERATOR_H_
-
-#include "base/logging.h"
-#include "mir_graph.h"
-
-namespace art {
-
-  /*
-   * This class supports iterating over lists of basic blocks in various
-   * interesting orders.  Note that for efficiency, the visit orders have been pre-computed.
-   * The order itself will not change during the iteration.  However, for some uses,
-   * auxiliary data associated with the basic blocks may be changed during the iteration,
-   * necessitating another pass over the list.  If this behavior is required, use the
-   * "Repeating" variant.  For the repeating variant, the caller must tell the iterator
-   * whether a change has been made that necessitates another pass.  Note that calling Next(true)
-   * does not affect the iteration order or short-circuit the current pass - it simply tells
-   * the iterator that once it has finished walking through the block list it should reset and
-   * do another full pass through the list.
-   */
-  /**
-   * @class DataflowIterator
-   * @brief The main iterator class, all other iterators derive of this one to define an iteration order.
-   */
-  class DataflowIterator {
-    public:
-      virtual ~DataflowIterator() {}
-
-      /**
-       * @brief How many times have we repeated the iterator across the BasicBlocks?
-       * @return the number of iteration repetitions.
-       */
-      int32_t GetRepeatCount() { return repeats_; }
-
-      /**
-       * @brief Has the user of the iterator reported a change yet?
-       * @details Does not mean there was or not a change, it is only whether the user passed a true to the Next function call.
-       * @return whether the user of the iterator reported a change yet.
-       */
-      int32_t GetChanged() { return changed_; }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) = 0;
-
-    protected:
-      /**
-       * @param mir_graph the MIRGraph we are interested in.
-       * @param start_idx the first index we want to iterate across.
-       * @param end_idx the last index we want to iterate (not included).
-       */
-      DataflowIterator(MIRGraph* mir_graph, int32_t start_idx, int32_t end_idx)
-          : mir_graph_(mir_graph),
-            start_idx_(start_idx),
-            end_idx_(end_idx),
-            block_id_list_(nullptr),
-            idx_(0),
-            repeats_(0),
-            changed_(false) {}
-
-      /**
-       * @brief Get the next BasicBlock iterating forward.
-       * @return the next BasicBlock iterating forward.
-       */
-      virtual BasicBlock* ForwardSingleNext() ALWAYS_INLINE;
-
-      /**
-       * @brief Get the next BasicBlock iterating backward.
-       * @return the next BasicBlock iterating backward.
-       */
-      virtual BasicBlock* ReverseSingleNext() ALWAYS_INLINE;
-
-      /**
-       * @brief Get the next BasicBlock iterating forward, restart if a BasicBlock was reported changed during the last iteration.
-       * @return the next BasicBlock iterating forward, with chance of repeating the iteration.
-       */
-      virtual BasicBlock* ForwardRepeatNext() ALWAYS_INLINE;
-
-      /**
-       * @brief Get the next BasicBlock iterating backward, restart if a BasicBlock was reported changed during the last iteration.
-       * @return the next BasicBlock iterating backward, with chance of repeating the iteration.
-       */
-      virtual BasicBlock* ReverseRepeatNext() ALWAYS_INLINE;
-
-      MIRGraph* const mir_graph_;                       /**< @brief the MIRGraph */
-      const int32_t start_idx_;                         /**< @brief the start index for the iteration */
-      const int32_t end_idx_;                           /**< @brief the last index for the iteration */
-      const ArenaVector<BasicBlockId>* block_id_list_;  /**< @brief the list of BasicBlocks we want to iterate on */
-      int32_t idx_;                                     /**< @brief Current index for the iterator */
-      int32_t repeats_;                                 /**< @brief Number of repeats over the iteration */
-      bool changed_;                                    /**< @brief Has something changed during the current iteration? */
-  };  // DataflowIterator
-
-  /**
-   * @class PreOrderDfsIterator
-   * @brief Used to perform a Pre-order Depth-First-Search Iteration of a MIRGraph.
-   */
-  class PreOrderDfsIterator : public DataflowIterator {
-    public:
-      /**
-       * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-       * @param mir_graph The MIRGraph considered.
-       */
-      explicit PreOrderDfsIterator(MIRGraph* mir_graph)
-          : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
-        // Extra setup for the PreOrderDfsIterator.
-        idx_ = start_idx_;
-        block_id_list_ = &mir_graph->GetDfsOrder();
-      }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) {
-        // Update changed: if had_changed is true, we remember it for the whole iteration.
-        changed_ |= had_change;
-
-        return ForwardSingleNext();
-      }
-  };
-
-  /**
-   * @class RepeatingPreOrderDfsIterator
-   * @brief Used to perform a Repeating Pre-order Depth-First-Search Iteration of a MIRGraph.
-   * @details If there is a change during an iteration, the iteration starts over at the end of the iteration.
-   */
-  class RepeatingPreOrderDfsIterator : public DataflowIterator {
-    public:
-      /**
-       * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-       * @param mir_graph The MIRGraph considered.
-       */
-      explicit RepeatingPreOrderDfsIterator(MIRGraph* mir_graph)
-          : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
-        // Extra setup for the RepeatingPreOrderDfsIterator.
-        idx_ = start_idx_;
-        block_id_list_ = &mir_graph->GetDfsOrder();
-      }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) {
-        // Update changed: if had_changed is true, we remember it for the whole iteration.
-        changed_ |= had_change;
-
-        return ForwardRepeatNext();
-      }
-  };
-
-  /**
-   * @class RepeatingPostOrderDfsIterator
-   * @brief Used to perform a Repeating Post-order Depth-First-Search Iteration of a MIRGraph.
-   * @details If there is a change during an iteration, the iteration starts over at the end of the iteration.
-   */
-  class RepeatingPostOrderDfsIterator : public DataflowIterator {
-    public:
-      /**
-       * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-       * @param mir_graph The MIRGraph considered.
-       */
-      explicit RepeatingPostOrderDfsIterator(MIRGraph* mir_graph)
-          : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
-        // Extra setup for the RepeatingPostOrderDfsIterator.
-        idx_ = start_idx_;
-        block_id_list_ = &mir_graph->GetDfsPostOrder();
-      }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) {
-        // Update changed: if had_changed is true, we remember it for the whole iteration.
-        changed_ |= had_change;
-
-        return ForwardRepeatNext();
-      }
-  };
-
-  /**
-   * @class ReversePostOrderDfsIterator
-   * @brief Used to perform a Reverse Post-order Depth-First-Search Iteration of a MIRGraph.
-   */
-  class ReversePostOrderDfsIterator : public DataflowIterator {
-    public:
-      /**
-       * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-       * @param mir_graph The MIRGraph considered.
-       */
-      explicit ReversePostOrderDfsIterator(MIRGraph* mir_graph)
-          : DataflowIterator(mir_graph, mir_graph->GetNumReachableBlocks() -1, 0) {
-        // Extra setup for the ReversePostOrderDfsIterator.
-        idx_ = start_idx_;
-        block_id_list_ = &mir_graph->GetDfsPostOrder();
-      }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) {
-        // Update changed: if had_changed is true, we remember it for the whole iteration.
-        changed_ |= had_change;
-
-        return ReverseSingleNext();
-      }
-  };
-
-  /**
-   * @class ReversePostOrderDfsIterator
-   * @brief Used to perform a Repeating Reverse Post-order Depth-First-Search Iteration of a MIRGraph.
-   * @details If there is a change during an iteration, the iteration starts over at the end of the iteration.
-   */
-  class RepeatingReversePostOrderDfsIterator : public DataflowIterator {
-    public:
-      /**
-       * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-       * @param mir_graph The MIRGraph considered.
-       */
-      explicit RepeatingReversePostOrderDfsIterator(MIRGraph* mir_graph)
-          : DataflowIterator(mir_graph, mir_graph->GetNumReachableBlocks() -1, 0) {
-        // Extra setup for the RepeatingReversePostOrderDfsIterator
-        idx_ = start_idx_;
-        block_id_list_ = &mir_graph->GetDfsPostOrder();
-      }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) {
-        // Update changed: if had_changed is true, we remember it for the whole iteration.
-        changed_ |= had_change;
-
-        return ReverseRepeatNext();
-      }
-  };
-
-  /**
-   * @class PostOrderDOMIterator
-   * @brief Used to perform a Post-order Domination Iteration of a MIRGraph.
-   */
-  class PostOrderDOMIterator : public DataflowIterator {
-    public:
-      /**
-       * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-       * @param mir_graph The MIRGraph considered.
-       */
-      explicit PostOrderDOMIterator(MIRGraph* mir_graph)
-          : DataflowIterator(mir_graph, 0, mir_graph->GetNumReachableBlocks()) {
-        // Extra setup for thePostOrderDOMIterator.
-        idx_ = start_idx_;
-        block_id_list_ = &mir_graph->GetDomPostOrder();
-      }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) {
-        // Update changed: if had_changed is true, we remember it for the whole iteration.
-        changed_ |= had_change;
-
-        return ForwardSingleNext();
-      }
-  };
-
-  /**
-   * @class AllNodesIterator
-   * @brief Used to perform an iteration on all the BasicBlocks a MIRGraph.
-   */
-  class AllNodesIterator : public DataflowIterator {
-    public:
-      /**
-       * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-       * @param mir_graph The MIRGraph considered.
-       */
-      explicit AllNodesIterator(MIRGraph* mir_graph)
-          : DataflowIterator(mir_graph, 0, mir_graph->GetBlockList().size()) {
-      }
-
-      /**
-       * @brief Resetting the iterator.
-       */
-      void Reset() {
-        idx_ = 0;
-      }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) ALWAYS_INLINE;
-  };
-
-  /**
-   * @class TopologicalSortIterator
-   * @brief Used to perform a Topological Sort Iteration of a MIRGraph.
-   */
-  class TopologicalSortIterator : public DataflowIterator {
-    public:
-      /**
-       * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-       * @param mir_graph The MIRGraph considered.
-       */
-      explicit TopologicalSortIterator(MIRGraph* mir_graph)
-          : DataflowIterator(mir_graph, 0, mir_graph->GetTopologicalSortOrder().size()),
-            loop_ends_(&mir_graph->GetTopologicalSortOrderLoopEnds()),
-            loop_head_stack_(mir_graph_->GetTopologicalSortOrderLoopHeadStack()) {
-        // Extra setup for TopologicalSortIterator.
-        idx_ = start_idx_;
-        block_id_list_ = &mir_graph->GetTopologicalSortOrder();
-      }
-
-      /**
-       * @brief Get the next BasicBlock depending on iteration order.
-       * @param had_change did the user of the iteration change the previous BasicBlock.
-       * @return the next BasicBlock following the iteration order, 0 if finished.
-       */
-      virtual BasicBlock* Next(bool had_change = false) OVERRIDE;
-
-    private:
-     const ArenaVector<BasicBlockId>* const loop_ends_;
-     ArenaVector<std::pair<uint16_t, bool>>* const loop_head_stack_;
-  };
-
-  /**
-   * @class LoopRepeatingTopologicalSortIterator
-   * @brief Used to perform a Topological Sort Iteration of a MIRGraph, repeating loops as needed.
-   * @details The iterator uses the visited flags to keep track of the blocks that need
-   * recalculation and keeps a stack of loop heads in the MIRGraph. At the end of the loop
-   * it returns back to the loop head if it needs to be recalculated. Due to the use of
-   * the visited flags and the loop head stack in the MIRGraph, it's not possible to use
-   * two iterators at the same time or modify this data during iteration (though inspection
-   * of this data is allowed and sometimes even expected).
-   *
-   * NOTE: This iterator is not suitable for passes that need to propagate changes to
-   * predecessors, such as type inferrence.
-   */
-  class LoopRepeatingTopologicalSortIterator : public DataflowIterator {
-    public:
-     /**
-      * @brief The constructor, using all of the reachable blocks of the MIRGraph.
-      * @param mir_graph The MIRGraph considered.
-      */
-     explicit LoopRepeatingTopologicalSortIterator(MIRGraph* mir_graph)
-         : DataflowIterator(mir_graph, 0, mir_graph->GetTopologicalSortOrder().size()),
-           loop_ends_(&mir_graph->GetTopologicalSortOrderLoopEnds()),
-           loop_head_stack_(mir_graph_->GetTopologicalSortOrderLoopHeadStack()) {
-       // Extra setup for RepeatingTopologicalSortIterator.
-       idx_ = start_idx_;
-       block_id_list_ = &mir_graph->GetTopologicalSortOrder();
-       // Clear visited flags and check that the loop head stack is empty.
-       mir_graph->ClearAllVisitedFlags();
-       DCHECK_EQ(loop_head_stack_->size(), 0u);
-     }
-
-     ~LoopRepeatingTopologicalSortIterator() {
-       DCHECK_EQ(loop_head_stack_->size(), 0u);
-     }
-
-     /**
-      * @brief Get the next BasicBlock depending on iteration order.
-      * @param had_change did the user of the iteration change the previous BasicBlock.
-      * @return the next BasicBlock following the iteration order, 0 if finished.
-      */
-     virtual BasicBlock* Next(bool had_change = false) OVERRIDE;
-
-    private:
-     const ArenaVector<BasicBlockId>* const loop_ends_;
-     ArenaVector<std::pair<uint16_t, bool>>* const loop_head_stack_;
-  };
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_DATAFLOW_ITERATOR_H_
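
For reference, since these iterators are leaving the tree: both topological iterators
implemented the same Next(had_change) driver protocol, with the loop-repeating variant
re-delivering a loop head until a full pass over its loop reports no change. A minimal
sketch, assuming a hypothetical per-block pass OptimizeBasicBlock() that returns
whether it changed the block:

    LoopRepeatingTopologicalSortIterator iter(mir_graph);
    bool change = false;
    for (BasicBlock* bb = iter.Next(change); bb != nullptr; bb = iter.Next(change)) {
      // Feeding the result back into Next() is what makes loop heads repeat.
      change = OptimizeBasicBlock(bb);  // Hypothetical pass, not from the tree.
    }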
diff --git a/compiler/dex/dex_flags.h b/compiler/dex/dex_flags.h
deleted file mode 100644
index e8eb40c..0000000
--- a/compiler/dex/dex_flags.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_DEX_FLAGS_H_
-#define ART_COMPILER_DEX_DEX_FLAGS_H_
-
-namespace art {
-
-// Suppress optimization if corresponding bit set.
-enum OptControlVector {
-  kLoadStoreElimination = 0,
-  kLoadHoisting,
-  kSuppressLoads,
-  kNullCheckElimination,
-  kClassInitCheckElimination,
-  kGlobalValueNumbering,
-  kGvnDeadCodeElimination,
-  kLocalValueNumbering,
-  kPromoteRegs,
-  kTrackLiveTemps,
-  kSafeOptimizations,
-  kBBOpt,
-  kSuspendCheckElimination,
-  kMatch,
-  kPromoteCompilerTemps,
-  kBranchFusing,
-  kSuppressExceptionEdges,
-  kSuppressMethodInlining,
-};
-
-// Force code generation paths for testing.
-enum DebugControlVector {
-  kDebugVerbose,
-  kDebugDumpCFG,
-  kDebugSlowFieldPath,
-  kDebugSlowInvokePath,
-  kDebugSlowStringPath,
-  kDebugSlowTypePath,
-  kDebugSlowestFieldPath,
-  kDebugSlowestStringPath,
-  kDebugExerciseResolveMethod,
-  kDebugVerifyDataflow,
-  kDebugShowMemoryUsage,
-  kDebugShowNops,
-  kDebugCountOpcodes,
-  kDebugDumpCheckStats,
-  kDebugShowSummaryMemoryUsage,
-  kDebugShowFilterStats,
-  kDebugTimings,
-  kDebugCodegenDump
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_DEX_FLAGS_H_
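
The OptControlVector enumerators above were bit positions, not masks: a pass tested
its bit in CompilationUnit::disable_opt, as GlobalValueNumbering::Skip() does further
down in this change. A minimal sketch of that pattern (the helper name is ours):

    // Hypothetical helper mirroring the (disable_opt & (1u << kOpt)) checks
    // used throughout the Quick compiler.
    inline bool IsOptDisabled(const CompilationUnit* cu, OptControlVector opt) {
      return (cu->disable_opt & (1u << opt)) != 0u;
    }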
diff --git a/compiler/dex/dex_types.h b/compiler/dex/dex_types.h
deleted file mode 100644
index f485c1c..0000000
--- a/compiler/dex/dex_types.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_DEX_TYPES_H_
-#define ART_COMPILER_DEX_DEX_TYPES_H_
-
-namespace art {
-
-typedef uint32_t DexOffset;          // Dex offset in code units.
-typedef uint16_t NarrowDexOffset;    // For use in structs, Dex offsets range from 0 .. 0xffff.
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_DEX_TYPES_H_
diff --git a/compiler/dex/global_value_numbering.cc b/compiler/dex/global_value_numbering.cc
deleted file mode 100644
index 94ba4fa..0000000
--- a/compiler/dex/global_value_numbering.cc
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "global_value_numbering.h"
-
-#include "base/bit_vector-inl.h"
-#include "base/stl_util.h"
-#include "local_value_numbering.h"
-
-namespace art {
-
-GlobalValueNumbering::GlobalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator,
-                                           Mode mode)
-    : cu_(cu),
-      mir_graph_(cu->mir_graph.get()),
-      allocator_(allocator),
-      bbs_processed_(0u),
-      max_bbs_to_process_(kMaxBbsToProcessMultiplyFactor * mir_graph_->GetNumReachableBlocks()),
-      last_value_(kNullValue),
-      modifications_allowed_(true),
-      mode_(mode),
-      global_value_map_(std::less<uint64_t>(), allocator->Adapter()),
-      array_location_map_(ArrayLocationComparator(), allocator->Adapter()),
-      array_location_reverse_map_(allocator->Adapter()),
-      ref_set_map_(std::less<ValueNameSet>(), allocator->Adapter()),
-      lvns_(mir_graph_->GetNumBlocks(), nullptr, allocator->Adapter()),
-      work_lvn_(nullptr),
-      merge_lvns_(allocator->Adapter()) {
-}
-
-GlobalValueNumbering::~GlobalValueNumbering() {
-  STLDeleteElements(&lvns_);
-}
-
-LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb,
-                                                             ScopedArenaAllocator* allocator) {
-  if (UNLIKELY(!Good())) {
-    return nullptr;
-  }
-  if (bb->block_type != kDalvikByteCode && bb->block_type != kEntryBlock) {
-    DCHECK(bb->first_mir_insn == nullptr);
-    return nullptr;
-  }
-  if (mode_ == kModeGvn && UNLIKELY(bbs_processed_ == max_bbs_to_process_)) {
-    // If we're still trying to converge, stop now. Otherwise, proceed to apply optimizations.
-    last_value_ = kNoValue;  // Make Good() return false.
-    return nullptr;
-  }
-  if (mode_ == kModeGvnPostProcessing &&
-      mir_graph_->GetTopologicalSortOrderLoopHeadStack()->empty()) {
-    // Modifications outside loops are performed during the main phase.
-    return nullptr;
-  }
-  if (allocator == nullptr) {
-    allocator = allocator_;
-  }
-  DCHECK(work_lvn_.get() == nullptr);
-  work_lvn_.reset(new (allocator) LocalValueNumbering(this, bb->id, allocator));
-  if (bb->block_type == kEntryBlock) {
-    work_lvn_->PrepareEntryBlock();
-    DCHECK(bb->first_mir_insn == nullptr);  // modifications_allowed_ is irrelevant.
-  } else {
-    // To avoid repeated allocation on the ArenaStack, reuse a single vector kept as a member.
-    DCHECK(merge_lvns_.empty());
-    // If we're running the full GVN, the RepeatingTopologicalSortIterator keeps the loop
-    // head stack in the MIRGraph up to date and for a loop head we need to check whether
-    // we're making the initial computation and need to merge only preceding blocks in the
-    // topological order, or we're recalculating a loop head and need to merge all incoming
-    // LVNs. When we're not at a loop head (including having an empty loop head stack) all
-    // predecessors should be preceding blocks and we shall merge all of them anyway.
-    bool use_all_predecessors = true;
-    uint16_t loop_head_idx = 0u;  // Used only if !use_all_predecessors.
-    if (mode_ == kModeGvn && mir_graph_->GetTopologicalSortOrderLoopHeadStack()->size() != 0) {
-      // Full GVN inside a loop, see if we're at the loop head for the first time.
-      modifications_allowed_ = false;
-      auto top = mir_graph_->GetTopologicalSortOrderLoopHeadStack()->back();
-      loop_head_idx = top.first;
-      bool recalculating = top.second;
-      use_all_predecessors = recalculating ||
-          loop_head_idx != mir_graph_->GetTopologicalSortOrderIndexes()[bb->id];
-    } else {
-      modifications_allowed_ = true;
-    }
-    for (BasicBlockId pred_id : bb->predecessors) {
-      DCHECK_NE(pred_id, NullBasicBlockId);
-      if (lvns_[pred_id] != nullptr &&
-          (use_all_predecessors ||
-              mir_graph_->GetTopologicalSortOrderIndexes()[pred_id] < loop_head_idx)) {
-        merge_lvns_.push_back(lvns_[pred_id]);
-      }
-    }
-    // Determine merge type.
-    LocalValueNumbering::MergeType merge_type = LocalValueNumbering::kNormalMerge;
-    if (bb->catch_entry) {
-      merge_type = LocalValueNumbering::kCatchMerge;
-    } else if (bb->last_mir_insn != nullptr &&
-        IsInstructionReturn(bb->last_mir_insn->dalvikInsn.opcode) &&
-        bb->GetFirstNonPhiInsn() == bb->last_mir_insn) {
-      merge_type = LocalValueNumbering::kReturnMerge;
-    }
-    // At least one predecessor must have been processed before this bb.
-    CHECK(!merge_lvns_.empty());
-    if (merge_lvns_.size() == 1u) {
-      work_lvn_->MergeOne(*merge_lvns_[0], merge_type);
-    } else {
-      work_lvn_->Merge(merge_type);
-    }
-  }
-  return work_lvn_.get();
-}
-
-bool GlobalValueNumbering::FinishBasicBlock(BasicBlock* bb) {
-  DCHECK(work_lvn_ != nullptr);
-  DCHECK_EQ(bb->id, work_lvn_->Id());
-  ++bbs_processed_;
-  merge_lvns_.clear();
-
-  bool change = false;
-  if (mode_ == kModeGvn) {
-    change = (lvns_[bb->id] == nullptr) || !lvns_[bb->id]->Equals(*work_lvn_);
-    // In GVN mode, keep the latest LVN even if Equals() indicates no change. This is
-    // to keep the correct values of fields that do not contribute to Equals() as long
-    // as they depend only on predecessor LVNs' fields that do contribute to Equals().
-    // Currently, that's LVN::merge_map_ used by LVN::GetStartingVregValueNumberImpl().
-    std::unique_ptr<const LocalValueNumbering> old_lvn(lvns_[bb->id]);
-    lvns_[bb->id] = work_lvn_.release();
-  } else {
-    DCHECK_EQ(mode_, kModeGvnPostProcessing);  // kModeLvn doesn't use FinishBasicBlock().
-    DCHECK(lvns_[bb->id] != nullptr);
-    DCHECK(lvns_[bb->id]->Equals(*work_lvn_));
-    work_lvn_.reset();
-  }
-  return change;
-}
-
-uint16_t GlobalValueNumbering::GetArrayLocation(uint16_t base, uint16_t index) {
-  auto cmp = array_location_map_.key_comp();
-  ArrayLocation key = { base, index };
-  auto lb = array_location_map_.lower_bound(key);
-  if (lb != array_location_map_.end() && !cmp(key, lb->first)) {
-    return lb->second;
-  }
-  uint16_t location = static_cast<uint16_t>(array_location_reverse_map_.size());
-  DCHECK_EQ(location, array_location_reverse_map_.size());  // No overflow.
-  auto it = array_location_map_.PutBefore(lb, key, location);
-  array_location_reverse_map_.push_back(&*it);
-  return location;
-}
-
-bool GlobalValueNumbering::NullCheckedInAllPredecessors(
-    const ScopedArenaVector<uint16_t>& merge_names) const {
-  // Implicit parameters:
-  //   - *work_lvn_: the LVN for which we're checking predecessors.
-  //   - merge_lvns_: the predecessor LVNs.
-  DCHECK_EQ(merge_lvns_.size(), merge_names.size());
-  for (size_t i = 0, size = merge_lvns_.size(); i != size; ++i) {
-    const LocalValueNumbering* pred_lvn = merge_lvns_[i];
-    uint16_t value_name = merge_names[i];
-    if (!pred_lvn->IsValueNullChecked(value_name)) {
-      // Check if the predecessor has an IF_EQZ/IF_NEZ as the last insn.
-      const BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_lvn->Id());
-      if (!HasNullCheckLastInsn(pred_bb, work_lvn_->Id())) {
-        return false;
-      }
-      // IF_EQZ/IF_NEZ checks some sreg, see if that sreg contains the value_name.
-      int s_reg = pred_bb->last_mir_insn->ssa_rep->uses[0];
-      if (pred_lvn->GetSregValue(s_reg) != value_name) {
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
-bool GlobalValueNumbering::DivZeroCheckedInAllPredecessors(
-    const ScopedArenaVector<uint16_t>& merge_names) const {
-  // Implicit parameters:
-  //   - *work_lvn_: the LVN for which we're checking predecessors.
-  //   - merge_lvns_: the predecessor LVNs.
-  DCHECK_EQ(merge_lvns_.size(), merge_names.size());
-  for (size_t i = 0, size = merge_lvns_.size(); i != size; ++i) {
-    const LocalValueNumbering* pred_lvn = merge_lvns_[i];
-    uint16_t value_name = merge_names[i];
-    if (!pred_lvn->IsValueDivZeroChecked(value_name)) {
-      return false;
-    }
-  }
-  return true;
-}
-
-bool GlobalValueNumbering::IsBlockEnteredOnTrue(uint16_t cond, BasicBlockId bb_id) {
-  DCHECK_NE(cond, kNoValue);
-  BasicBlock* bb = mir_graph_->GetBasicBlock(bb_id);
-  if (bb->predecessors.size() == 1u) {
-    BasicBlockId pred_id = bb->predecessors[0];
-    BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
-    if (pred_bb->BranchesToSuccessorOnlyIfNotZero(bb_id)) {
-      DCHECK(lvns_[pred_id] != nullptr);
-      uint16_t operand = lvns_[pred_id]->GetSregValue(pred_bb->last_mir_insn->ssa_rep->uses[0]);
-      if (operand == cond) {
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-bool GlobalValueNumbering::IsTrueInBlock(uint16_t cond, BasicBlockId bb_id) {
-  // We're not doing proper value propagation, so just see if the condition is used
-  // with if-nez/if-eqz to branch/fall-through to this bb or one of its dominators.
-  DCHECK_NE(cond, kNoValue);
-  if (IsBlockEnteredOnTrue(cond, bb_id)) {
-    return true;
-  }
-  BasicBlock* bb = mir_graph_->GetBasicBlock(bb_id);
-  for (uint32_t dom_id : bb->dominators->Indexes()) {
-    if (IsBlockEnteredOnTrue(cond, dom_id)) {
-      return true;
-    }
-  }
-  return false;
-}
-
-}  // namespace art
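
The PrepareBasicBlock()/FinishBasicBlock() pair above was driven in two phases: a
convergence phase with modifications disallowed, then one modification pass per block.
A condensed sketch, adapted from the gtest removed below (the per-MIR value numbering
between the two calls is elided):

    GlobalValueNumbering gvn(cu, allocator, GlobalValueNumbering::kModeGvn);
    LoopRepeatingTopologicalSortIterator iter(mir_graph);
    bool change = false;
    for (BasicBlock* bb = iter.Next(change); bb != nullptr; bb = iter.Next(change)) {
      LocalValueNumbering* lvn = gvn.PrepareBasicBlock(bb);
      // ... run lvn->GetValueNumber(mir) over bb's MIRs here ...
      change = (lvn != nullptr) && gvn.FinishBasicBlock(bb);
    }
    if (gvn.Good()) {
      gvn.StartPostProcessing();  // Then a plain TopologicalSortIterator pass
                                  // applies the actual code modifications.
    }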
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
deleted file mode 100644
index c514f75..0000000
--- a/compiler/dex/global_value_numbering.h
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_GLOBAL_VALUE_NUMBERING_H_
-#define ART_COMPILER_DEX_GLOBAL_VALUE_NUMBERING_H_
-
-#include "base/arena_object.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "mir_graph.h"
-#include "compiler_ir.h"
-#include "dex_flags.h"
-
-namespace art {
-
-class LocalValueNumbering;
-class MirFieldInfo;
-
-class GlobalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
- public:
-  static constexpr uint16_t kNoValue = 0xffffu;
-  static constexpr uint16_t kNullValue = 1u;
-
-  enum Mode {
-    kModeGvn,
-    kModeGvnPostProcessing,
-    kModeLvn
-  };
-
-  static bool Skip(CompilationUnit* cu) {
-    return (cu->disable_opt & (1u << kGlobalValueNumbering)) != 0u ||
-        cu->mir_graph->GetMaxNestedLoops() > kMaxAllowedNestedLoops;
-  }
-
-  // Instance and static field id maps are held by MIRGraph to avoid multiple recalculations
-  // when doing LVN.
-  template <typename Container>  // Container of MirIFieldLoweringInfo or MirSFieldLoweringInfo.
-  static uint16_t* PrepareGvnFieldIds(ScopedArenaAllocator* allocator,
-                                      const Container& field_infos);
-
-  GlobalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator, Mode mode);
-  ~GlobalValueNumbering();
-
-  CompilationUnit* GetCompilationUnit() const {
-    return cu_;
-  }
-
-  MIRGraph* GetMirGraph() const {
-    return mir_graph_;
-  }
-
-  // Prepare LVN for the basic block.
-  LocalValueNumbering* PrepareBasicBlock(BasicBlock* bb,
-                                         ScopedArenaAllocator* allocator = nullptr);
-
-  // Finish processing the basic block.
-  bool FinishBasicBlock(BasicBlock* bb);
-
-  // Checks that the value names didn't overflow.
-  bool Good() const {
-    return last_value_ < kNoValue;
-  }
-
-  // Allow modifications.
-  void StartPostProcessing();
-
-  bool CanModify() const {
-    return modifications_allowed_ && Good();
-  }
-
-  // Retrieve the LVN with GVN results for a given BasicBlock.
-  const LocalValueNumbering* GetLvn(BasicBlockId bb_id) const;
-
- private:
-  // Allocate a new value name.
-  uint16_t NewValueName();
-
-  // Key is concatenation of opcode, operand1, operand2 and modifier, value is value name.
-  typedef ScopedArenaSafeMap<uint64_t, uint16_t> ValueMap;
-
-  static uint64_t BuildKey(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
-    return (static_cast<uint64_t>(op) << 48 | static_cast<uint64_t>(operand1) << 32 |
-            static_cast<uint64_t>(operand2) << 16 | static_cast<uint64_t>(modifier));
-  }
-
-  // Look up a value in the global value map, adding a new entry if there was none before.
-  uint16_t LookupValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
-    uint16_t res;
-    uint64_t key = BuildKey(op, operand1, operand2, modifier);
-    auto lb = global_value_map_.lower_bound(key);
-    if (lb != global_value_map_.end() && lb->first == key) {
-      res = lb->second;
-    } else {
-      res = NewValueName();
-      global_value_map_.PutBefore(lb, key, res);
-    }
-    return res;
-  }
-
-  // Look up a value in the global value map, don't add a new entry if there was none before.
-  uint16_t FindValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) const {
-    uint16_t res;
-    uint64_t key = BuildKey(op, operand1, operand2, modifier);
-    auto lb = global_value_map_.lower_bound(key);
-    if (lb != global_value_map_.end() && lb->first == key) {
-      res = lb->second;
-    } else {
-      res = kNoValue;
-    }
-    return res;
-  }
-
-  // Get an instance field id.
-  uint16_t GetIFieldId(MIR* mir) {
-    return GetMirGraph()->GetGvnIFieldId(mir);
-  }
-
-  // Get a static field id.
-  uint16_t GetSFieldId(MIR* mir) {
-    return GetMirGraph()->GetGvnSFieldId(mir);
-  }
-
-  // Get an instance field type based on field id.
-  uint16_t GetIFieldType(uint16_t field_id) {
-    return static_cast<uint16_t>(GetMirGraph()->GetIFieldLoweringInfo(field_id).MemAccessType());
-  }
-
-  // Get a static field type based on field id.
-  uint16_t GetSFieldType(uint16_t field_id) {
-    return static_cast<uint16_t>(GetMirGraph()->GetSFieldLoweringInfo(field_id).MemAccessType());
-  }
-
-  struct ArrayLocation {
-    uint16_t base;
-    uint16_t index;
-  };
-
-  struct ArrayLocationComparator {
-    bool operator()(const ArrayLocation& lhs, const ArrayLocation& rhs) const {
-      if (lhs.base != rhs.base) {
-        return lhs.base < rhs.base;
-      }
-      return lhs.index < rhs.index;
-    }
-  };
-
-  typedef ScopedArenaSafeMap<ArrayLocation, uint16_t, ArrayLocationComparator> ArrayLocationMap;
-
-  // Get an array location.
-  uint16_t GetArrayLocation(uint16_t base, uint16_t index);
-
-  // Get the array base from an array location.
-  uint16_t GetArrayLocationBase(uint16_t location) const {
-    return array_location_reverse_map_[location]->first.base;
-  }
-
-  // Get the array index from an array location.
-  uint16_t GetArrayLocationIndex(uint16_t location) const {
-    return array_location_reverse_map_[location]->first.index;
-  }
-
-  // A set of value names.
-  typedef ScopedArenaSet<uint16_t> ValueNameSet;
-
-  // A map from a set of references to the set id.
-  typedef ScopedArenaSafeMap<ValueNameSet, uint16_t> RefSetIdMap;
-
-  uint16_t GetRefSetId(const ValueNameSet& ref_set) {
-    uint16_t res = kNoValue;
-    auto lb = ref_set_map_.lower_bound(ref_set);
-    if (lb != ref_set_map_.end() && !ref_set_map_.key_comp()(ref_set, lb->first)) {
-      res = lb->second;
-    } else {
-      res = NewValueName();
-      ref_set_map_.PutBefore(lb, ref_set, res);
-    }
-    return res;
-  }
-
-  const BasicBlock* GetBasicBlock(uint16_t bb_id) const {
-    return mir_graph_->GetBasicBlock(bb_id);
-  }
-
-  static bool HasNullCheckLastInsn(const BasicBlock* pred_bb, BasicBlockId succ_id) {
-    return pred_bb->BranchesToSuccessorOnlyIfNotZero(succ_id);
-  }
-
-  bool NullCheckedInAllPredecessors(const ScopedArenaVector<uint16_t>& merge_names) const;
-
-  bool DivZeroCheckedInAllPredecessors(const ScopedArenaVector<uint16_t>& merge_names) const;
-
-  bool IsBlockEnteredOnTrue(uint16_t cond, BasicBlockId bb_id);
-  bool IsTrueInBlock(uint16_t cond, BasicBlockId bb_id);
-
-  ScopedArenaAllocator* Allocator() const {
-    return allocator_;
-  }
-
-  CompilationUnit* const cu_;
-  MIRGraph* const mir_graph_;
-  ScopedArenaAllocator* const allocator_;
-
-  // The maximum number of nested loops that we accept for GVN.
-  static constexpr size_t kMaxAllowedNestedLoops = 6u;
-
-  // The number of BBs that we need to process grows exponentially with the number
-  // of nested loops. Don't allow excessive processing for too many nested loops or
-  // otherwise expensive methods.
-  static constexpr uint32_t kMaxBbsToProcessMultiplyFactor = 20u;
-
-  uint32_t bbs_processed_;
-  uint32_t max_bbs_to_process_;  // Doesn't apply after the main GVN has converged.
-
-  // We have 32-bit last_value_ so that we can detect when we run out of value names, see Good().
-  // We usually don't check Good() until the end of LVN unless we're about to modify code.
-  uint32_t last_value_;
-
-  // Marks whether code modifications are allowed. The initial GVN is done without code
-  // modifications to settle the value names. Afterwards, we allow modifications and rerun
-  // LVN once for each BasicBlock.
-  bool modifications_allowed_;
-
-  // Specifies the mode of operation.
-  Mode mode_;
-
-  ValueMap global_value_map_;
-  ArrayLocationMap array_location_map_;
-  ScopedArenaVector<const ArrayLocationMap::value_type*> array_location_reverse_map_;
-  RefSetIdMap ref_set_map_;
-
-  ScopedArenaVector<const LocalValueNumbering*> lvns_;        // Owning.
-  std::unique_ptr<LocalValueNumbering> work_lvn_;
-  ScopedArenaVector<const LocalValueNumbering*> merge_lvns_;  // Not owning.
-
-  friend class LocalValueNumbering;
-  friend class GlobalValueNumberingTest;
-
-  DISALLOW_COPY_AND_ASSIGN(GlobalValueNumbering);
-};
-std::ostream& operator<<(std::ostream& os, const GlobalValueNumbering::Mode& rhs);
-
-inline const LocalValueNumbering* GlobalValueNumbering::GetLvn(BasicBlockId bb_id) const {
-  DCHECK_EQ(mode_, kModeGvnPostProcessing);
-  DCHECK_LT(bb_id, lvns_.size());
-  DCHECK(lvns_[bb_id] != nullptr);
-  return lvns_[bb_id];
-}
-
-inline void GlobalValueNumbering::StartPostProcessing() {
-  DCHECK(Good());
-  DCHECK_EQ(mode_, kModeGvn);
-  mode_ = kModeGvnPostProcessing;
-}
-
-inline uint16_t GlobalValueNumbering::NewValueName() {
-  DCHECK_NE(mode_, kModeGvnPostProcessing);
-  ++last_value_;
-  return last_value_;
-}
-
-template <typename Container>  // Container of MirIFieldLoweringInfo or MirSFieldLoweringInfo.
-uint16_t* GlobalValueNumbering::PrepareGvnFieldIds(ScopedArenaAllocator* allocator,
-                                                   const Container& field_infos) {
-  size_t size = field_infos.size();
-  uint16_t* field_ids = allocator->AllocArray<uint16_t>(size, kArenaAllocMisc);
-  for (size_t i = 0u; i != size; ++i) {
-    size_t idx = i;
-    const MirFieldInfo& cur_info = field_infos[i];
-    if (cur_info.IsResolved()) {
-      for (size_t j = 0; j != i; ++j) {
-        const MirFieldInfo& prev_info = field_infos[j];
-        if (prev_info.IsResolved() &&
-            prev_info.DeclaringDexFile() == cur_info.DeclaringDexFile() &&
-            prev_info.DeclaringFieldIndex() == cur_info.DeclaringFieldIndex()) {
-          DCHECK_EQ(cur_info.MemAccessType(), prev_info.MemAccessType());
-          idx = j;
-          break;
-        }
-      }
-    }
-    field_ids[i] = idx;
-  }
-  return field_ids;
-}
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_GLOBAL_VALUE_NUMBERING_H_
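
A worked illustration of the BuildKey() packing above: the four 16-bit inputs occupy
disjoint bit fields of the 64-bit key (op in bits 48..63, operand1 in 32..47, operand2
in 16..31, modifier in 0..15), so two tuples map to the same key only if all four
components match. BuildKey() is private, so this is illustrative rather than callable
from outside:

    uint64_t key = BuildKey(0x1234u, 0x0001u, 0x0002u, 0x0003u);
    DCHECK_EQ(key, UINT64_C(0x1234000100020003));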
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
deleted file mode 100644
index 7d647e5..0000000
--- a/compiler/dex/global_value_numbering_test.cc
+++ /dev/null
@@ -1,2428 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/logging.h"
-#include "dataflow_iterator-inl.h"
-#include "dex/mir_field_info.h"
-#include "global_value_numbering.h"
-#include "local_value_numbering.h"
-#include "gtest/gtest.h"
-
-namespace art {
-
-class GlobalValueNumberingTest : public testing::Test {
- protected:
-  static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
-
-  struct IFieldDef {
-    uint16_t field_idx;
-    uintptr_t declaring_dex_file;
-    uint16_t declaring_field_idx;
-    bool is_volatile;
-    DexMemAccessType type;
-  };
-
-  struct SFieldDef {
-    uint16_t field_idx;
-    uintptr_t declaring_dex_file;
-    uint16_t declaring_field_idx;
-    bool is_volatile;
-    DexMemAccessType type;
-  };
-
-  struct BBDef {
-    static constexpr size_t kMaxSuccessors = 4;
-    static constexpr size_t kMaxPredecessors = 4;
-
-    BBType type;
-    size_t num_successors;
-    BasicBlockId successors[kMaxSuccessors];
-    size_t num_predecessors;
-    BasicBlockId predecessors[kMaxPredecessors];
-  };
-
-  struct MIRDef {
-    static constexpr size_t kMaxSsaDefs = 2;
-    static constexpr size_t kMaxSsaUses = 4;
-
-    BasicBlockId bbid;
-    Instruction::Code opcode;
-    int64_t value;
-    uint32_t field_info;
-    size_t num_uses;
-    int32_t uses[kMaxSsaUses];
-    size_t num_defs;
-    int32_t defs[kMaxSsaDefs];
-  };
-
-#define DEF_SUCC0() \
-    0u, { }
-#define DEF_SUCC1(s1) \
-    1u, { s1 }
-#define DEF_SUCC2(s1, s2) \
-    2u, { s1, s2 }
-#define DEF_SUCC3(s1, s2, s3) \
-    3u, { s1, s2, s3 }
-#define DEF_SUCC4(s1, s2, s3, s4) \
-    4u, { s1, s2, s3, s4 }
-#define DEF_PRED0() \
-    0u, { }
-#define DEF_PRED1(p1) \
-    1u, { p1 }
-#define DEF_PRED2(p1, p2) \
-    2u, { p1, p2 }
-#define DEF_PRED3(p1, p2, p3) \
-    3u, { p1, p2, p3 }
-#define DEF_PRED4(p1, p2, p3, p4) \
-    4u, { p1, p2, p3, p4 }
-#define DEF_BB(type, succ, pred) \
-    { type, succ, pred }
-
-#define DEF_CONST(bb, opcode, reg, value) \
-    { bb, opcode, value, 0u, 0, { }, 1, { reg } }
-#define DEF_CONST_WIDE(bb, opcode, reg, value) \
-    { bb, opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_CONST_STRING(bb, opcode, reg, index) \
-    { bb, opcode, index, 0u, 0, { }, 1, { reg } }
-#define DEF_IGET(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 1, { obj }, 1, { reg } }
-#define DEF_IGET_WIDE(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
-#define DEF_IPUT(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
-#define DEF_IPUT_WIDE(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
-#define DEF_SGET(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 0, { }, 1, { reg } }
-#define DEF_SGET_WIDE(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_SPUT(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 1, { reg }, 0, { } }
-#define DEF_SPUT_WIDE(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
-#define DEF_AGET(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 2, { obj, idx }, 1, { reg } }
-#define DEF_AGET_WIDE(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 2, { obj, idx }, 2, { reg, reg + 1 } }
-#define DEF_APUT(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 3, { reg, obj, idx }, 0, { } }
-#define DEF_APUT_WIDE(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }
-#define DEF_INVOKE1(bb, opcode, reg) \
-    { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
-#define DEF_UNIQUE_REF(bb, opcode, reg) \
-    { bb, opcode, 0u, 0u, 0, { }, 1, { reg } }  // CONST_CLASS, CONST_STRING, NEW_ARRAY, ...
-#define DEF_IFZ(bb, opcode, reg) \
-    { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
-#define DEF_MOVE(bb, opcode, reg, src) \
-    { bb, opcode, 0u, 0u, 1, { src }, 1, { reg } }
-#define DEF_MOVE_WIDE(bb, opcode, reg, src) \
-    { bb, opcode, 0u, 0u, 2, { src, src + 1 }, 2, { reg, reg + 1 } }
-#define DEF_PHI2(bb, reg, src1, src2) \
-    { bb, static_cast<Instruction::Code>(kMirOpPhi), 0, 0u, 2u, { src1, src2 }, 1, { reg } }
-#define DEF_BINOP(bb, opcode, result, src1, src2) \
-    { bb, opcode, 0u, 0u, 2, { src1, src2 }, 1, { result } }
-#define DEF_UNOP(bb, opcode, result, src) DEF_MOVE(bb, opcode, result, src)
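
Each DEF_* macro above simply fills a MIRDef aggregate; for example, the first IGET of
the NonAliasingIFields test below, DEF_IGET(3, Instruction::IGET, 1u, 100u, 0u),
expands to

    { 3, Instruction::IGET, 0u, 0u, 1, { 100u }, 1, { 1u } }

i.e. an IGET in block 3 reading field 0u through object sreg 100u into sreg 1u.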
-
-  void DoPrepareIFields(const IFieldDef* defs, size_t count) {
-    cu_.mir_graph->ifield_lowering_infos_.clear();
-    cu_.mir_graph->ifield_lowering_infos_.reserve(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const IFieldDef* def = &defs[i];
-      MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
-      if (def->declaring_dex_file != 0u) {
-        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
-        field_info.declaring_field_idx_ = def->declaring_field_idx;
-        field_info.flags_ &= ~(def->is_volatile ? 0u : MirIFieldLoweringInfo::kFlagIsVolatile);
-      }
-      cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
-    }
-  }
-
-  template <size_t count>
-  void PrepareIFields(const IFieldDef (&defs)[count]) {
-    DoPrepareIFields(defs, count);
-  }
-
-  void DoPrepareSFields(const SFieldDef* defs, size_t count) {
-    cu_.mir_graph->sfield_lowering_infos_.clear();
-    cu_.mir_graph->sfield_lowering_infos_.reserve(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const SFieldDef* def = &defs[i];
-      MirSFieldLoweringInfo field_info(def->field_idx, def->type);
-      // Mark even unresolved fields as initialized.
-      field_info.flags_ |= MirSFieldLoweringInfo::kFlagClassIsInitialized;
-      // NOTE: MirSFieldLoweringInfo::kFlagClassIsInDexCache isn't used by GVN.
-      if (def->declaring_dex_file != 0u) {
-        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
-        field_info.declaring_field_idx_ = def->declaring_field_idx;
-        field_info.flags_ &= ~(def->is_volatile ? 0u : MirSFieldLoweringInfo::kFlagIsVolatile);
-      }
-      cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
-    }
-  }
-
-  template <size_t count>
-  void PrepareSFields(const SFieldDef (&defs)[count]) {
-    DoPrepareSFields(defs, count);
-  }
-
-  void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
-    cu_.mir_graph->block_id_map_.clear();
-    cu_.mir_graph->block_list_.clear();
-    ASSERT_LT(3u, count);  // null, entry, exit and at least one bytecode block.
-    ASSERT_EQ(kNullBlock, defs[0].type);
-    ASSERT_EQ(kEntryBlock, defs[1].type);
-    ASSERT_EQ(kExitBlock, defs[2].type);
-    for (size_t i = 0u; i != count; ++i) {
-      const BBDef* def = &defs[i];
-      BasicBlock* bb = cu_.mir_graph->CreateNewBB(def->type);
-      if (def->num_successors <= 2) {
-        bb->successor_block_list_type = kNotUsed;
-        bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
-        bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
-      } else {
-        bb->successor_block_list_type = kPackedSwitch;
-        bb->fall_through = 0u;
-        bb->taken = 0u;
-        bb->successor_blocks.reserve(def->num_successors);
-        for (size_t j = 0u; j != def->num_successors; ++j) {
-          SuccessorBlockInfo* successor_block_info =
-              static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
-                                                               kArenaAllocSuccessors));
-          successor_block_info->block = def->successors[j];
-          successor_block_info->key = 0u;  // Key is not used by these GVN tests.
-          bb->successor_blocks.push_back(successor_block_info);
-        }
-      }
-      bb->predecessors.assign(def->predecessors, def->predecessors + def->num_predecessors);
-      if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
-        bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
-            cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
-        bb->data_flow_info->live_in_v = live_in_v_;
-      }
-    }
-    ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
-    cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
-    ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
-    cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_[2];
-    ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
-  }
-
-  template <size_t count>
-  void PrepareBasicBlocks(const BBDef (&defs)[count]) {
-    DoPrepareBasicBlocks(defs, count);
-  }
-
-  void DoPrepareMIRs(const MIRDef* defs, size_t count) {
-    mir_count_ = count;
-    mirs_ = cu_.arena.AllocArray<MIR>(count, kArenaAllocMIR);
-    ssa_reps_.resize(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const MIRDef* def = &defs[i];
-      MIR* mir = &mirs_[i];
-      ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.size());
-      BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
-      bb->AppendMIR(mir);
-      mir->dalvikInsn.opcode = def->opcode;
-      mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
-      mir->dalvikInsn.vB_wide = def->value;
-      if (IsInstructionIGetOrIPut(def->opcode)) {
-        ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.size());
-        mir->meta.ifield_lowering_info = def->field_info;
-        ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->field_info].MemAccessType(),
-                  IGetOrIPutMemAccessType(def->opcode));
-      } else if (IsInstructionSGetOrSPut(def->opcode)) {
-        ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.size());
-        mir->meta.sfield_lowering_info = def->field_info;
-        ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->field_info].MemAccessType(),
-                  SGetOrSPutMemAccessType(def->opcode));
-      } else if (def->opcode == static_cast<Instruction::Code>(kMirOpPhi)) {
-        mir->meta.phi_incoming =
-            allocator_->AllocArray<BasicBlockId>(def->num_uses, kArenaAllocDFInfo);
-        ASSERT_EQ(def->num_uses, bb->predecessors.size());
-        std::copy(bb->predecessors.begin(), bb->predecessors.end(), mir->meta.phi_incoming);
-      }
-      mir->ssa_rep = &ssa_reps_[i];
-      mir->ssa_rep->num_uses = def->num_uses;
-      mir->ssa_rep->uses = const_cast<int32_t*>(def->uses);  // Not modified by LVN.
-      mir->ssa_rep->num_defs = def->num_defs;
-      mir->ssa_rep->defs = const_cast<int32_t*>(def->defs);  // Not modified by LVN.
-      mir->offset = i;  // LVN uses offset only for debug output.
-      mir->optimization_flags = 0u;
-    }
-    DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(
-        cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
-    code_item->insns_size_in_code_units_ = 2u * count;
-    cu_.mir_graph->current_code_item_ = code_item;
-  }
-
-  template <size_t count>
-  void PrepareMIRs(const MIRDef (&defs)[count]) {
-    DoPrepareMIRs(defs, count);
-  }
-
-  void DoPrepareVregToSsaMapExit(BasicBlockId bb_id, const int32_t* map, size_t count) {
-    BasicBlock* bb = cu_.mir_graph->GetBasicBlock(bb_id);
-    ASSERT_TRUE(bb != nullptr);
-    ASSERT_TRUE(bb->data_flow_info != nullptr);
-    bb->data_flow_info->vreg_to_ssa_map_exit =
-        cu_.arena.AllocArray<int32_t>(count, kArenaAllocDFInfo);
-    std::copy_n(map, count, bb->data_flow_info->vreg_to_ssa_map_exit);
-  }
-
-  template <size_t count>
-  void PrepareVregToSsaMapExit(BasicBlockId bb_id, const int32_t (&map)[count]) {
-    DoPrepareVregToSsaMapExit(bb_id, map, count);
-  }
-
-  template <size_t count>
-  void MarkAsWideSRegs(const int32_t (&sregs)[count]) {
-    for (int32_t sreg : sregs) {
-      cu_.mir_graph->reg_location_[sreg].wide = true;
-      cu_.mir_graph->reg_location_[sreg + 1].wide = true;
-      cu_.mir_graph->reg_location_[sreg + 1].high_word = true;
-    }
-  }
-
-  void PerformGVN() {
-    DoPerformGVN<LoopRepeatingTopologicalSortIterator>();
-  }
-
-  void PerformPreOrderDfsGVN() {
-    DoPerformGVN<RepeatingPreOrderDfsIterator>();
-  }
-
-  template <typename IteratorType>
-  void DoPerformGVN() {
-    cu_.mir_graph->SSATransformationStart();
-    cu_.mir_graph->ComputeDFSOrders();
-    cu_.mir_graph->ComputeDominators();
-    cu_.mir_graph->ComputeTopologicalSortOrder();
-    cu_.mir_graph->SSATransformationEnd();
-    cu_.mir_graph->temp_.gvn.ifield_ids = GlobalValueNumbering::PrepareGvnFieldIds(
-        allocator_.get(), cu_.mir_graph->ifield_lowering_infos_);
-    cu_.mir_graph->temp_.gvn.sfield_ids = GlobalValueNumbering::PrepareGvnFieldIds(
-        allocator_.get(), cu_.mir_graph->sfield_lowering_infos_);
-    ASSERT_TRUE(gvn_ == nullptr);
-    gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get(),
-                                                           GlobalValueNumbering::kModeGvn));
-    value_names_.resize(mir_count_, 0xffffu);
-    IteratorType iterator(cu_.mir_graph.get());
-    bool change = false;
-    for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
-      LocalValueNumbering* lvn = gvn_->PrepareBasicBlock(bb);
-      if (lvn != nullptr) {
-        for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-          value_names_[mir - mirs_] = lvn->GetValueNumber(mir);
-        }
-      }
-      change = (lvn != nullptr) && gvn_->FinishBasicBlock(bb);
-      ASSERT_TRUE(gvn_->Good());
-    }
-  }
-
-  void PerformGVNCodeModifications() {
-    ASSERT_TRUE(gvn_ != nullptr);
-    ASSERT_TRUE(gvn_->Good());
-    gvn_->StartPostProcessing();
-    TopologicalSortIterator iterator(cu_.mir_graph.get());
-    for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
-      LocalValueNumbering* lvn = gvn_->PrepareBasicBlock(bb);
-      if (lvn != nullptr) {
-        for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-          uint16_t value_name = lvn->GetValueNumber(mir);
-          ASSERT_EQ(value_name, value_names_[mir - mirs_]);
-        }
-      }
-      bool change = (lvn != nullptr) && gvn_->FinishBasicBlock(bb);
-      ASSERT_FALSE(change);
-      ASSERT_TRUE(gvn_->Good());
-    }
-  }
-
-  GlobalValueNumberingTest()
-      : pool_(),
-        cu_(&pool_, kRuntimeISA, nullptr, nullptr),
-        mir_count_(0u),
-        mirs_(nullptr),
-        ssa_reps_(),
-        allocator_(),
-        gvn_(),
-        value_names_(),
-        live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false)) {
-    cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
-    cu_.access_flags = kAccStatic;  // Don't let "this" interfere with this test.
-    allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
-    // By default, the zero-initialized reg_location_[.] with ref == false tells LVN that
-    // 0 constants are integral, not references, and the values are all narrow.
-    // Nothing else is used by LVN/GVN. Tests can override the default values as needed.
-    cu_.mir_graph->reg_location_ =
-        cu_.arena.AllocArray<RegLocation>(kMaxSsaRegs, kArenaAllocRegAlloc);
-    cu_.mir_graph->num_ssa_regs_ = kMaxSsaRegs;
-    // Bind all possible sregs to live vregs for test purposes.
-    live_in_v_->SetInitialBits(kMaxSsaRegs);
-    cu_.mir_graph->ssa_base_vregs_.reserve(kMaxSsaRegs);
-    cu_.mir_graph->ssa_subscripts_.reserve(kMaxSsaRegs);
-    for (unsigned int i = 0; i < kMaxSsaRegs; i++) {
-      cu_.mir_graph->ssa_base_vregs_.push_back(i);
-      cu_.mir_graph->ssa_subscripts_.push_back(0);
-    }
-    // Set shorty for a void-returning method without arguments.
-    cu_.shorty = "V";
-  }
-
-  static constexpr size_t kMaxSsaRegs = 16384u;
-
-  ArenaPool pool_;
-  CompilationUnit cu_;
-  size_t mir_count_;
-  MIR* mirs_;
-  std::vector<SSARepresentation> ssa_reps_;
-  std::unique_ptr<ScopedArenaAllocator> allocator_;
-  std::unique_ptr<GlobalValueNumbering> gvn_;
-  std::vector<uint16_t> value_names_;
-  ArenaBitVector* live_in_v_;
-};
-
-constexpr uint16_t GlobalValueNumberingTest::kNoValue;
-
-class GlobalValueNumberingTestDiamond : public GlobalValueNumberingTest {
- public:
-  GlobalValueNumberingTestDiamond();
-
- private:
-  static const BBDef kDiamondBbs[];
-};
-
-const GlobalValueNumberingTest::BBDef GlobalValueNumberingTestDiamond::kDiamondBbs[] = {
-    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),  // Block #3, top of the diamond.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // Block #4, left side.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // Block #5, right side.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),  // Block #6, bottom.
-};
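
These block ids form the diamond used by all GlobalValueNumberingTestDiamond tests
(entry #1 and exit #2 omitted):

         #3 (top)
        /        \
    #4 (left)  #5 (right)
        \        /
        #6 (bottom)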
-
-GlobalValueNumberingTestDiamond::GlobalValueNumberingTestDiamond()
-    : GlobalValueNumberingTest() {
-  PrepareBasicBlocks(kDiamondBbs);
-}
-
-class GlobalValueNumberingTestLoop : public GlobalValueNumberingTest {
- public:
-  GlobalValueNumberingTestLoop();
-
- private:
-  static const BBDef kLoopBbs[];
-};
-
-const GlobalValueNumberingTest::BBDef GlobalValueNumberingTestLoop::kLoopBbs[] = {
-    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)),  // "taken" loops to self.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
-};
-
-GlobalValueNumberingTestLoop::GlobalValueNumberingTestLoop()
-    : GlobalValueNumberingTest() {
-  PrepareBasicBlocks(kLoopBbs);
-}
-
-class GlobalValueNumberingTestCatch : public GlobalValueNumberingTest {
- public:
-  GlobalValueNumberingTestCatch();
-
- private:
-  static const BBDef kCatchBbs[];
-};
-
-const GlobalValueNumberingTest::BBDef GlobalValueNumberingTestCatch::kCatchBbs[] = {
-    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),     // The top.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // The throwing insn.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // Catch handler.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),  // The merged block.
-};
-
-GlobalValueNumberingTestCatch::GlobalValueNumberingTestCatch()
-    : GlobalValueNumberingTest() {
-  PrepareBasicBlocks(kCatchBbs);
-  // Mark catch handler.
-  BasicBlock* catch_handler = cu_.mir_graph->GetBasicBlock(5u);
-  catch_handler->catch_entry = true;
-  // Add successor block info to the check block.
-  BasicBlock* check_bb = cu_.mir_graph->GetBasicBlock(3u);
-  check_bb->successor_block_list_type = kCatch;
-  SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
-      (cu_.arena.Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessors));
-  successor_block_info->block = catch_handler->id;
-  check_bb->successor_blocks.push_back(successor_block_info);
-}
-
-class GlobalValueNumberingTestTwoConsecutiveLoops : public GlobalValueNumberingTest {
- public:
-  GlobalValueNumberingTestTwoConsecutiveLoops();
-
- private:
-  static const BBDef kTwoConsecutiveLoopsBbs[];
-};
-
-const GlobalValueNumberingTest::BBDef
-GlobalValueNumberingTestTwoConsecutiveLoops::kTwoConsecutiveLoopsBbs[] = {
-    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(9)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 6), DEF_PRED2(3, 5)),  // "taken" skips over the loop.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(4)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(7), DEF_PRED1(4)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC2(8, 9), DEF_PRED2(6, 8)),  // "taken" skips over the loop.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(7), DEF_PRED1(7)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(7)),
-};
-
-GlobalValueNumberingTestTwoConsecutiveLoops::GlobalValueNumberingTestTwoConsecutiveLoops()
-    : GlobalValueNumberingTest() {
-  PrepareBasicBlocks(kTwoConsecutiveLoopsBbs);
-}
-
-class GlobalValueNumberingTestTwoNestedLoops : public GlobalValueNumberingTest {
- public:
-  GlobalValueNumberingTestTwoNestedLoops();
-
- private:
-  static const BBDef kTwoNestedLoopsBbs[];
-};
-
-const GlobalValueNumberingTest::BBDef
-GlobalValueNumberingTestTwoNestedLoops::kTwoNestedLoopsBbs[] = {
-    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(8)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 8), DEF_PRED2(3, 7)),  // "taken" skips over the loop.
-    DEF_BB(kDalvikByteCode, DEF_SUCC2(6, 7), DEF_PRED2(4, 6)),  // "taken" skips over the loop.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(5)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(5)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
-};
-
-GlobalValueNumberingTestTwoNestedLoops::GlobalValueNumberingTestTwoNestedLoops()
-    : GlobalValueNumberingTest() {
-  PrepareBasicBlocks(kTwoNestedLoopsBbs);
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, NonAliasingIFields) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-      { 3u, 1u, 3u, false, kDexMemAccessWord },
-      { 4u, 1u, 4u, false, kDexMemAccessShort },
-      { 5u, 1u, 5u, false, kDexMemAccessChar },
-      { 6u, 0u, 0u, false, kDexMemAccessShort },   // Unresolved.
-      { 7u, 1u, 7u, false, kDexMemAccessWord },
-      { 8u, 0u, 0u, false, kDexMemAccessWord },    // Unresolved.
-      { 9u, 1u, 9u, false, kDexMemAccessWord },
-      { 10u, 1u, 10u, false, kDexMemAccessWord },
-      { 11u, 1u, 11u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 100u),
-      DEF_IGET(3, Instruction::IGET, 1u, 100u, 0u),
-      DEF_IGET(6, Instruction::IGET, 2u, 100u, 0u),   // Same as at the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 200u),
-      DEF_IGET(4, Instruction::IGET, 4u, 200u, 1u),
-      DEF_IGET(6, Instruction::IGET, 5u, 200u, 1u),   // Same as at the left side.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 300u),
-      DEF_IGET(3, Instruction::IGET, 7u, 300u, 2u),
-      DEF_CONST(5, Instruction::CONST, 8u, 1000),
-      DEF_IPUT(5, Instruction::IPUT, 8u, 300u, 2u),
-      DEF_IGET(6, Instruction::IGET, 10u, 300u, 2u),  // Differs from the top and the CONST.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 400u),
-      DEF_IGET(3, Instruction::IGET, 12u, 400u, 3u),
-      DEF_CONST(3, Instruction::CONST, 13u, 2000),
-      DEF_IPUT(4, Instruction::IPUT, 13u, 400u, 3u),
-      DEF_IPUT(5, Instruction::IPUT, 13u, 400u, 3u),
-      DEF_IGET(6, Instruction::IGET, 16u, 400u, 3u),  // Differs from the top, equals the CONST.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 500u),
-      DEF_IGET(3, Instruction::IGET_SHORT, 18u, 500u, 4u),
-      DEF_IGET(3, Instruction::IGET_CHAR, 19u, 500u, 5u),
-      DEF_IPUT(4, Instruction::IPUT_SHORT, 20u, 500u, 6u),  // Clobbers field #4, not #5.
-      DEF_IGET(6, Instruction::IGET_SHORT, 21u, 500u, 4u),  // Differs from the top.
-      DEF_IGET(6, Instruction::IGET_CHAR, 22u, 500u, 5u),   // Same as the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 600u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 601u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 602u),
-      DEF_IGET(3, Instruction::IGET, 26u, 600u, 7u),
-      DEF_IGET(3, Instruction::IGET, 27u, 601u, 7u),
-      DEF_IPUT(4, Instruction::IPUT, 28u, 602u, 8u),  // Doesn't clobber field #7 for other refs.
-      DEF_IGET(6, Instruction::IGET, 29u, 600u, 7u),  // Same as the top.
-      DEF_IGET(6, Instruction::IGET, 30u, 601u, 7u),  // Same as the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 700u),
-      DEF_CONST(4, Instruction::CONST, 32u, 3000),
-      DEF_IPUT(4, Instruction::IPUT, 32u, 700u, 9u),
-      DEF_IPUT(4, Instruction::IPUT, 32u, 700u, 10u),
-      DEF_CONST(5, Instruction::CONST, 35u, 3001),
-      DEF_IPUT(5, Instruction::IPUT, 35u, 700u, 9u),
-      DEF_IPUT(5, Instruction::IPUT, 35u, 700u, 10u),
-      DEF_IGET(6, Instruction::IGET, 38u, 700u, 9u),
-      DEF_IGET(6, Instruction::IGET, 39u, 700u, 10u),  // Same value as read from field #9.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 800u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 801u),
-      DEF_CONST(4, Instruction::CONST, 42u, 3000),
-      DEF_IPUT(4, Instruction::IPUT, 42u, 800u, 11u),
-      DEF_IPUT(4, Instruction::IPUT, 42u, 801u, 11u),
-      DEF_CONST(5, Instruction::CONST, 45u, 3001),
-      DEF_IPUT(5, Instruction::IPUT, 45u, 800u, 11u),
-      DEF_IPUT(5, Instruction::IPUT, 45u, 801u, 11u),
-      DEF_IGET(6, Instruction::IGET, 48u, 800u, 11u),
-      DEF_IGET(6, Instruction::IGET, 49u, 801u, 11u),  // Same value as read from ref 800u.
-
-      // Invoke doesn't interfere with non-aliasing refs. There's one test above where a reference
-      // escapes in the left BB (we let a reference escape if we use it to store to an unresolved
-      // field) and the INVOKE in the right BB shouldn't interfere with that either.
-      DEF_INVOKE1(5, Instruction::INVOKE_STATIC, 48u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[1], value_names_[2]);
-
-  EXPECT_EQ(value_names_[4], value_names_[5]);
-
-  EXPECT_NE(value_names_[7], value_names_[10]);
-  EXPECT_NE(value_names_[8], value_names_[10]);
-
-  EXPECT_NE(value_names_[12], value_names_[16]);
-  EXPECT_EQ(value_names_[13], value_names_[16]);
-
-  EXPECT_NE(value_names_[18], value_names_[21]);
-  EXPECT_EQ(value_names_[19], value_names_[22]);
-
-  EXPECT_EQ(value_names_[26], value_names_[29]);
-  EXPECT_EQ(value_names_[27], value_names_[30]);
-
-  EXPECT_EQ(value_names_[38], value_names_[39]);
-
-  EXPECT_EQ(value_names_[48], value_names_[49]);
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, AliasingIFieldsSingleObject) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-      { 3u, 1u, 3u, false, kDexMemAccessWord },
-      { 4u, 1u, 4u, false, kDexMemAccessShort },
-      { 5u, 1u, 5u, false, kDexMemAccessChar },
-      { 6u, 0u, 0u, false, kDexMemAccessShort },  // Unresolved.
-      { 7u, 1u, 7u, false, kDexMemAccessWord },
-      { 8u, 1u, 8u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_IGET(3, Instruction::IGET, 0u, 100u, 0u),
-      DEF_IGET(6, Instruction::IGET, 1u, 100u, 0u),   // Same as at the top.
-
-      DEF_IGET(4, Instruction::IGET, 2u, 100u, 1u),
-      DEF_IGET(6, Instruction::IGET, 3u, 100u, 1u),   // Same as at the left side.
-
-      DEF_IGET(3, Instruction::IGET, 4u, 100u, 2u),
-      DEF_CONST(5, Instruction::CONST, 5u, 1000),
-      DEF_IPUT(5, Instruction::IPUT, 5u, 100u, 2u),
-      DEF_IGET(6, Instruction::IGET, 7u, 100u, 2u),   // Differs from the top and the CONST.
-
-      DEF_IGET(3, Instruction::IGET, 8u, 100u, 3u),
-      DEF_CONST(3, Instruction::CONST, 9u, 2000),
-      DEF_IPUT(4, Instruction::IPUT, 9u, 100u, 3u),
-      DEF_IPUT(5, Instruction::IPUT, 9u, 100u, 3u),
-      DEF_IGET(6, Instruction::IGET, 12u, 100u, 3u),  // Differs from the top, equals the CONST.
-
-      DEF_IGET(3, Instruction::IGET_SHORT, 13u, 100u, 4u),
-      DEF_IGET(3, Instruction::IGET_CHAR, 14u, 100u, 5u),
-      DEF_IPUT(4, Instruction::IPUT_SHORT, 15u, 100u, 6u),  // Clobbers field #4, not #5.
-      DEF_IGET(6, Instruction::IGET_SHORT, 16u, 100u, 4u),  // Differs from the top.
-      DEF_IGET(6, Instruction::IGET_CHAR, 17u, 100u, 5u),   // Same as the top.
-
-      DEF_CONST(4, Instruction::CONST, 18u, 3000),
-      DEF_IPUT(4, Instruction::IPUT, 18u, 100u, 7u),
-      DEF_IPUT(4, Instruction::IPUT, 18u, 100u, 8u),
-      DEF_CONST(5, Instruction::CONST, 21u, 3001),
-      DEF_IPUT(5, Instruction::IPUT, 21u, 100u, 7u),
-      DEF_IPUT(5, Instruction::IPUT, 21u, 100u, 8u),
-      DEF_IGET(6, Instruction::IGET, 24u, 100u, 7u),
-      DEF_IGET(6, Instruction::IGET, 25u, 100u, 8u),  // Same value as read from field #7.
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-
-  EXPECT_EQ(value_names_[2], value_names_[3]);
-
-  EXPECT_NE(value_names_[4], value_names_[7]);
-  EXPECT_NE(value_names_[5], value_names_[7]);
-
-  EXPECT_NE(value_names_[8], value_names_[12]);
-  EXPECT_EQ(value_names_[9], value_names_[12]);
-
-  EXPECT_NE(value_names_[13], value_names_[16]);
-  EXPECT_EQ(value_names_[14], value_names_[17]);
-
-  EXPECT_EQ(value_names_[24], value_names_[25]);
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, AliasingIFieldsTwoObjects) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-      { 3u, 1u, 3u, false, kDexMemAccessWord },
-      { 4u, 1u, 4u, false, kDexMemAccessShort },
-      { 5u, 1u, 5u, false, kDexMemAccessChar },
-      { 6u, 0u, 0u, false, kDexMemAccessShort },   // Unresolved.
-      { 7u, 1u, 7u, false, kDexMemAccessWord },
-      { 8u, 1u, 8u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_IGET(3, Instruction::IGET, 0u, 100u, 0u),
-      DEF_IPUT(4, Instruction::IPUT, 1u, 101u, 0u),   // May alias with the IGET at the top.
-      DEF_IGET(6, Instruction::IGET, 2u, 100u, 0u),   // Differs from the top.
-
-      DEF_IGET(3, Instruction::IGET, 3u, 100u, 1u),
-      DEF_IPUT(5, Instruction::IPUT, 3u, 101u, 1u),   // If aliasing, stores the same value.
-      DEF_IGET(6, Instruction::IGET, 5u, 100u, 1u),   // Same as the top.
-
-      DEF_IGET(3, Instruction::IGET, 6u, 100u, 2u),
-      DEF_CONST(5, Instruction::CONST, 7u, 1000),
-      DEF_IPUT(5, Instruction::IPUT, 7u, 101u, 2u),
-      DEF_IGET(6, Instruction::IGET, 9u, 100u, 2u),   // Differs from the top and the CONST.
-
-      DEF_IGET(3, Instruction::IGET, 10u, 100u, 3u),
-      DEF_CONST(3, Instruction::CONST, 11u, 2000),
-      DEF_IPUT(4, Instruction::IPUT, 11u, 101u, 3u),
-      DEF_IPUT(5, Instruction::IPUT, 11u, 101u, 3u),
-      DEF_IGET(6, Instruction::IGET, 14u, 100u, 3u),  // Differs from the top and the CONST.
-
-      DEF_IGET(3, Instruction::IGET_SHORT, 15u, 100u, 4u),
-      DEF_IGET(3, Instruction::IGET_CHAR, 16u, 100u, 5u),
-      DEF_IPUT(4, Instruction::IPUT_SHORT, 17u, 101u, 6u),  // Clobbers field #4, not #5.
-      DEF_IGET(6, Instruction::IGET_SHORT, 18u, 100u, 4u),  // Differs from the top.
-      DEF_IGET(6, Instruction::IGET_CHAR, 19u, 100u, 5u),   // Same as the top.
-
-      DEF_CONST(4, Instruction::CONST, 20u, 3000),
-      DEF_IPUT(4, Instruction::IPUT, 20u, 100u, 7u),
-      DEF_IPUT(4, Instruction::IPUT, 20u, 101u, 8u),
-      DEF_CONST(5, Instruction::CONST, 23u, 3001),
-      DEF_IPUT(5, Instruction::IPUT, 23u, 100u, 7u),
-      DEF_IPUT(5, Instruction::IPUT, 23u, 101u, 8u),
-      DEF_IGET(6, Instruction::IGET, 26u, 100u, 7u),
-      DEF_IGET(6, Instruction::IGET, 27u, 101u, 8u),  // Same value as read from field #7.
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[2]);
-
-  EXPECT_EQ(value_names_[3], value_names_[5]);
-
-  EXPECT_NE(value_names_[6], value_names_[9]);
-  EXPECT_NE(value_names_[7], value_names_[9]);
-
-  EXPECT_NE(value_names_[10], value_names_[14]);
-  EXPECT_NE(value_names_[11], value_names_[14]);
-
-  EXPECT_NE(value_names_[15], value_names_[18]);
-  EXPECT_EQ(value_names_[16], value_names_[19]);
-
-  EXPECT_EQ(value_names_[26], value_names_[27]);
-}
-
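A note on what the two aliasing-IFields tests above pin down: GVN must assume that two distinct references can point at the same object, so an IPUT through one reference kills the remembered value of that field behind every other reference, unless the stored value is provably the one already known. A minimal standalone sketch of that kill rule (illustrative only, not the ART implementation; ValueName, Load and Store are invented):

  #include <cstdint>
  #include <iostream>
  #include <map>
  #include <utility>

  // Illustrative only: value names for field loads, keyed by (base, field).
  using ValueName = uint32_t;
  using Loc = std::pair<int, int>;  // (base reference, field id).

  std::map<Loc, ValueName> known;
  ValueName next_name = 100u;

  ValueName Load(int base, int field) {
    auto it = known.find({base, field});
    if (it != known.end()) {
      return it->second;                         // Reuse the remembered name.
    }
    return known[{base, field}] = next_name++;   // Fresh name for an unknown value.
  }

  void Store(int base, int field, ValueName value) {
    // The store may alias any other base holding the same field; forget every
    // remembered value for that field that could now be stale.
    for (auto it = known.begin(); it != known.end();) {
      if (it->first.second == field && it->second != value) {
        it = known.erase(it);
      } else {
        ++it;
      }
    }
    known[{base, field}] = value;
  }

  int main() {
    ValueName v0 = Load(100, 0);       // IGET v100.f0.
    Store(101, 0, next_name++);        // IPUT v101.f0 of some other value...
    ValueName v2 = Load(100, 0);       // ...so this IGET gets a fresh name.
    ValueName v3 = Load(100, 1);
    Store(101, 1, v3);                 // Stores the very value just read...
    ValueName v5 = Load(100, 1);       // ...so the remembered name survives.
    std::cout << (v0 != v2) << (v3 == v5) << "\n";  // 11
  }

That is exactly the IGET/IPUT/IGET shape in the two-object test: field #0's reload differs, field #1's does not.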
-TEST_F(GlobalValueNumberingTestDiamond, SFields) {
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-      { 3u, 1u, 3u, false, kDexMemAccessWord },
-      { 4u, 1u, 4u, false, kDexMemAccessShort },
-      { 5u, 1u, 5u, false, kDexMemAccessChar },
-      { 6u, 0u, 0u, false, kDexMemAccessShort },   // Unresolved.
-      { 7u, 1u, 7u, false, kDexMemAccessWord },
-      { 8u, 1u, 8u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_SGET(3, Instruction::SGET, 0u, 0u),
-      DEF_SGET(6, Instruction::SGET, 1u, 0u),         // Same as at the top.
-
-      DEF_SGET(4, Instruction::SGET, 2u, 1u),
-      DEF_SGET(6, Instruction::SGET, 3u, 1u),         // Same as at the left side.
-
-      DEF_SGET(3, Instruction::SGET, 4u, 2u),
-      DEF_CONST(5, Instruction::CONST, 5u, 100),
-      DEF_SPUT(5, Instruction::SPUT, 5u, 2u),
-      DEF_SGET(6, Instruction::SGET, 7u, 2u),         // Differs from the top and the CONST.
-
-      DEF_SGET(3, Instruction::SGET, 8u, 3u),
-      DEF_CONST(3, Instruction::CONST, 9u, 200),
-      DEF_SPUT(4, Instruction::SPUT, 9u, 3u),
-      DEF_SPUT(5, Instruction::SPUT, 9u, 3u),
-      DEF_SGET(6, Instruction::SGET, 12u, 3u),        // Differs from the top, equals the CONST.
-
-      DEF_SGET(3, Instruction::SGET_SHORT, 13u, 4u),
-      DEF_SGET(3, Instruction::SGET_CHAR, 14u, 5u),
-      DEF_SPUT(4, Instruction::SPUT_SHORT, 15u, 6u),  // Clobbers field #4, not #5.
-      DEF_SGET(6, Instruction::SGET_SHORT, 16u, 4u),  // Differs from the top.
-      DEF_SGET(6, Instruction::SGET_CHAR, 17u, 5u),   // Same as the top.
-
-      DEF_CONST(4, Instruction::CONST, 18u, 300),
-      DEF_SPUT(4, Instruction::SPUT, 18u, 7u),
-      DEF_SPUT(4, Instruction::SPUT, 18u, 8u),
-      DEF_CONST(5, Instruction::CONST, 21u, 301),
-      DEF_SPUT(5, Instruction::SPUT, 21u, 7u),
-      DEF_SPUT(5, Instruction::SPUT, 21u, 8u),
-      DEF_SGET(6, Instruction::SGET, 24u, 7u),
-      DEF_SGET(6, Instruction::SGET, 25u, 8u),        // Same value as read from field #7.
-  };
-
-  PrepareSFields(sfields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-
-  EXPECT_EQ(value_names_[2], value_names_[3]);
-
-  EXPECT_NE(value_names_[4], value_names_[7]);
-  EXPECT_NE(value_names_[5], value_names_[7]);
-
-  EXPECT_NE(value_names_[8], value_names_[12]);
-  EXPECT_EQ(value_names_[9], value_names_[12]);
-
-  EXPECT_NE(value_names_[13], value_names_[16]);
-  EXPECT_EQ(value_names_[14], value_names_[17]);
-
-  EXPECT_EQ(value_names_[24], value_names_[25]);
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, NonAliasingArrays) {
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 100u),
-      DEF_AGET(3, Instruction::AGET, 1u, 100u, 101u),
-      DEF_AGET(6, Instruction::AGET, 2u, 100u, 101u),   // Same as at the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 200u),
-      DEF_AGET(4, Instruction::AGET, 4u, 200u, 201u),
-      DEF_AGET(6, Instruction::AGET, 5u, 200u, 201u),   // Same as at the left side.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 300u),
-      DEF_AGET(3, Instruction::AGET, 7u, 300u, 301u),
-      DEF_CONST(5, Instruction::CONST, 8u, 1000),
-      DEF_APUT(5, Instruction::APUT, 8u, 300u, 301u),
-      DEF_AGET(6, Instruction::AGET, 10u, 300u, 301u),  // Differs from the top and the CONST.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 400u),
-      DEF_AGET(3, Instruction::AGET, 12u, 400u, 401u),
-      DEF_CONST(3, Instruction::CONST, 13u, 2000),
-      DEF_APUT(4, Instruction::APUT, 13u, 400u, 401u),
-      DEF_APUT(5, Instruction::APUT, 13u, 400u, 401u),
-      DEF_AGET(6, Instruction::AGET, 16u, 400u, 401u),  // Differs from the top, equals the CONST.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 500u),
-      DEF_AGET(3, Instruction::AGET, 18u, 500u, 501u),
-      DEF_APUT(4, Instruction::APUT, 19u, 500u, 502u),  // Clobbers value at index 501u.
-      DEF_AGET(6, Instruction::AGET, 20u, 500u, 501u),  // Differs from the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 600u),
-      DEF_CONST(4, Instruction::CONST, 22u, 3000),
-      DEF_APUT(4, Instruction::APUT, 22u, 600u, 601u),
-      DEF_APUT(4, Instruction::APUT, 22u, 600u, 602u),
-      DEF_CONST(5, Instruction::CONST, 25u, 3001),
-      DEF_APUT(5, Instruction::APUT, 25u, 600u, 601u),
-      DEF_APUT(5, Instruction::APUT, 25u, 600u, 602u),
-      DEF_AGET(6, Instruction::AGET, 28u, 600u, 601u),
-      DEF_AGET(6, Instruction::AGET, 29u, 600u, 602u),  // Same value as read from index 601u.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 700u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 701u),
-      DEF_AGET(3, Instruction::AGET, 32u, 700u, 702u),
-      DEF_APUT(4, Instruction::APUT, 33u, 701u, 702u),  // Doesn't interfere with unrelated array.
-      DEF_AGET(6, Instruction::AGET, 34u, 700u, 702u),  // Same value as at the top.
-  };
-
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[1], value_names_[2]);
-
-  EXPECT_EQ(value_names_[4], value_names_[5]);
-
-  EXPECT_NE(value_names_[7], value_names_[10]);
-  EXPECT_NE(value_names_[8], value_names_[10]);
-
-  EXPECT_NE(value_names_[12], value_names_[16]);
-  EXPECT_EQ(value_names_[13], value_names_[16]);
-
-  EXPECT_NE(value_names_[18], value_names_[20]);
-
-  EXPECT_NE(value_names_[28], value_names_[22]);
-  EXPECT_NE(value_names_[28], value_names_[25]);
-  EXPECT_EQ(value_names_[28], value_names_[29]);
-
-  EXPECT_EQ(value_names_[32], value_names_[34]);
-}
-
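NonAliasingArrays works because a NEW_ARRAY (or NEW_INSTANCE) result is a unique reference: nothing older can point into it, and two fresh allocations can never alias each other. A hedged sketch of how that could be tracked (invented names; simplified so that a store kills remembered values at every index of any possibly-aliasing array):

  #include <cstdint>
  #include <iostream>
  #include <map>
  #include <set>
  #include <utility>

  // Sketch: AGET value names keyed by (array, index); NEW_ARRAY results are
  // unique references that cannot alias anything else.
  using ValueName = uint32_t;

  std::map<std::pair<int, int>, ValueName> known;
  std::set<int> unique_refs;
  ValueName next_name = 1u;

  int NewArray(int id) { unique_refs.insert(id); return id; }

  ValueName Aget(int array, int index) {
    auto it = known.find({array, index});
    return it != known.end() ? it->second : (known[{array, index}] = next_name++);
  }

  void Aput(int array, int index, ValueName value) {
    for (auto it = known.begin(); it != known.end();) {
      bool same_array = it->first.first == array;
      // Distinct arrays may alias only if neither is a unique reference.
      bool may_alias = same_array ||
          (unique_refs.count(array) == 0 && unique_refs.count(it->first.first) == 0);
      if (may_alias && it->second != value) {
        it = known.erase(it);
      } else {
        ++it;
      }
    }
    known[{array, index}] = value;
  }

  int main() {
    int a = NewArray(700), b = NewArray(701);
    ValueName v = Aget(a, 702);
    Aput(b, 702, next_name++);                 // Unrelated unique array.
    std::cout << (Aget(a, 702) == v) << "\n";  // 1, like the EXPECT_EQ above.
  }

Killing the whole array on a same-array store matches the 500u case: with symbolic indexes, a store at 502u may hit 501u.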
-TEST_F(GlobalValueNumberingTestDiamond, AliasingArrays) {
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      // NOTE: We're also testing that these tests really do not interfere with each other.
-
-      DEF_AGET(3, Instruction::AGET_BOOLEAN, 0u, 100u, 101u),
-      DEF_AGET(6, Instruction::AGET_BOOLEAN, 1u, 100u, 101u),  // Same as at the top.
-
-      DEF_AGET(4, Instruction::AGET_OBJECT, 2u, 200u, 201u),
-      DEF_AGET(6, Instruction::AGET_OBJECT, 3u, 200u, 201u),  // Same as at the left side.
-
-      DEF_AGET(3, Instruction::AGET_WIDE, 4u, 300u, 301u),
-      DEF_CONST(5, Instruction::CONST_WIDE, 6u, 1000),
-      DEF_APUT(5, Instruction::APUT_WIDE, 6u, 300u, 301u),
-      DEF_AGET(6, Instruction::AGET_WIDE, 8u, 300u, 301u),  // Differs from the top and the CONST.
-
-      DEF_AGET(3, Instruction::AGET_SHORT, 10u, 400u, 401u),
-      DEF_CONST(3, Instruction::CONST, 11u, 2000),
-      DEF_APUT(4, Instruction::APUT_SHORT, 11u, 400u, 401u),
-      DEF_APUT(5, Instruction::APUT_SHORT, 11u, 400u, 401u),
-      DEF_AGET(6, Instruction::AGET_SHORT, 12u, 400u, 401u),  // Differs from the top, == CONST.
-
-      DEF_AGET(3, Instruction::AGET_CHAR, 13u, 500u, 501u),
-      DEF_APUT(4, Instruction::APUT_CHAR, 14u, 500u, 502u),  // Clobbers value at index 501u.
-      DEF_AGET(6, Instruction::AGET_CHAR, 15u, 500u, 501u),  // Differs from the top.
-
-      DEF_AGET(3, Instruction::AGET_BYTE, 16u, 600u, 602u),
-      DEF_APUT(4, Instruction::APUT_BYTE, 17u, 601u, 602u),  // Clobbers values in array 600u.
-      DEF_AGET(6, Instruction::AGET_BYTE, 18u, 600u, 602u),  // Differs from the top.
-
-      DEF_CONST(4, Instruction::CONST, 19u, 3000),
-      DEF_APUT(4, Instruction::APUT, 19u, 700u, 701u),
-      DEF_APUT(4, Instruction::APUT, 19u, 700u, 702u),
-      DEF_CONST(5, Instruction::CONST, 22u, 3001),
-      DEF_APUT(5, Instruction::APUT, 22u, 700u, 701u),
-      DEF_APUT(5, Instruction::APUT, 22u, 700u, 702u),
-      DEF_AGET(6, Instruction::AGET, 25u, 700u, 701u),
-      DEF_AGET(6, Instruction::AGET, 26u, 700u, 702u),  // Same value as read from index 701u.
-  };
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 4, 6, 8 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-
-  EXPECT_EQ(value_names_[2], value_names_[3]);
-
-  EXPECT_NE(value_names_[4], value_names_[7]);
-  EXPECT_NE(value_names_[5], value_names_[7]);
-
-  EXPECT_NE(value_names_[8], value_names_[12]);
-  EXPECT_EQ(value_names_[9], value_names_[12]);
-
-  EXPECT_NE(value_names_[13], value_names_[15]);
-
-  EXPECT_NE(value_names_[16], value_names_[18]);
-
-  EXPECT_NE(value_names_[25], value_names_[19]);
-  EXPECT_NE(value_names_[25], value_names_[22]);
-  EXPECT_EQ(value_names_[25], value_names_[26]);
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, Phi) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000),
-      DEF_CONST(4, Instruction::CONST, 1u, 2000),
-      DEF_CONST(5, Instruction::CONST, 2u, 3000),
-      DEF_MOVE(4, Instruction::MOVE, 3u, 0u),
-      DEF_MOVE(4, Instruction::MOVE, 4u, 1u),
-      DEF_MOVE(5, Instruction::MOVE, 5u, 0u),
-      DEF_MOVE(5, Instruction::MOVE, 6u, 2u),
-      DEF_PHI2(6, 7u, 3u, 5u),    // Same as CONST 0u (1000).
-      DEF_PHI2(6, 8u, 3u, 0u),    // Same as CONST 0u (1000).
-      DEF_PHI2(6, 9u, 0u, 5u),    // Same as CONST 0u (1000).
-      DEF_PHI2(6, 10u, 4u, 5u),   // Merge 1u (2000) and 0u (1000).
-      DEF_PHI2(6, 11u, 1u, 5u),   // Merge 1u (2000) and 0u (1000).
-      DEF_PHI2(6, 12u, 4u, 0u),   // Merge 1u (2000) and 0u (1000).
-      DEF_PHI2(6, 13u, 1u, 0u),   // Merge 1u (2000) and 0u (1000).
-      DEF_PHI2(6, 14u, 3u, 6u),   // Merge 0u (1000) and 2u (3000).
-      DEF_PHI2(6, 15u, 0u, 6u),   // Merge 0u (1000) and 2u (3000).
-      DEF_PHI2(6, 16u, 3u, 2u),   // Merge 0u (1000) and 2u (3000).
-      DEF_PHI2(6, 17u, 0u, 2u),   // Merge 0u (1000) and 2u (3000).
-      DEF_PHI2(6, 18u, 4u, 6u),   // Merge 1u (2000) and 2u (3000).
-      DEF_PHI2(6, 19u, 1u, 6u),   // Merge 1u (2000) and 2u (3000).
-      DEF_PHI2(6, 20u, 4u, 2u),   // Merge 1u (2000) and 2u (3000).
-      DEF_PHI2(6, 21u, 1u, 2u),   // Merge 1u (2000) and 2u (3000).
-  };
-
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[7]);
-  EXPECT_EQ(value_names_[0], value_names_[8]);
-  EXPECT_EQ(value_names_[0], value_names_[9]);
-  EXPECT_NE(value_names_[10], value_names_[0]);
-  EXPECT_NE(value_names_[10], value_names_[1]);
-  EXPECT_NE(value_names_[10], value_names_[2]);
-  EXPECT_EQ(value_names_[10], value_names_[11]);
-  EXPECT_EQ(value_names_[10], value_names_[12]);
-  EXPECT_EQ(value_names_[10], value_names_[13]);
-  EXPECT_NE(value_names_[14], value_names_[0]);
-  EXPECT_NE(value_names_[14], value_names_[1]);
-  EXPECT_NE(value_names_[14], value_names_[2]);
-  EXPECT_NE(value_names_[14], value_names_[10]);
-  EXPECT_EQ(value_names_[14], value_names_[15]);
-  EXPECT_EQ(value_names_[14], value_names_[16]);
-  EXPECT_EQ(value_names_[14], value_names_[17]);
-  EXPECT_NE(value_names_[18], value_names_[0]);
-  EXPECT_NE(value_names_[18], value_names_[1]);
-  EXPECT_NE(value_names_[18], value_names_[2]);
-  EXPECT_NE(value_names_[18], value_names_[10]);
-  EXPECT_NE(value_names_[18], value_names_[14]);
-  EXPECT_EQ(value_names_[18], value_names_[19]);
-  EXPECT_EQ(value_names_[18], value_names_[20]);
-  EXPECT_EQ(value_names_[18], value_names_[21]);
-}
-
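The Phi test is about congruence, not memory: a phi whose inputs all carry one value name is that value, and two phis over the same set of input names (in any operand order, possibly routed through MOVEs) share one name. Roughly (hypothetical interning table, not the ART data structures):

  #include <algorithm>
  #include <cstdint>
  #include <iostream>
  #include <map>
  #include <vector>

  using ValueName = uint32_t;

  // Hypothetical phi numbering: identical inputs collapse, and phis over the
  // same input set are congruent. Real implementations also key on the merge
  // location; that is omitted here.
  std::map<std::vector<ValueName>, ValueName> phi_names;
  ValueName next_name = 1000u;

  ValueName NumberPhi(std::vector<ValueName> inputs) {
    std::sort(inputs.begin(), inputs.end());
    inputs.erase(std::unique(inputs.begin(), inputs.end()), inputs.end());
    if (inputs.size() == 1u) {
      return inputs[0];  // A phi of a single value is that value.
    }
    auto it = phi_names.find(inputs);
    return it != phi_names.end() ? it->second : (phi_names[inputs] = next_name++);
  }

  int main() {
    ValueName c1000 = 1u, c2000 = 2u, c3000 = 3u;  // Stand-ins for the CONSTs.
    std::cout << (NumberPhi({c1000, c1000}) == c1000);      // 1
    ValueName m = NumberPhi({c2000, c1000});
    std::cout << (NumberPhi({c1000, c2000}) == m);          // 1
    std::cout << (NumberPhi({c1000, c3000}) != m) << "\n";  // 1
  }

Keying on the input set is why phis 10u through 13u above collapse to a single name even though their operands mix MOVEs and the original CONSTs.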
-TEST_F(GlobalValueNumberingTestDiamond, PhiWide) {
-  static const MIRDef mirs[] = {
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000),
-      DEF_CONST_WIDE(4, Instruction::CONST_WIDE, 2u, 2000),
-      DEF_CONST_WIDE(5, Instruction::CONST_WIDE, 4u, 3000),
-      DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 6u, 0u),
-      DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 8u, 2u),
-      DEF_MOVE_WIDE(5, Instruction::MOVE_WIDE, 10u, 0u),
-      DEF_MOVE_WIDE(5, Instruction::MOVE_WIDE, 12u, 4u),
-      DEF_PHI2(6, 14u, 6u, 10u),    // Same as CONST_WIDE 0u (1000).
-      DEF_PHI2(6, 15u, 7u, 11u),    // Same as CONST_WIDE 0u (1000), high word.
-      DEF_PHI2(6, 16u, 6u,  0u),    // Same as CONST_WIDE 0u (1000).
-      DEF_PHI2(6, 17u, 7u,  1u),    // Same as CONST_WIDE 0u (1000), high word.
-      DEF_PHI2(6, 18u, 0u, 10u),    // Same as CONST_WIDE 0u (1000).
-      DEF_PHI2(6, 19u, 1u, 11u),    // Same as CONST_WIDE 0u (1000), high word.
-      DEF_PHI2(6, 20u, 8u, 10u),    // Merge 2u (2000) and 0u (1000).
-      DEF_PHI2(6, 21u, 9u, 11u),    // Merge 2u (2000) and 0u (1000), high word.
-      DEF_PHI2(6, 22u, 2u, 10u),    // Merge 2u (2000) and 0u (1000).
-      DEF_PHI2(6, 23u, 3u, 11u),    // Merge 2u (2000) and 0u (1000), high word.
-      DEF_PHI2(6, 24u, 8u,  0u),    // Merge 2u (2000) and 0u (1000).
-      DEF_PHI2(6, 25u, 9u,  1u),    // Merge 2u (2000) and 0u (1000), high word.
-      DEF_PHI2(6, 26u, 2u,  0u),    // Merge 2u (2000) and 0u (1000).
-      DEF_PHI2(6, 27u, 3u,  1u),    // Merge 2u (2000) and 0u (1000), high word.
-      DEF_PHI2(6, 28u, 6u, 12u),    // Merge 0u (1000) and 4u (3000).
-      DEF_PHI2(6, 29u, 7u, 13u),    // Merge 0u (1000) and 4u (3000), high word.
-      DEF_PHI2(6, 30u, 0u, 12u),    // Merge 0u (1000) and 4u (3000).
-      DEF_PHI2(6, 31u, 1u, 13u),    // Merge 0u (1000) and 4u (3000), high word.
-      DEF_PHI2(6, 32u, 6u,  4u),    // Merge 0u (1000) and 4u (3000).
-      DEF_PHI2(6, 33u, 7u,  5u),    // Merge 0u (1000) and 4u (3000), high word.
-      DEF_PHI2(6, 34u, 0u,  4u),    // Merge 0u (1000) and 4u (3000).
-      DEF_PHI2(6, 35u, 1u,  5u),    // Merge 0u (1000) and 4u (3000), high word.
-      DEF_PHI2(6, 36u, 8u, 12u),    // Merge 2u (2000) and 4u (3000).
-      DEF_PHI2(6, 37u, 9u, 13u),    // Merge 2u (2000) and 4u (3000), high word.
-      DEF_PHI2(6, 38u, 2u, 12u),    // Merge 2u (2000) and 4u (3000).
-      DEF_PHI2(6, 39u, 3u, 13u),    // Merge 2u (2000) and 4u (3000), high word.
-      DEF_PHI2(6, 40u, 8u,  4u),    // Merge 2u (2000) and 4u (3000).
-      DEF_PHI2(6, 41u, 9u,  5u),    // Merge 2u (2000) and 4u (3000), high word.
-      DEF_PHI2(6, 42u, 2u,  4u),    // Merge 2u (2000) and 4u (3000).
-      DEF_PHI2(6, 43u, 3u,  5u),    // Merge 2u (2000) and 4u (3000), high word.
-  };
-
-  PrepareMIRs(mirs);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    if ((mirs_[i].ssa_rep->defs[0] % 2) == 0) {
-      const int32_t wide_sregs[] = { mirs_[i].ssa_rep->defs[0] };
-      MarkAsWideSRegs(wide_sregs);
-    }
-  }
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[7]);
-  EXPECT_EQ(value_names_[0], value_names_[9]);
-  EXPECT_EQ(value_names_[0], value_names_[11]);
-  EXPECT_NE(value_names_[13], value_names_[0]);
-  EXPECT_NE(value_names_[13], value_names_[1]);
-  EXPECT_NE(value_names_[13], value_names_[2]);
-  EXPECT_EQ(value_names_[13], value_names_[15]);
-  EXPECT_EQ(value_names_[13], value_names_[17]);
-  EXPECT_EQ(value_names_[13], value_names_[19]);
-  EXPECT_NE(value_names_[21], value_names_[0]);
-  EXPECT_NE(value_names_[21], value_names_[1]);
-  EXPECT_NE(value_names_[21], value_names_[2]);
-  EXPECT_NE(value_names_[21], value_names_[13]);
-  EXPECT_EQ(value_names_[21], value_names_[23]);
-  EXPECT_EQ(value_names_[21], value_names_[25]);
-  EXPECT_EQ(value_names_[21], value_names_[27]);
-  EXPECT_NE(value_names_[29], value_names_[0]);
-  EXPECT_NE(value_names_[29], value_names_[1]);
-  EXPECT_NE(value_names_[29], value_names_[2]);
-  EXPECT_NE(value_names_[29], value_names_[13]);
-  EXPECT_NE(value_names_[29], value_names_[21]);
-  EXPECT_EQ(value_names_[29], value_names_[31]);
-  EXPECT_EQ(value_names_[29], value_names_[33]);
-  EXPECT_EQ(value_names_[29], value_names_[35]);
-  // High words should get kNoValue.
-  EXPECT_EQ(value_names_[8], kNoValue);
-  EXPECT_EQ(value_names_[10], kNoValue);
-  EXPECT_EQ(value_names_[12], kNoValue);
-  EXPECT_EQ(value_names_[14], kNoValue);
-  EXPECT_EQ(value_names_[16], kNoValue);
-  EXPECT_EQ(value_names_[18], kNoValue);
-  EXPECT_EQ(value_names_[20], kNoValue);
-  EXPECT_EQ(value_names_[22], kNoValue);
-  EXPECT_EQ(value_names_[24], kNoValue);
-  EXPECT_EQ(value_names_[26], kNoValue);
-  EXPECT_EQ(value_names_[28], kNoValue);
-  EXPECT_EQ(value_names_[30], kNoValue);
-  EXPECT_EQ(value_names_[32], kNoValue);
-  EXPECT_EQ(value_names_[34], kNoValue);
-  EXPECT_EQ(value_names_[36], kNoValue);
-}
-
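The tail of PhiWide asserts kNoValue for every high-word phi. That is the wide-value convention: a 64-bit value spans an sreg pair, the value name rides on the low sreg, and high-word sregs (and phis over them) are pinned to the kNoValue sentinel. A toy version (the sentinel's numeric value here is made up):

  #include <cstdint>
  #include <iostream>
  #include <vector>

  // Sketch: a wide (64-bit) value occupies an sreg pair; only the low half
  // carries the value name. The sentinel value below is illustrative.
  constexpr uint32_t kNoValue = 0xffffffffu;

  int main() {
    std::vector<uint32_t> names(4, kNoValue);
    names[0] = 1u;        // CONST_WIDE -> sregs {0, 1}: name goes on sreg 0 only.
    names[2] = names[0];  // MOVE_WIDE  -> sregs {2, 3}: copies the low name only.
    std::cout << (names[1] == kNoValue && names[3] == kNoValue) << "\n";  // 1
  }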
-TEST_F(GlobalValueNumberingTestLoop, NonAliasingIFields) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-      { 3u, 1u, 3u, false, kDexMemAccessWord },
-      { 4u, 1u, 4u, false, kDexMemAccessWord },
-      { 5u, 1u, 5u, false, kDexMemAccessShort },
-      { 6u, 1u, 6u, false, kDexMemAccessChar },
-      { 7u, 0u, 0u, false, kDexMemAccessShort },   // Unresolved.
-      { 8u, 1u, 8u, false, kDexMemAccessWord },
-      { 9u, 0u, 0u, false, kDexMemAccessWord },    // Unresolved.
-      { 10u, 1u, 10u, false, kDexMemAccessWord },
-      { 11u, 1u, 11u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 100u),
-      DEF_IGET(3, Instruction::IGET, 1u, 100u, 0u),
-      DEF_IGET(4, Instruction::IGET, 2u, 100u, 0u),   // Same as at the top.
-      DEF_IGET(5, Instruction::IGET, 3u, 100u, 0u),   // Same as at the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 200u),
-      DEF_IGET(3, Instruction::IGET, 5u, 200u, 1u),
-      DEF_IGET(4, Instruction::IGET, 6u, 200u, 1u),   // Differs from top...
-      DEF_IPUT(4, Instruction::IPUT, 7u, 200u, 1u),   // Because of this IPUT.
-      DEF_IGET(5, Instruction::IGET, 8u, 200u, 1u),   // Differs from top and the loop IGET.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 300u),
-      DEF_IGET(3, Instruction::IGET, 10u, 300u, 2u),
-      DEF_IPUT(4, Instruction::IPUT, 11u, 300u, 2u),  // Because of this IPUT...
-      DEF_IGET(4, Instruction::IGET, 12u, 300u, 2u),  // Differs from top.
-      DEF_IGET(5, Instruction::IGET, 13u, 300u, 2u),  // Differs from top but same as the loop IGET.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 400u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 401u),
-      DEF_CONST(3, Instruction::CONST, 16u, 3000),
-      DEF_IPUT(3, Instruction::IPUT, 16u, 400u, 3u),
-      DEF_IPUT(3, Instruction::IPUT, 16u, 400u, 4u),
-      DEF_IPUT(3, Instruction::IPUT, 16u, 401u, 3u),
-      DEF_IGET(4, Instruction::IGET, 20u, 400u, 3u),  // Differs from 16u and 23u.
-      DEF_IGET(4, Instruction::IGET, 21u, 400u, 4u),  // Same as 20u.
-      DEF_IGET(4, Instruction::IGET, 22u, 401u, 3u),  // Same as 20u.
-      DEF_CONST(4, Instruction::CONST, 23u, 4000),
-      DEF_IPUT(4, Instruction::IPUT, 23u, 400u, 3u),
-      DEF_IPUT(4, Instruction::IPUT, 23u, 400u, 4u),
-      DEF_IPUT(4, Instruction::IPUT, 23u, 401u, 3u),
-      DEF_IGET(5, Instruction::IGET, 27u, 400u, 3u),  // Differs from 16u and 20u...
-      DEF_IGET(5, Instruction::IGET, 28u, 400u, 4u),  // and same as the CONST 23u.
-      DEF_IGET(5, Instruction::IGET, 29u, 401u, 3u),  // and same as the CONST 23u.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 500u),
-      DEF_IGET(3, Instruction::IGET_SHORT, 31u, 500u, 5u),
-      DEF_IGET(3, Instruction::IGET_CHAR, 32u, 500u, 6u),
-      DEF_IPUT(4, Instruction::IPUT_SHORT, 33u, 500u, 7u),  // Clobbers field #5, not #6.
-      DEF_IGET(5, Instruction::IGET_SHORT, 34u, 500u, 5u),  // Differs from the top.
-      DEF_IGET(5, Instruction::IGET_CHAR, 35u, 500u, 6u),   // Same as the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 600u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 601u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 602u),
-      DEF_IGET(3, Instruction::IGET, 39u, 600u, 8u),
-      DEF_IGET(3, Instruction::IGET, 40u, 601u, 8u),
-      DEF_IPUT(4, Instruction::IPUT, 41u, 602u, 9u),  // Doesn't clobber field #8 for other refs.
-      DEF_IGET(5, Instruction::IGET, 42u, 600u, 8u),  // Same as the top.
-      DEF_IGET(5, Instruction::IGET, 43u, 601u, 8u),  // Same as the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 700u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 701u),
-      DEF_CONST(3, Instruction::CONST, 46u, 3000),
-      DEF_IPUT(3, Instruction::IPUT, 46u, 700u, 10u),
-      DEF_IPUT(3, Instruction::IPUT, 46u, 700u, 11u),
-      DEF_IPUT(3, Instruction::IPUT, 46u, 701u, 10u),
-      DEF_IGET(4, Instruction::IGET, 50u, 700u, 10u),  // Differs from the CONSTs 46u and 53u.
-      DEF_IGET(4, Instruction::IGET, 51u, 700u, 11u),  // Same as 50u.
-      DEF_IGET(4, Instruction::IGET, 52u, 701u, 10u),  // Same as 50u.
-      DEF_CONST(4, Instruction::CONST, 53u, 3001),
-      DEF_IPUT(4, Instruction::IPUT, 53u, 700u, 10u),
-      DEF_IPUT(4, Instruction::IPUT, 53u, 700u, 11u),
-      DEF_IPUT(4, Instruction::IPUT, 53u, 701u, 10u),
-      DEF_IGET(5, Instruction::IGET, 57u, 700u, 10u),  // Same as the CONST 53u.
-      DEF_IGET(5, Instruction::IGET, 58u, 700u, 11u),  // Same as the CONST 53u.
-      DEF_IGET(5, Instruction::IGET, 59u, 701u, 10u),  // Same as the CONST 53u.
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[1], value_names_[2]);
-  EXPECT_EQ(value_names_[1], value_names_[3]);
-
-  EXPECT_NE(value_names_[5], value_names_[6]);
-  EXPECT_NE(value_names_[5], value_names_[8]);
-  EXPECT_NE(value_names_[6], value_names_[8]);
-
-  EXPECT_NE(value_names_[10], value_names_[12]);
-  EXPECT_EQ(value_names_[12], value_names_[13]);
-
-  EXPECT_NE(value_names_[20], value_names_[16]);
-  EXPECT_NE(value_names_[20], value_names_[23]);
-  EXPECT_EQ(value_names_[20], value_names_[21]);
-  EXPECT_EQ(value_names_[20], value_names_[22]);
-  EXPECT_NE(value_names_[27], value_names_[16]);
-  EXPECT_NE(value_names_[27], value_names_[20]);
-  EXPECT_EQ(value_names_[27], value_names_[28]);
-  EXPECT_EQ(value_names_[27], value_names_[29]);
-
-  EXPECT_NE(value_names_[31], value_names_[34]);
-  EXPECT_EQ(value_names_[32], value_names_[35]);
-
-  EXPECT_EQ(value_names_[39], value_names_[42]);
-  EXPECT_EQ(value_names_[40], value_names_[43]);
-
-  EXPECT_NE(value_names_[50], value_names_[46]);
-  EXPECT_NE(value_names_[50], value_names_[53]);
-  EXPECT_EQ(value_names_[50], value_names_[51]);
-  EXPECT_EQ(value_names_[50], value_names_[52]);
-  EXPECT_EQ(value_names_[57], value_names_[53]);
-  EXPECT_EQ(value_names_[58], value_names_[53]);
-  EXPECT_EQ(value_names_[59], value_names_[53]);
-}
-
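All of the loop tests share one shape: a read before the loop, a read inside the body that must merge the pre-loop value with what the body itself stores later, and a read after the loop. The in-body merge depends on the loop's own result, so GVN has to iterate to a fixed point. A toy fixed point for a single field (kPreLoop, kStored and kMergeName are invented stand-ins):

  #include <cstdint>
  #include <iostream>

  // Toy fixed point for one field in a loop: the value entering the body is
  // the merge of the pre-loop value and whatever the body stores.
  using ValueName = uint32_t;
  constexpr ValueName kPreLoop = 1u, kStored = 2u, kMergeName = 3u;

  ValueName Merge(ValueName a, ValueName b) { return a == b ? a : kMergeName; }

  int main() {
    ValueName entry = kPreLoop;  // First pass: only the pre-loop value is known.
    for (int pass = 0;; ++pass) {
      ValueName next_entry = Merge(kPreLoop, kStored);  // Pre-loop vs back edge.
      if (next_entry == entry) {
        std::cout << "converged after pass " << pass << "\n";
        break;
      }
      entry = next_entry;  // The body's IGET now sees the merge; run again.
    }
    // The in-body IGET sees kMergeName: distinct from both the pre-loop read
    // and the post-loop read (which sees kStored), as the tests assert.
    std::cout << (entry != kPreLoop && entry != kStored) << "\n";  // 1
  }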
-TEST_F(GlobalValueNumberingTestLoop, AliasingIFieldsSingleObject) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-      { 3u, 1u, 3u, false, kDexMemAccessWord },
-      { 4u, 1u, 4u, false, kDexMemAccessWord },
-      { 5u, 1u, 5u, false, kDexMemAccessShort },
-      { 6u, 1u, 6u, false, kDexMemAccessChar },
-      { 7u, 0u, 0u, false, kDexMemAccessShort },   // Unresolved.
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_IGET(3, Instruction::IGET, 0u, 100u, 0u),
-      DEF_IGET(4, Instruction::IGET, 1u, 100u, 0u),   // Same as at the top.
-      DEF_IGET(5, Instruction::IGET, 2u, 100u, 0u),   // Same as at the top.
-
-      DEF_IGET(3, Instruction::IGET, 3u, 100u, 1u),
-      DEF_IGET(4, Instruction::IGET, 4u, 100u, 1u),   // Differs from top...
-      DEF_IPUT(4, Instruction::IPUT, 5u, 100u, 1u),   // Because of this IPUT.
-      DEF_IGET(5, Instruction::IGET, 6u, 100u, 1u),   // Differs from top and the loop IGET.
-
-      DEF_IGET(3, Instruction::IGET, 7u, 100u, 2u),
-      DEF_IPUT(4, Instruction::IPUT, 8u, 100u, 2u),   // Because of this IPUT...
-      DEF_IGET(4, Instruction::IGET, 9u, 100u, 2u),   // Differs from top.
-      DEF_IGET(5, Instruction::IGET, 10u, 100u, 2u),  // Differs from top but same as the loop IGET.
-
-      DEF_CONST(3, Instruction::CONST, 11u, 3000),
-      DEF_IPUT(3, Instruction::IPUT, 11u, 100u, 3u),
-      DEF_IPUT(3, Instruction::IPUT, 11u, 100u, 4u),
-      DEF_IGET(4, Instruction::IGET, 14u, 100u, 3u),  // Differs from 11u and 16u.
-      DEF_IGET(4, Instruction::IGET, 15u, 100u, 4u),  // Same as 14u.
-      DEF_CONST(4, Instruction::CONST, 16u, 4000),
-      DEF_IPUT(4, Instruction::IPUT, 16u, 100u, 3u),
-      DEF_IPUT(4, Instruction::IPUT, 16u, 100u, 4u),
-      DEF_IGET(5, Instruction::IGET, 19u, 100u, 3u),  // Differs from 11u and 14u...
-      DEF_IGET(5, Instruction::IGET, 20u, 100u, 4u),  // and same as the CONST 16u.
-
-      DEF_IGET(3, Instruction::IGET_SHORT, 21u, 100u, 5u),
-      DEF_IGET(3, Instruction::IGET_CHAR, 22u, 100u, 6u),
-      DEF_IPUT(4, Instruction::IPUT_SHORT, 23u, 100u, 7u),  // Clobbers field #5, not #6.
-      DEF_IGET(5, Instruction::IGET_SHORT, 24u, 100u, 5u),  // Differs from the top.
-      DEF_IGET(5, Instruction::IGET_CHAR, 25u, 100u, 6u),   // Same as the top.
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  EXPECT_NE(value_names_[3], value_names_[4]);
-  EXPECT_NE(value_names_[3], value_names_[6]);
-  EXPECT_NE(value_names_[4], value_names_[6]);
-
-  EXPECT_NE(value_names_[7], value_names_[9]);
-  EXPECT_EQ(value_names_[9], value_names_[10]);
-
-  EXPECT_NE(value_names_[14], value_names_[11]);
-  EXPECT_NE(value_names_[14], value_names_[16]);
-  EXPECT_EQ(value_names_[14], value_names_[15]);
-  EXPECT_NE(value_names_[19], value_names_[11]);
-  EXPECT_NE(value_names_[19], value_names_[14]);
-  EXPECT_EQ(value_names_[19], value_names_[16]);
-  EXPECT_EQ(value_names_[19], value_names_[20]);
-
-  EXPECT_NE(value_names_[21], value_names_[24]);
-  EXPECT_EQ(value_names_[22], value_names_[25]);
-}
-
-TEST_F(GlobalValueNumberingTestLoop, AliasingIFieldsTwoObjects) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-      { 3u, 1u, 3u, false, kDexMemAccessShort },
-      { 4u, 1u, 4u, false, kDexMemAccessChar },
-      { 5u, 0u, 0u, false, kDexMemAccessShort },   // Unresolved.
-      { 6u, 1u, 6u, false, kDexMemAccessWord },
-      { 7u, 1u, 7u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_IGET(3, Instruction::IGET, 0u, 100u, 0u),
-      DEF_IPUT(4, Instruction::IPUT, 1u, 101u, 0u),   // May alias with the IGET at the top.
-      DEF_IGET(5, Instruction::IGET, 2u, 100u, 0u),   // Differs from the top.
-
-      DEF_IGET(3, Instruction::IGET, 3u, 100u, 1u),
-      DEF_IPUT(4, Instruction::IPUT, 3u, 101u, 1u),   // If aliasing, stores the same value.
-      DEF_IGET(5, Instruction::IGET, 5u, 100u, 1u),   // Same as the top.
-
-      DEF_IGET(3, Instruction::IGET, 6u, 100u, 2u),
-      DEF_CONST(4, Instruction::CONST, 7u, 1000),
-      DEF_IPUT(4, Instruction::IPUT, 7u, 101u, 2u),
-      DEF_IGET(5, Instruction::IGET, 9u, 100u, 2u),   // Differs from the top and the CONST.
-
-      DEF_IGET(3, Instruction::IGET_SHORT, 10u, 100u, 3u),
-      DEF_IGET(3, Instruction::IGET_CHAR, 11u, 100u, 4u),
-      DEF_IPUT(4, Instruction::IPUT_SHORT, 12u, 101u, 5u),  // Clobbers field #3, not #4.
-      DEF_IGET(5, Instruction::IGET_SHORT, 13u, 100u, 3u),  // Differs from the top.
-      DEF_IGET(5, Instruction::IGET_CHAR, 14u, 100u, 4u),   // Same as the top.
-
-      DEF_CONST(3, Instruction::CONST, 15u, 3000),
-      DEF_IPUT(3, Instruction::IPUT, 15u, 100u, 6u),
-      DEF_IPUT(3, Instruction::IPUT, 15u, 100u, 7u),
-      DEF_IPUT(3, Instruction::IPUT, 15u, 101u, 6u),
-      DEF_IGET(4, Instruction::IGET, 19u, 100u, 6u),  // Differs from CONSTs 15u and 22u.
-      DEF_IGET(4, Instruction::IGET, 20u, 100u, 7u),  // Same value as 19u.
-      DEF_IGET(4, Instruction::IGET, 21u, 101u, 6u),  // Same value as read from field #7.
-      DEF_CONST(4, Instruction::CONST, 22u, 3001),
-      DEF_IPUT(4, Instruction::IPUT, 22u, 100u, 6u),
-      DEF_IPUT(4, Instruction::IPUT, 22u, 100u, 7u),
-      DEF_IPUT(4, Instruction::IPUT, 22u, 101u, 6u),
-      DEF_IGET(5, Instruction::IGET, 26u, 100u, 6u),  // Same as CONST 22u.
-      DEF_IGET(5, Instruction::IGET, 27u, 100u, 7u),  // Same as CONST 22u.
-      DEF_IGET(5, Instruction::IGET, 28u, 101u, 6u),  // Same as CONST 22u.
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[2]);
-
-  EXPECT_EQ(value_names_[3], value_names_[5]);
-
-  EXPECT_NE(value_names_[6], value_names_[9]);
-  EXPECT_NE(value_names_[7], value_names_[9]);
-
-  EXPECT_NE(value_names_[10], value_names_[13]);
-  EXPECT_EQ(value_names_[11], value_names_[14]);
-
-  EXPECT_NE(value_names_[19], value_names_[15]);
-  EXPECT_NE(value_names_[19], value_names_[22]);
-  EXPECT_EQ(value_names_[22], value_names_[26]);
-  EXPECT_EQ(value_names_[22], value_names_[27]);
-  EXPECT_EQ(value_names_[22], value_names_[28]);
-}
-
-TEST_F(GlobalValueNumberingTestLoop, IFieldToBaseDependency) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      // For the IGET that loads sreg 3u using base 2u, the following IPUT creates a dependency
-      // from the field value to the base. However, this dependency does not result in an
-      // infinite loop since the merge of the field value for base 0u gets assigned a value name
-      // based only on the base 0u, not on the actual value, and breaks the dependency cycle.
-      DEF_IGET(3, Instruction::IGET, 0u, 100u, 0u),
-      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
-      DEF_IGET(4, Instruction::IGET, 2u, 0u, 0u),
-      DEF_IGET(4, Instruction::IGET, 3u, 2u, 0u),
-      DEF_IPUT(4, Instruction::IPUT, 3u, 0u, 0u),
-      DEF_IGET(5, Instruction::IGET, 5u, 0u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[1], value_names_[2]);
-  EXPECT_EQ(value_names_[3], value_names_[5]);
-}
-
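The comment in IFieldToBaseDependency deserves a concrete restatement: sreg 0u is both a loaded value and the base of later loads, so a loop-merge name computed from the merged values would chase its own tail. Naming the merge after the location instead (base name, field, block) terminates. A sketch (the hashing and tag bit are invented):

  #include <cstdint>
  #include <functional>
  #include <iostream>

  // Sketch of cycle-breaking: a loop-merge value name is derived from the
  // *location* (base name, field id, block id), never from the merged values,
  // so a value that feeds its own base cannot recurse forever.
  using ValueName = uint32_t;

  ValueName MergeName(ValueName base, int field, int block) {
    // Any stable hash of the location works; std::hash keeps the sketch short.
    size_t h = std::hash<uint64_t>()((uint64_t{base} << 20) | (field << 10) | block);
    return static_cast<ValueName>(h | 0x80000000u);  // Tagged as a merge name.
  }

  int main() {
    ValueName obj = 1u;                              // v100, the outer object.
    ValueName in_loop = MergeName(obj, 0, 4);        // Merge for base 1u in block 4.
    // Even though the merged *values* include in_loop itself (via the IPUT
    // back into the same field), the name above never changes:
    std::cout << (MergeName(obj, 0, 4) == in_loop) << "\n";  // 1
  }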
-TEST_F(GlobalValueNumberingTestLoop, SFields) {
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_SGET(3, Instruction::SGET, 0u, 0u),
-      DEF_SGET(4, Instruction::SGET, 1u, 0u),         // Same as at the top.
-      DEF_SGET(5, Instruction::SGET, 2u, 0u),         // Same as at the top.
-
-      DEF_SGET(3, Instruction::SGET, 3u, 1u),
-      DEF_SGET(4, Instruction::SGET, 4u, 1u),         // Differs from top...
-      DEF_SPUT(4, Instruction::SPUT, 5u, 1u),         // Because of this SPUT.
-      DEF_SGET(5, Instruction::SGET, 6u, 1u),         // Differs from top and the loop SGET.
-
-      DEF_SGET(3, Instruction::SGET, 7u, 2u),
-      DEF_SPUT(4, Instruction::SPUT, 8u, 2u),         // Because of this SPUT...
-      DEF_SGET(4, Instruction::SGET, 9u, 2u),         // Differs from top.
-      DEF_SGET(5, Instruction::SGET, 10u, 2u),        // Differs from top but same as the loop SGET.
-  };
-
-  PrepareSFields(sfields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  EXPECT_NE(value_names_[3], value_names_[4]);
-  EXPECT_NE(value_names_[3], value_names_[6]);
-  EXPECT_NE(value_names_[4], value_names_[6]);
-
-  EXPECT_NE(value_names_[7], value_names_[9]);
-  EXPECT_EQ(value_names_[9], value_names_[10]);
-}
-
-TEST_F(GlobalValueNumberingTestLoop, NonAliasingArrays) {
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 100u),
-      DEF_AGET(3, Instruction::AGET, 1u, 100u, 101u),
-      DEF_AGET(4, Instruction::AGET, 2u, 100u, 101u),   // Same as at the top.
-      DEF_AGET(5, Instruction::AGET, 3u, 100u, 101u),   // Same as at the top.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 200u),
-      DEF_AGET(3, Instruction::AGET, 5u, 200u, 201u),
-      DEF_AGET(4, Instruction::AGET, 6u, 200u, 201u),  // Differs from top...
-      DEF_APUT(4, Instruction::APUT, 7u, 200u, 201u),  // Because of this APUT.
-      DEF_AGET(5, Instruction::AGET, 8u, 200u, 201u),  // Differs from top and the loop AGET.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 300u),
-      DEF_AGET(3, Instruction::AGET, 10u, 300u, 301u),
-      DEF_APUT(4, Instruction::APUT, 11u, 300u, 301u),  // Because of this APUT...
-      DEF_AGET(4, Instruction::AGET, 12u, 300u, 301u),  // Differs from top.
-      DEF_AGET(5, Instruction::AGET, 13u, 300u, 301u),  // Differs from top but == the loop AGET.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 400u),
-      DEF_CONST(3, Instruction::CONST, 15u, 3000),
-      DEF_APUT(3, Instruction::APUT, 15u, 400u, 401u),
-      DEF_APUT(3, Instruction::APUT, 15u, 400u, 402u),
-      DEF_AGET(4, Instruction::AGET, 18u, 400u, 401u),  // Differs from 15u and 20u.
-      DEF_AGET(4, Instruction::AGET, 19u, 400u, 402u),  // Same as 18u.
-      DEF_CONST(4, Instruction::CONST, 20u, 4000),
-      DEF_APUT(4, Instruction::APUT, 20u, 400u, 401u),
-      DEF_APUT(4, Instruction::APUT, 20u, 400u, 402u),
-      DEF_AGET(5, Instruction::AGET, 23u, 400u, 401u),  // Differs from 15u and 18u...
-      DEF_AGET(5, Instruction::AGET, 24u, 400u, 402u),  // and same as the CONST 20u.
-
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 500u),
-      DEF_AGET(3, Instruction::AGET, 26u, 500u, 501u),
-      DEF_APUT(4, Instruction::APUT, 27u, 500u, 502u),  // Clobbers element at index 501u.
-      DEF_AGET(5, Instruction::AGET, 28u, 500u, 501u),  // Differs from the top.
-  };
-
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[1], value_names_[2]);
-  EXPECT_EQ(value_names_[1], value_names_[3]);
-
-  EXPECT_NE(value_names_[5], value_names_[6]);
-  EXPECT_NE(value_names_[5], value_names_[8]);
-  EXPECT_NE(value_names_[6], value_names_[8]);
-
-  EXPECT_NE(value_names_[10], value_names_[12]);
-  EXPECT_EQ(value_names_[12], value_names_[13]);
-
-  EXPECT_NE(value_names_[18], value_names_[15]);
-  EXPECT_NE(value_names_[18], value_names_[20]);
-  EXPECT_EQ(value_names_[18], value_names_[19]);
-  EXPECT_NE(value_names_[23], value_names_[15]);
-  EXPECT_NE(value_names_[23], value_names_[18]);
-  EXPECT_EQ(value_names_[23], value_names_[20]);
-  EXPECT_EQ(value_names_[23], value_names_[24]);
-
-  EXPECT_NE(value_names_[26], value_names_[28]);
-}
-
-TEST_F(GlobalValueNumberingTestLoop, AliasingArrays) {
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_AGET(3, Instruction::AGET_WIDE, 0u, 100u, 101u),
-      DEF_AGET(4, Instruction::AGET_WIDE, 2u, 100u, 101u),   // Same as at the top.
-      DEF_AGET(5, Instruction::AGET_WIDE, 4u, 100u, 101u),   // Same as at the top.
-
-      DEF_AGET(3, Instruction::AGET_BYTE, 6u, 200u, 201u),
-      DEF_AGET(4, Instruction::AGET_BYTE, 7u, 200u, 201u),  // Differs from top...
-      DEF_APUT(4, Instruction::APUT_BYTE, 8u, 200u, 201u),  // Because of this APUT.
-      DEF_AGET(5, Instruction::AGET_BYTE, 9u, 200u, 201u),  // Differs from top and the loop AGET.
-
-      DEF_AGET(3, Instruction::AGET, 10u, 300u, 301u),
-      DEF_APUT(4, Instruction::APUT, 11u, 300u, 301u),  // Because of this APUT...
-      DEF_AGET(4, Instruction::AGET, 12u, 300u, 301u),  // Differs from top.
-      DEF_AGET(5, Instruction::AGET, 13u, 300u, 301u),  // Differs from top but == the loop AGET.
-
-      DEF_CONST(3, Instruction::CONST, 14u, 3000),
-      DEF_APUT(3, Instruction::APUT_CHAR, 14u, 400u, 401u),
-      DEF_APUT(3, Instruction::APUT_CHAR, 14u, 400u, 402u),
-      DEF_AGET(4, Instruction::AGET_CHAR, 15u, 400u, 401u),  // Differs from the CONSTs 14u and 17u.
-      DEF_AGET(4, Instruction::AGET_CHAR, 16u, 400u, 402u),  // Same as 15u.
-      DEF_CONST(4, Instruction::CONST, 17u, 4000),
-      DEF_APUT(4, Instruction::APUT_CHAR, 17u, 400u, 401u),
-      DEF_APUT(4, Instruction::APUT_CHAR, 17u, 400u, 402u),
-      DEF_AGET(5, Instruction::AGET_CHAR, 19u, 400u, 401u),  // Differs from 14u and 15u...
-      DEF_AGET(5, Instruction::AGET_CHAR, 20u, 400u, 402u),  // and same as the CONST 17u.
-
-      DEF_AGET(3, Instruction::AGET_SHORT, 21u, 500u, 501u),
-      DEF_APUT(4, Instruction::APUT_SHORT, 22u, 500u, 502u),  // Clobbers element at index 501u.
-      DEF_AGET(5, Instruction::AGET_SHORT, 23u, 500u, 501u),  // Differs from the top.
-
-      DEF_AGET(3, Instruction::AGET_OBJECT, 24u, 600u, 601u),
-      DEF_APUT(4, Instruction::APUT_OBJECT, 25u, 601u, 602u),  // Clobbers 600u/601u.
-      DEF_AGET(5, Instruction::AGET_OBJECT, 26u, 600u, 601u),  // Differs from the top.
-
-      DEF_AGET(3, Instruction::AGET_BOOLEAN, 27u, 700u, 701u),
-      DEF_APUT(4, Instruction::APUT_BOOLEAN, 27u, 701u, 702u),  // Storing the same value.
-      DEF_AGET(5, Instruction::AGET_BOOLEAN, 29u, 700u, 701u),  // Same as the top.
-  };
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 0, 2, 4 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  EXPECT_NE(value_names_[3], value_names_[4]);
-  EXPECT_NE(value_names_[3], value_names_[6]);
-  EXPECT_NE(value_names_[4], value_names_[6]);
-
-  EXPECT_NE(value_names_[7], value_names_[9]);
-  EXPECT_EQ(value_names_[9], value_names_[10]);
-
-  EXPECT_NE(value_names_[14], value_names_[11]);
-  EXPECT_NE(value_names_[14], value_names_[16]);
-  EXPECT_EQ(value_names_[14], value_names_[15]);
-  EXPECT_NE(value_names_[19], value_names_[11]);
-  EXPECT_NE(value_names_[19], value_names_[14]);
-  EXPECT_EQ(value_names_[19], value_names_[16]);
-  EXPECT_EQ(value_names_[19], value_names_[20]);
-
-  EXPECT_NE(value_names_[21], value_names_[23]);
-
-  EXPECT_NE(value_names_[24], value_names_[26]);
-
-  EXPECT_EQ(value_names_[27], value_names_[29]);
-}
-
-TEST_F(GlobalValueNumberingTestLoop, Phi) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000),
-      DEF_PHI2(4, 1u, 0u, 6u),                     // Merge CONST 0u (1000) with the same.
-      DEF_PHI2(4, 2u, 0u, 7u),                     // Merge CONST 0u (1000) with the Phi itself.
-      DEF_PHI2(4, 3u, 0u, 8u),                     // Merge CONST 0u (1000) and CONST 4u (2000).
-      DEF_PHI2(4, 4u, 0u, 9u),                     // Merge CONST 0u (1000) and Phi 3u.
-      DEF_CONST(4, Instruction::CONST, 5u, 2000),
-      DEF_MOVE(4, Instruction::MOVE, 6u, 0u),
-      DEF_MOVE(4, Instruction::MOVE, 7u, 2u),
-      DEF_MOVE(4, Instruction::MOVE, 8u, 5u),
-      DEF_MOVE(4, Instruction::MOVE, 9u, 3u),
-  };
-
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[1], value_names_[0]);
-  EXPECT_EQ(value_names_[2], value_names_[0]);
-
-  EXPECT_NE(value_names_[3], value_names_[0]);
-  EXPECT_NE(value_names_[3], value_names_[5]);
-  EXPECT_NE(value_names_[4], value_names_[0]);
-  EXPECT_NE(value_names_[4], value_names_[5]);
-  EXPECT_NE(value_names_[4], value_names_[3]);
-}
-
-TEST_F(GlobalValueNumberingTestLoop, IFieldLoopVariable) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 0),
-      DEF_IPUT(3, Instruction::IPUT, 0u, 100u, 0u),
-      DEF_IGET(4, Instruction::IGET, 2u, 100u, 0u),
-      DEF_BINOP(4, Instruction::ADD_INT, 3u, 2u, 101u),
-      DEF_IPUT(4, Instruction::IPUT, 3u, 100u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[2], value_names_[0]);
-  EXPECT_NE(value_names_[3], value_names_[0]);
-  EXPECT_NE(value_names_[3], value_names_[2]);
-
-  // Set up vreg_to_ssa_map_exit for prologue and loop and set post-processing mode
-  // as needed for GetStartingVregValueNumber().
-  const int32_t prologue_vreg_to_ssa_map_exit[] = { 0 };
-  const int32_t loop_vreg_to_ssa_map_exit[] = { 3 };
-  PrepareVregToSsaMapExit(3, prologue_vreg_to_ssa_map_exit);
-  PrepareVregToSsaMapExit(4, loop_vreg_to_ssa_map_exit);
-  gvn_->StartPostProcessing();
-
-  // Check that vreg 0 has the same value number as the result of IGET 2u.
-  const LocalValueNumbering* loop = gvn_->GetLvn(4);
-  EXPECT_EQ(value_names_[2], loop->GetStartingVregValueNumber(0));
-}
-
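The post-processing check above relies on merges being interned: the merge the in-loop IGET produced and the merge behind vreg 0's starting value at block 4 are built from the same inputs, so GetStartingVregValueNumber() hands back the very name recorded in value_names_[2]. Reduced to its core (Merge and the concrete names are invented):

  #include <cstdint>
  #include <iostream>

  // Sketch: a vreg's value at block entry is the merge of its SSA defs at the
  // predecessors' exits; merges are interned, so asking twice (once for the
  // in-loop IGET, once for the starting-vreg query) yields one name.
  using ValueName = uint32_t;

  ValueName Merge(ValueName a, ValueName b) {
    if (a == b) return a;
    return 0x80000000u | (a * 31u + b);  // Deterministic stand-in for interning.
  }

  int main() {
    ValueName const_zero = 1u;   // sreg 0: the CONST stored before the loop.
    ValueName add_result = 2u;   // sreg 3: the ADD_INT stored by the loop body.
    ValueName iget = Merge(const_zero, add_result);            // What IGET 2u sees.
    ValueName vreg0_at_entry = Merge(const_zero, add_result);  // The query.
    std::cout << (iget == vreg0_at_entry) << "\n";  // 1, as the EXPECT_EQ above.
  }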
-TEST_F(GlobalValueNumberingTestCatch, IFields) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 200u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 201u),
-      DEF_IGET(3, Instruction::IGET, 2u, 100u, 0u),
-      DEF_IGET(3, Instruction::IGET, 3u, 200u, 0u),
-      DEF_IGET(3, Instruction::IGET, 4u, 201u, 0u),
-      DEF_INVOKE1(4, Instruction::INVOKE_STATIC, 201u),     // Clobbering catch, 201u escapes.
-      DEF_IGET(4, Instruction::IGET, 6u, 100u, 0u),         // Differs from IGET 2u.
-      DEF_IPUT(4, Instruction::IPUT, 6u, 100u, 1u),
-      DEF_IPUT(4, Instruction::IPUT, 6u, 101u, 0u),
-      DEF_IPUT(4, Instruction::IPUT, 6u, 200u, 0u),
-      DEF_IGET(5, Instruction::IGET, 10u, 100u, 0u),        // Differs from IGETs 2u and 6u.
-      DEF_IGET(5, Instruction::IGET, 11u, 200u, 0u),        // Same as the top.
-      DEF_IGET(5, Instruction::IGET, 12u, 201u, 0u),        // Differs from the top, 201u escaped.
-      DEF_IPUT(5, Instruction::IPUT, 10u, 100u, 1u),
-      DEF_IPUT(5, Instruction::IPUT, 10u, 101u, 0u),
-      DEF_IPUT(5, Instruction::IPUT, 10u, 200u, 0u),
-      DEF_IGET(6, Instruction::IGET, 16u, 100u, 0u),        // Differs from IGETs 2u, 6u and 10u.
-      DEF_IGET(6, Instruction::IGET, 17u, 100u, 1u),        // Same as IGET 16u.
-      DEF_IGET(6, Instruction::IGET, 18u, 101u, 0u),        // Same as IGET 16u.
-      DEF_IGET(6, Instruction::IGET, 19u, 200u, 0u),        // Same as IGET 16u.
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[2], value_names_[6]);
-  EXPECT_NE(value_names_[2], value_names_[10]);
-  EXPECT_NE(value_names_[6], value_names_[10]);
-  EXPECT_EQ(value_names_[3], value_names_[11]);
-  EXPECT_NE(value_names_[4], value_names_[12]);
-
-  EXPECT_NE(value_names_[2], value_names_[16]);
-  EXPECT_NE(value_names_[6], value_names_[16]);
-  EXPECT_NE(value_names_[10], value_names_[16]);
-  EXPECT_EQ(value_names_[16], value_names_[17]);
-  EXPECT_EQ(value_names_[16], value_names_[18]);
-  EXPECT_EQ(value_names_[16], value_names_[19]);
-}
-
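The catch tests encode the clobbering rule for exceptional edges: the INVOKE may throw after arbitrary writes, so on entry to the catch everything remembered about the heap is dropped, except fields of objects that were freshly allocated here and never escaped; 200u qualifies, while 201u was passed to the invoke and does not. A sketch (invented types, simplified escape tracking):

  #include <iostream>
  #include <map>
  #include <set>
  #include <utility>

  // Sketch: on the edge into a clobbering catch, keep only values behind
  // references that are (a) freshly allocated here and (b) never escaped.
  using ValueName = unsigned;
  using Heap = std::map<std::pair<int, int>, ValueName>;  // (base, field) -> name.

  Heap EnterCatch(const Heap& heap, const std::set<int>& non_escaped_uniques) {
    Heap kept;
    for (const auto& entry : heap) {
      if (non_escaped_uniques.count(entry.first.first) != 0) {
        kept.insert(entry);  // The thrower cannot have touched this object.
      }
    }
    return kept;
  }

  int main() {
    Heap heap = {{{100, 0}, 1u}, {{200, 0}, 2u}, {{201, 0}, 3u}};
    // 200u and 201u are NEW_INSTANCE, but 201u escaped into the INVOKE:
    Heap in_catch = EnterCatch(heap, /*non_escaped_uniques=*/{200});
    std::cout << in_catch.count({200, 0}) << in_catch.count({100, 0})
              << in_catch.count({201, 0}) << "\n";  // 100
  }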
-TEST_F(GlobalValueNumberingTestCatch, SFields) {
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_SGET(3, Instruction::SGET, 0u, 0u),
-      DEF_INVOKE1(4, Instruction::INVOKE_STATIC, 100u),     // Clobbering catch.
-      DEF_SGET(4, Instruction::SGET, 2u, 0u),               // Differs from SGET 0u.
-      DEF_SPUT(4, Instruction::SPUT, 2u, 1u),
-      DEF_SGET(5, Instruction::SGET, 4u, 0u),               // Differs from SGETs 0u and 2u.
-      DEF_SPUT(5, Instruction::SPUT, 4u, 1u),
-      DEF_SGET(6, Instruction::SGET, 6u, 0u),               // Differs from SGETs 0u, 2u and 4u.
-      DEF_SGET(6, Instruction::SGET, 7u, 1u),               // Same merged value as SGET 6u.
-  };
-
-  PrepareSFields(sfields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[2]);
-  EXPECT_NE(value_names_[0], value_names_[4]);
-  EXPECT_NE(value_names_[2], value_names_[4]);
-  EXPECT_NE(value_names_[0], value_names_[6]);
-  EXPECT_NE(value_names_[2], value_names_[6]);
-  EXPECT_NE(value_names_[4], value_names_[6]);
-  EXPECT_EQ(value_names_[6], value_names_[7]);
-}
-
-TEST_F(GlobalValueNumberingTestCatch, Arrays) {
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 200u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 201u),
-      DEF_AGET(3, Instruction::AGET, 2u, 100u, 101u),
-      DEF_AGET(3, Instruction::AGET, 3u, 200u, 202u),
-      DEF_AGET(3, Instruction::AGET, 4u, 200u, 203u),
-      DEF_AGET(3, Instruction::AGET, 5u, 201u, 202u),
-      DEF_AGET(3, Instruction::AGET, 6u, 201u, 203u),
-      DEF_INVOKE1(4, Instruction::INVOKE_STATIC, 201u),     // Clobbering catch, 201u escapes.
-      DEF_AGET(4, Instruction::AGET, 8u, 100u, 101u),       // Differs from AGET 2u.
-      DEF_APUT(4, Instruction::APUT, 8u, 100u, 102u),
-      DEF_APUT(4, Instruction::APUT, 8u, 200u, 202u),
-      DEF_APUT(4, Instruction::APUT, 8u, 200u, 203u),
-      DEF_APUT(4, Instruction::APUT, 8u, 201u, 202u),
-      DEF_APUT(4, Instruction::APUT, 8u, 201u, 203u),
-      DEF_AGET(5, Instruction::AGET, 14u, 100u, 101u),      // Differs from AGETs 2u and 8u.
-      DEF_AGET(5, Instruction::AGET, 15u, 200u, 202u),      // Same as AGET 3u.
-      DEF_AGET(5, Instruction::AGET, 16u, 200u, 203u),      // Same as AGET 4u.
-      DEF_AGET(5, Instruction::AGET, 17u, 201u, 202u),      // Differs from AGET 5u.
-      DEF_AGET(5, Instruction::AGET, 18u, 201u, 203u),      // Differs from AGET 6u.
-      DEF_APUT(5, Instruction::APUT, 14u, 100u, 102u),
-      DEF_APUT(5, Instruction::APUT, 14u, 200u, 202u),
-      DEF_APUT(5, Instruction::APUT, 14u, 200u, 203u),
-      DEF_APUT(5, Instruction::APUT, 14u, 201u, 202u),
-      DEF_APUT(5, Instruction::APUT, 14u, 201u, 203u),
-      DEF_AGET(6, Instruction::AGET, 24u, 100u, 101u),      // Differs from AGETs 2u, 8u and 14u.
-      DEF_AGET(6, Instruction::AGET, 25u, 100u, 101u),      // Same as AGET 24u.
-      DEF_AGET(6, Instruction::AGET, 26u, 200u, 202u),      // Same as AGET 24u.
-      DEF_AGET(6, Instruction::AGET, 27u, 200u, 203u),      // Same as AGET 24u.
-      DEF_AGET(6, Instruction::AGET, 28u, 201u, 202u),      // Same as AGET 24u.
-      DEF_AGET(6, Instruction::AGET, 29u, 201u, 203u),      // Same as AGET 24u.
-  };
-
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[2], value_names_[8]);
-  EXPECT_NE(value_names_[2], value_names_[14]);
-  EXPECT_NE(value_names_[8], value_names_[14]);
-  EXPECT_EQ(value_names_[3], value_names_[15]);
-  EXPECT_EQ(value_names_[4], value_names_[16]);
-  EXPECT_NE(value_names_[5], value_names_[17]);
-  EXPECT_NE(value_names_[6], value_names_[18]);
-  EXPECT_NE(value_names_[2], value_names_[24]);
-  EXPECT_NE(value_names_[8], value_names_[24]);
-  EXPECT_NE(value_names_[14], value_names_[24]);
-  EXPECT_EQ(value_names_[24], value_names_[25]);
-  EXPECT_EQ(value_names_[24], value_names_[26]);
-  EXPECT_EQ(value_names_[24], value_names_[27]);
-  EXPECT_EQ(value_names_[24], value_names_[28]);
-  EXPECT_EQ(value_names_[24], value_names_[29]);
-}
-
-TEST_F(GlobalValueNumberingTestCatch, Phi) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000),
-      DEF_CONST(3, Instruction::CONST, 1u, 2000),
-      DEF_MOVE(3, Instruction::MOVE, 2u, 1u),
-      DEF_INVOKE1(4, Instruction::INVOKE_STATIC, 100u),     // Clobbering catch.
-      DEF_CONST(5, Instruction::CONST, 4u, 1000),
-      DEF_CONST(5, Instruction::CONST, 5u, 3000),
-      DEF_MOVE(5, Instruction::MOVE, 6u, 5u),
-      DEF_PHI2(6, 7u, 0u, 4u),
-      DEF_PHI2(6, 8u, 0u, 5u),
-      DEF_PHI2(6, 9u, 0u, 6u),
-      DEF_PHI2(6, 10u, 1u, 4u),
-      DEF_PHI2(6, 11u, 1u, 5u),
-      DEF_PHI2(6, 12u, 1u, 6u),
-      DEF_PHI2(6, 13u, 2u, 4u),
-      DEF_PHI2(6, 14u, 2u, 5u),
-      DEF_PHI2(6, 15u, 2u, 6u),
-  };
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  ASSERT_EQ(value_names_[4], value_names_[0]);  // Both CONSTs are 1000.
-  EXPECT_EQ(value_names_[7], value_names_[0]);  // Merging CONST 0u and CONST 4u, both 1000.
-  EXPECT_NE(value_names_[8], value_names_[0]);
-  EXPECT_NE(value_names_[8], value_names_[5]);
-  EXPECT_EQ(value_names_[9], value_names_[8]);
-  EXPECT_NE(value_names_[10], value_names_[1]);
-  EXPECT_NE(value_names_[10], value_names_[4]);
-  EXPECT_NE(value_names_[10], value_names_[8]);
-  EXPECT_NE(value_names_[11], value_names_[1]);
-  EXPECT_NE(value_names_[11], value_names_[5]);
-  EXPECT_NE(value_names_[11], value_names_[8]);
-  EXPECT_NE(value_names_[11], value_names_[10]);
-  EXPECT_EQ(value_names_[12], value_names_[11]);
-  EXPECT_EQ(value_names_[13], value_names_[10]);
-  EXPECT_EQ(value_names_[14], value_names_[11]);
-  EXPECT_EQ(value_names_[15], value_names_[11]);
-}
-
-TEST_F(GlobalValueNumberingTest, NullCheckIFields) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },  // Object.
-      { 1u, 1u, 1u, false, kDexMemAccessObject },  // Object.
-  };
-  static const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),  // 4 is fall-through, 5 is taken.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(3)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(3, 4)),
-  };
-  static const MIRDef mirs[] = {
-      DEF_IGET(3, Instruction::IGET_OBJECT, 0u, 100u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 1u, 100u, 1u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 2u, 101u, 0u),
-      DEF_IFZ(3, Instruction::IF_NEZ, 0u),            // Null-check for field #0 for taken.
-      DEF_UNIQUE_REF(4, Instruction::NEW_ARRAY, 4u),
-      DEF_IPUT(4, Instruction::IPUT_OBJECT, 4u, 100u, 0u),
-      DEF_IPUT(4, Instruction::IPUT_OBJECT, 4u, 100u, 1u),
-      DEF_IPUT(4, Instruction::IPUT_OBJECT, 4u, 101u, 0u),
-      DEF_IGET(5, Instruction::IGET_OBJECT, 8u, 100u, 0u),   // 100u/#0, IF_NEZ/NEW_ARRAY.
-      DEF_IGET(5, Instruction::IGET_OBJECT, 9u, 100u, 1u),   // 100u/#1, -/NEW_ARRAY.
-      DEF_IGET(5, Instruction::IGET_OBJECT, 10u, 101u, 0u),  // 101u/#0, -/NEW_ARRAY.
-      DEF_CONST(5, Instruction::CONST, 11u, 0),
-      DEF_AGET(5, Instruction::AGET, 12u, 8u, 11u),   // Null-check eliminated.
-      DEF_AGET(5, Instruction::AGET, 13u, 9u, 11u),   // Null-check kept.
-      DEF_AGET(5, Instruction::AGET, 14u, 10u, 11u),  // Null-check kept.
-  };
-  static const bool expected_ignore_null_check[] = {
-      false, true, false, false,                      // BB #3; unimportant.
-      false, true, true, true,                        // BB #4; unimportant.
-      true, true, true, false, true, false, false,    // BB #5; only the last three are important.
-  };
-
-  PrepareIFields(ifields);
-  PrepareBasicBlocks(bbs);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  PerformGVNCodeModifications();
-  ASSERT_EQ(arraysize(expected_ignore_null_check), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_null_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
-  }
-}
-
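NullCheckIFields shows the payoff side of GVN: a value name becomes known-non-null either via IF_NEZ on one path or by being replaced with a fresh NEW_ARRAY on the other, and the per-path facts merge by intersection, so only names non-null on every incoming path let codegen set MIR_IGNORE_NULL_CHECK. A simplified sketch (the per-path sets and names are invented):

  #include <iostream>
  #include <set>

  // Sketch: null-checked value names merge by intersection across
  // predecessors; the ignore-null-check flag is set only for survivors.
  using ValueName = unsigned;

  std::set<ValueName> MergeChecked(const std::set<ValueName>& taken,
                                   const std::set<ValueName>& fall_through) {
    std::set<ValueName> out;
    for (ValueName v : taken) {
      if (fall_through.count(v) != 0) out.insert(v);
    }
    return out;
  }

  int main() {
    // Name 1u is non-null on both incoming paths (IF_NEZ on one, NEW_ARRAY on
    // the other); name 2u is only non-null on the fall-through path.
    std::set<ValueName> taken = {1u};
    std::set<ValueName> fall_through = {1u, 2u, 3u};
    auto merged = MergeChecked(taken, fall_through);
    std::cout << merged.count(1u) << merged.count(2u) << "\n";  // 10
  }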
-TEST_F(GlobalValueNumberingTest, NullCheckSFields) {
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },
-      { 1u, 1u, 1u, false, kDexMemAccessObject },
-  };
-  static const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),  // 4 is fall-through, 5 is taken.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(3)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(3, 4)),
-  };
-  static const MIRDef mirs[] = {
-      DEF_SGET(3, Instruction::SGET_OBJECT, 0u, 0u),
-      DEF_SGET(3, Instruction::SGET_OBJECT, 1u, 1u),
-      DEF_IFZ(3, Instruction::IF_NEZ, 0u),            // Null-check for field #0 for taken.
-      DEF_UNIQUE_REF(4, Instruction::NEW_ARRAY, 3u),
-      DEF_SPUT(4, Instruction::SPUT_OBJECT, 3u, 0u),
-      DEF_SPUT(4, Instruction::SPUT_OBJECT, 3u, 1u),
-      DEF_SGET(5, Instruction::SGET_OBJECT, 6u, 0u),  // Field #0 is null-checked, IF_NEZ/NEW_ARRAY.
-      DEF_SGET(5, Instruction::SGET_OBJECT, 7u, 1u),  // Field #1 is not null-checked, -/NEW_ARRAY.
-      DEF_CONST(5, Instruction::CONST, 8u, 0),
-      DEF_AGET(5, Instruction::AGET, 9u, 6u, 8u),     // Null-check eliminated.
-      DEF_AGET(5, Instruction::AGET, 10u, 7u, 8u),    // Null-check kept.
-  };
-  static const bool expected_ignore_null_check[] = {
-      false, false, false, false, false, false, false, false, false, true, false
-  };
-
-  PrepareSFields(sfields);
-  PrepareBasicBlocks(bbs);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  PerformGVNCodeModifications();
-  ASSERT_EQ(arraysize(expected_ignore_null_check), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_null_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
-  }
-}
-
-TEST_F(GlobalValueNumberingTest, NullCheckArrays) {
-  static const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),  // 4 is fall-through, 5 is taken.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(3)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(3, 4)),
-  };
-  static const MIRDef mirs[] = {
-      DEF_AGET(3, Instruction::AGET_OBJECT, 0u, 100u, 102u),
-      DEF_AGET(3, Instruction::AGET_OBJECT, 1u, 100u, 103u),
-      DEF_AGET(3, Instruction::AGET_OBJECT, 2u, 101u, 102u),
-      DEF_IFZ(3, Instruction::IF_NEZ, 0u),            // Null-check for sreg 0u for taken.
-      DEF_UNIQUE_REF(4, Instruction::NEW_ARRAY, 4u),
-      DEF_APUT(4, Instruction::APUT_OBJECT, 4u, 100u, 102u),
-      DEF_APUT(4, Instruction::APUT_OBJECT, 4u, 100u, 103u),
-      DEF_APUT(4, Instruction::APUT_OBJECT, 4u, 101u, 102u),
-      DEF_AGET(5, Instruction::AGET_OBJECT, 8u, 100u, 102u),   // Null-checked, IF_NEZ/NEW_ARRAY.
-      DEF_AGET(5, Instruction::AGET_OBJECT, 9u, 100u, 103u),   // Not null-checked, -/NEW_ARRAY.
-      DEF_AGET(5, Instruction::AGET_OBJECT, 10u, 101u, 102u),  // Not null-checked, -/NEW_ARRAY.
-      DEF_CONST(5, Instruction::CONST, 11u, 0),
-      DEF_AGET(5, Instruction::AGET, 12u, 8u, 11u),    // Null-check eliminated.
-      DEF_AGET(5, Instruction::AGET, 13u, 9u, 11u),    // Null-check kept.
-      DEF_AGET(5, Instruction::AGET, 14u, 10u, 11u),   // Null-check kept.
-  };
-  static const bool expected_ignore_null_check[] = {
-      false, true, false, false,                      // BB #3; unimportant.
-      false, true, true, true,                        // BB #4; unimportant.
-      true, true, true, false, true, false, false,    // BB #5; only the last three are important.
-  };
-
-  PrepareBasicBlocks(bbs);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  PerformGVNCodeModifications();
-  ASSERT_EQ(arraysize(expected_ignore_null_check), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_null_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
-  }
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, RangeCheckArrays) {
-  // NOTE: We don't merge range checks when we merge value names for Phis or memory locations.
-  static const MIRDef mirs[] = {
-      DEF_AGET(4, Instruction::AGET, 0u, 100u, 101u),
-      DEF_AGET(5, Instruction::AGET, 1u, 100u, 101u),
-      DEF_APUT(6, Instruction::APUT, 2u, 100u, 101u),
-
-      DEF_AGET(4, Instruction::AGET, 3u, 200u, 201u),
-      DEF_AGET(5, Instruction::AGET, 4u, 200u, 202u),
-      DEF_APUT(6, Instruction::APUT, 5u, 200u, 201u),
-
-      DEF_AGET(4, Instruction::AGET, 6u, 300u, 302u),
-      DEF_AGET(5, Instruction::AGET, 7u, 301u, 302u),
-      DEF_APUT(6, Instruction::APUT, 8u, 300u, 302u),
-  };
-  static const bool expected_ignore_null_check[] = {
-      false, false, true,
-      false, false, true,
-      false, false, false,
-  };
-  static const bool expected_ignore_range_check[] = {
-      false, false, true,
-      false, false, false,
-      false, false, false,
-  };
-
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  PerformGVNCodeModifications();
-  ASSERT_EQ(arraysize(expected_ignore_null_check), mir_count_);
-  ASSERT_EQ(arraysize(expected_ignore_range_check), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_null_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
-    EXPECT_EQ(expected_ignore_range_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0) << i;
-  }
-}
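
The NOTE above is the crux of this test: a range check is keyed by two values
at once, so nothing is merged across Phis. A sketch of that conservative merge
with illustrative types of our own (not the deleted code's):

    #include <cstdint>
    #include <set>
    #include <utility>

    using ValueName = uint16_t;
    // A range check is identified by the array's and the index's value names.
    using RangeCheckKey = std::pair<ValueName, ValueName>;

    // Keep only checks whose exact {array, index} pair was established on both
    // sides; merged values receive new names, so their checks simply drop out.
    std::set<RangeCheckKey> MergeRangeChecks(const std::set<RangeCheckKey>& left,
                                             const std::set<RangeCheckKey>& right) {
      std::set<RangeCheckKey> merged;
      for (const RangeCheckKey& key : left) {
        if (right.count(key) != 0) {
          merged.insert(key);
        }
      }
      return merged;
    }

Only the first triple's APUT loses its range check: the other two use an index
or an array that differs between the two sides of the diamond.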
-
-TEST_F(GlobalValueNumberingTestDiamond, MergeSameValueInDifferentMemoryLocations) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 100u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_ARRAY, 200u),
-      DEF_CONST(4, Instruction::CONST, 2u, 1000),
-      DEF_IPUT(4, Instruction::IPUT, 2u, 100u, 0u),
-      DEF_IPUT(4, Instruction::IPUT, 2u, 100u, 1u),
-      DEF_IPUT(4, Instruction::IPUT, 2u, 101u, 0u),
-      DEF_APUT(4, Instruction::APUT, 2u, 200u, 202u),
-      DEF_APUT(4, Instruction::APUT, 2u, 200u, 203u),
-      DEF_APUT(4, Instruction::APUT, 2u, 201u, 202u),
-      DEF_APUT(4, Instruction::APUT, 2u, 201u, 203u),
-      DEF_SPUT(4, Instruction::SPUT, 2u, 0u),
-      DEF_SPUT(4, Instruction::SPUT, 2u, 1u),
-      DEF_CONST(5, Instruction::CONST, 12u, 2000),
-      DEF_IPUT(5, Instruction::IPUT, 12u, 100u, 0u),
-      DEF_IPUT(5, Instruction::IPUT, 12u, 100u, 1u),
-      DEF_IPUT(5, Instruction::IPUT, 12u, 101u, 0u),
-      DEF_APUT(5, Instruction::APUT, 12u, 200u, 202u),
-      DEF_APUT(5, Instruction::APUT, 12u, 200u, 203u),
-      DEF_APUT(5, Instruction::APUT, 12u, 201u, 202u),
-      DEF_APUT(5, Instruction::APUT, 12u, 201u, 203u),
-      DEF_SPUT(5, Instruction::SPUT, 12u, 0u),
-      DEF_SPUT(5, Instruction::SPUT, 12u, 1u),
-      DEF_PHI2(6, 22u, 2u, 12u),
-      DEF_IGET(6, Instruction::IGET, 23u, 100u, 0u),
-      DEF_IGET(6, Instruction::IGET, 24u, 100u, 1u),
-      DEF_IGET(6, Instruction::IGET, 25u, 101u, 0u),
-      DEF_AGET(6, Instruction::AGET, 26u, 200u, 202u),
-      DEF_AGET(6, Instruction::AGET, 27u, 200u, 203u),
-      DEF_AGET(6, Instruction::AGET, 28u, 201u, 202u),
-      DEF_AGET(6, Instruction::AGET, 29u, 201u, 203u),
-      DEF_SGET(6, Instruction::SGET, 30u, 0u),
-      DEF_SGET(6, Instruction::SGET, 31u, 1u),
-  };
-  PrepareIFields(ifields);
-  PrepareSFields(sfields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[2], value_names_[12]);
-  EXPECT_NE(value_names_[2], value_names_[22]);
-  EXPECT_NE(value_names_[12], value_names_[22]);
-  for (size_t i = 23; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(value_names_[22], value_names_[i]) << i;
-  }
-}
-
-TEST_F(GlobalValueNumberingTest, InfiniteLocationLoop) {
-  // This is a pattern that led to an infinite loop during GVN development. It was fixed by
-  // rewriting the merging of AliasingValues to merge only the locations read from or written
-  // to in each incoming LVN, rather than all locations read from or written to in any
-  // incoming LVN. The issue also showed up only when the GVN used the DFS ordering instead
-  // of the "topological" ordering. However, since the "topological" ordering is not really
-  // topological when there are cycles, and an optimizing Java compiler (or a tool like
-  // ProGuard) could theoretically create any sort of flow graph, this could have shown up
-  // in real code.
-  //
-  // While we were merging all the locations:
-  // On the first evaluation, the Phi gets the same value name as CONST 0u. After the second
-  // evaluation, once BB #9 has been processed, the Phi receives its own value name.
-  // However, the index from the first evaluation keeps disappearing and reappearing in the
-  // LVN's aliasing_array_value_map_'s load_value_map for BBs #9, #4, #5, #7 because of the
-  // DFS ordering of LVN evaluation.
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },
-  };
-  static const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(4)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 2), DEF_PRED2(3, 9)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(6, 7), DEF_PRED1(4)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(9), DEF_PRED1(5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(8, 9), DEF_PRED1(5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(9), DEF_PRED1(7)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED3(6, 7, 8)),
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 0),
-      DEF_PHI2(4, 1u, 0u, 10u),
-      DEF_INVOKE1(6, Instruction::INVOKE_STATIC, 100u),
-      DEF_IGET(6, Instruction::IGET_OBJECT, 3u, 100u, 0u),
-      DEF_CONST(6, Instruction::CONST, 4u, 1000),
-      DEF_APUT(6, Instruction::APUT, 4u, 3u, 1u),            // Index is Phi 1u.
-      DEF_INVOKE1(8, Instruction::INVOKE_STATIC, 100u),
-      DEF_IGET(8, Instruction::IGET_OBJECT, 7u, 100u, 0u),
-      DEF_CONST(8, Instruction::CONST, 8u, 2000),
-      DEF_APUT(8, Instruction::APUT, 9u, 7u, 1u),            // Index is Phi 1u.
-      DEF_CONST(9, Instruction::CONST, 10u, 3000),
-  };
-  PrepareIFields(ifields);
-  PrepareBasicBlocks(bbs);
-  PrepareMIRs(mirs);
-  // Using DFS order for this test. The GVN result should not depend on the ordering used
-  // once the GVN actually converges; however, creating a test for this convergence issue
-  // with the topological ordering would be a very challenging task.
-  PerformPreOrderDfsGVN();
-}
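
A rough shape of the fix the comment describes, with simplified types (ours)
standing in for the real AliasingValues structures:

    #include <cstdint>
    #include <map>
    #include <set>
    #include <vector>

    using Location = uint16_t;
    using ValueName = uint16_t;
    using LoadValueMap = std::map<Location, ValueName>;

    // Merge only locations some predecessor actually read or wrote. Entries
    // outside 'touched' can never be re-imported, so repeated evaluation of a
    // loop reaches a fixed point instead of oscillating.
    LoadValueMap MergeTouchedOnly(const std::vector<LoadValueMap>& preds,
                                  const std::set<Location>& touched) {
      LoadValueMap merged;
      for (Location loc : touched) {
        bool first = true;
        bool same = true;
        ValueName vn = 0;
        for (const LoadValueMap& pred : preds) {
          auto it = pred.find(loc);
          if (it == pred.end()) { same = false; break; }
          if (first) { vn = it->second; first = false; }
          else if (it->second != vn) { same = false; break; }
        }
        if (same && !first) merged[loc] = vn;  // Keep only present, agreeing entries.
      }
      return merged;
    }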
-
-TEST_F(GlobalValueNumberingTestTwoConsecutiveLoops, IFieldAndPhi) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },
-  };
-  static const MIRDef mirs[] = {
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 0u, 100u),
-      DEF_IPUT(3, Instruction::IPUT_OBJECT, 0u, 200u, 0u),
-      DEF_PHI2(4, 2u, 0u, 3u),
-      DEF_MOVE(5, Instruction::MOVE_OBJECT, 3u, 300u),
-      DEF_IPUT(5, Instruction::IPUT_OBJECT, 3u, 200u, 0u),
-      DEF_MOVE(6, Instruction::MOVE_OBJECT, 5u, 2u),
-      DEF_IGET(6, Instruction::IGET_OBJECT, 6u, 200u, 0u),
-      DEF_MOVE(7, Instruction::MOVE_OBJECT, 7u, 5u),
-      DEF_IGET(7, Instruction::IGET_OBJECT, 8u, 200u, 0u),
-      DEF_MOVE(8, Instruction::MOVE_OBJECT, 9u, 5u),
-      DEF_IGET(8, Instruction::IGET_OBJECT, 10u, 200u, 0u),
-      DEF_MOVE(9, Instruction::MOVE_OBJECT, 11u, 5u),
-      DEF_IGET(9, Instruction::IGET_OBJECT, 12u, 200u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[3]);
-  EXPECT_NE(value_names_[0], value_names_[2]);
-  EXPECT_NE(value_names_[3], value_names_[2]);
-  EXPECT_EQ(value_names_[2], value_names_[5]);
-  EXPECT_EQ(value_names_[5], value_names_[6]);
-  EXPECT_EQ(value_names_[5], value_names_[7]);
-  EXPECT_EQ(value_names_[5], value_names_[8]);
-  EXPECT_EQ(value_names_[5], value_names_[9]);
-  EXPECT_EQ(value_names_[5], value_names_[10]);
-  EXPECT_EQ(value_names_[5], value_names_[11]);
-  EXPECT_EQ(value_names_[5], value_names_[12]);
-}
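
The expectations above follow from how a Phi is assigned a value name. A
sketch of the rule, using a plain fresh-name counter as a stand-in for the
deleted implementation's naming machinery:

    #include <cstdint>
    #include <vector>

    using ValueName = uint16_t;

    // A Phi whose inputs all carry one value name takes that name (a
    // degenerate Phi); any disagreement gives the Phi a fresh name of its own.
    ValueName PhiValueName(const std::vector<ValueName>& inputs, ValueName* next_fresh) {
      for (ValueName v : inputs) {
        if (v != inputs.front()) {
          return (*next_fresh)++;
        }
      }
      return inputs.front();  // Assumes a Phi has at least one input.
    }

Here the Phi 2u merges two distinct moves, so it receives its own name; the
field loads after the loops merge the two stored values the same way and thus
resolve to that same name.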
-
-TEST_F(GlobalValueNumberingTestTwoConsecutiveLoops, NullCheck) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },
-  };
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },
-  };
-  static const MIRDef mirs[] = {
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 0u, 100u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 1u, 200u, 0u),
-      DEF_SGET(3, Instruction::SGET_OBJECT, 2u, 0u),
-      DEF_AGET(3, Instruction::AGET_OBJECT, 3u, 300u, 201u),
-      DEF_PHI2(4, 4u, 0u, 8u),
-      DEF_IGET(5, Instruction::IGET_OBJECT, 5u, 200u, 0u),
-      DEF_SGET(5, Instruction::SGET_OBJECT, 6u, 0u),
-      DEF_AGET(5, Instruction::AGET_OBJECT, 7u, 300u, 201u),
-      DEF_MOVE(5, Instruction::MOVE_OBJECT, 8u, 400u),
-      DEF_IPUT(5, Instruction::IPUT_OBJECT, 4u, 200u, 0u),          // PUT the Phi 4u.
-      DEF_SPUT(5, Instruction::SPUT_OBJECT, 4u, 0u),                // PUT the Phi 4u.
-      DEF_APUT(5, Instruction::APUT_OBJECT, 4u, 300u, 201u),        // PUT the Phi 4u.
-      DEF_MOVE(6, Instruction::MOVE_OBJECT, 12u, 4u),
-      DEF_IGET(6, Instruction::IGET_OBJECT, 13u, 200u, 0u),
-      DEF_SGET(6, Instruction::SGET_OBJECT, 14u, 0u),
-      DEF_AGET(6, Instruction::AGET_OBJECT, 15u, 300u, 201u),
-      DEF_AGET(6, Instruction::AGET_OBJECT, 16u, 12u, 600u),
-      DEF_AGET(6, Instruction::AGET_OBJECT, 17u, 13u, 600u),
-      DEF_AGET(6, Instruction::AGET_OBJECT, 18u, 14u, 600u),
-      DEF_AGET(6, Instruction::AGET_OBJECT, 19u, 15u, 600u),
-      DEF_MOVE(8, Instruction::MOVE_OBJECT, 20u, 12u),
-      DEF_IGET(8, Instruction::IGET_OBJECT, 21u, 200u, 0u),
-      DEF_SGET(8, Instruction::SGET_OBJECT, 22u, 0u),
-      DEF_AGET(8, Instruction::AGET_OBJECT, 23u, 300u, 201u),
-      DEF_AGET(8, Instruction::AGET_OBJECT, 24u, 12u, 600u),
-      DEF_AGET(8, Instruction::AGET_OBJECT, 25u, 13u, 600u),
-      DEF_AGET(8, Instruction::AGET_OBJECT, 26u, 14u, 600u),
-      DEF_AGET(8, Instruction::AGET_OBJECT, 27u, 15u, 600u),
-      DEF_MOVE(9, Instruction::MOVE_OBJECT, 28u, 12u),
-      DEF_IGET(9, Instruction::IGET_OBJECT, 29u, 200u, 0u),
-      DEF_SGET(9, Instruction::SGET_OBJECT, 30u, 0u),
-      DEF_AGET(9, Instruction::AGET_OBJECT, 31u, 300u, 201u),
-      DEF_AGET(9, Instruction::AGET_OBJECT, 32u, 12u, 600u),
-      DEF_AGET(9, Instruction::AGET_OBJECT, 33u, 13u, 600u),
-      DEF_AGET(9, Instruction::AGET_OBJECT, 34u, 14u, 600u),
-      DEF_AGET(9, Instruction::AGET_OBJECT, 35u, 15u, 600u),
-  };
-  static const bool expected_ignore_null_check[] = {
-      false, false, false, false,                                   // BB #3.
-      false, true, false, true, false, true, false, true,           // BBs #4 and #5.
-      false, true, false, true, false, false, false, false,         // BB #6.
-      false, true, false, true, true, true, true, true,             // BB #8.
-      false, true, false, true, true, true, true, true,             // BB #9.
-  };
-  static const bool expected_ignore_range_check[] = {
-      false, false, false, false,                                   // BB #3.
-      false, false, false, true, false, false, false, true,         // BBs #4 and #5.
-      false, false, false, true, false, false, false, false,        // BB #6.
-      false, false, false, true, true, true, true, true,            // BB #8.
-      false, false, false, true, true, true, true, true,            // BB #9.
-  };
-
-  PrepareIFields(ifields);
-  PrepareSFields(sfields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[4]);
-  EXPECT_NE(value_names_[1], value_names_[5]);
-  EXPECT_NE(value_names_[2], value_names_[6]);
-  EXPECT_NE(value_names_[3], value_names_[7]);
-  EXPECT_NE(value_names_[4], value_names_[8]);
-  EXPECT_EQ(value_names_[4], value_names_[12]);
-  EXPECT_EQ(value_names_[5], value_names_[13]);
-  EXPECT_EQ(value_names_[6], value_names_[14]);
-  EXPECT_EQ(value_names_[7], value_names_[15]);
-  EXPECT_EQ(value_names_[12], value_names_[20]);
-  EXPECT_EQ(value_names_[13], value_names_[21]);
-  EXPECT_EQ(value_names_[14], value_names_[22]);
-  EXPECT_EQ(value_names_[15], value_names_[23]);
-  EXPECT_EQ(value_names_[12], value_names_[28]);
-  EXPECT_EQ(value_names_[13], value_names_[29]);
-  EXPECT_EQ(value_names_[14], value_names_[30]);
-  EXPECT_EQ(value_names_[15], value_names_[31]);
-  PerformGVNCodeModifications();
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_null_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
-    EXPECT_EQ(expected_ignore_range_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0) << i;
-  }
-}
-
-TEST_F(GlobalValueNumberingTestTwoNestedLoops, IFieldAndPhi) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },
-  };
-  static const MIRDef mirs[] = {
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 0u, 100u),
-      DEF_IPUT(3, Instruction::IPUT_OBJECT, 0u, 200u, 0u),
-      DEF_PHI2(4, 2u, 0u, 11u),
-      DEF_MOVE(4, Instruction::MOVE_OBJECT, 3u, 2u),
-      DEF_IGET(4, Instruction::IGET_OBJECT, 4u, 200u, 0u),
-      DEF_MOVE(5, Instruction::MOVE_OBJECT, 5u, 3u),
-      DEF_IGET(5, Instruction::IGET_OBJECT, 6u, 200u, 0u),
-      DEF_MOVE(6, Instruction::MOVE_OBJECT, 7u, 3u),
-      DEF_IGET(6, Instruction::IGET_OBJECT, 8u, 200u, 0u),
-      DEF_MOVE(7, Instruction::MOVE_OBJECT, 9u, 3u),
-      DEF_IGET(7, Instruction::IGET_OBJECT, 10u, 200u, 0u),
-      DEF_MOVE(7, Instruction::MOVE_OBJECT, 11u, 300u),
-      DEF_IPUT(7, Instruction::IPUT_OBJECT, 11u, 200u, 0u),
-      DEF_MOVE(8, Instruction::MOVE_OBJECT, 13u, 3u),
-      DEF_IGET(8, Instruction::IGET_OBJECT, 14u, 200u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN();
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[11]);
-  EXPECT_NE(value_names_[0], value_names_[2]);
-  EXPECT_NE(value_names_[11], value_names_[2]);
-  EXPECT_EQ(value_names_[2], value_names_[3]);
-  EXPECT_EQ(value_names_[3], value_names_[4]);
-  EXPECT_EQ(value_names_[3], value_names_[5]);
-  EXPECT_EQ(value_names_[3], value_names_[6]);
-  EXPECT_EQ(value_names_[3], value_names_[7]);
-  EXPECT_EQ(value_names_[3], value_names_[8]);
-  EXPECT_EQ(value_names_[3], value_names_[9]);
-  EXPECT_EQ(value_names_[3], value_names_[10]);
-  EXPECT_EQ(value_names_[3], value_names_[13]);
-  EXPECT_EQ(value_names_[3], value_names_[14]);
-}
-
-TEST_F(GlobalValueNumberingTest, NormalPathToCatchEntry) {
-  // When there's an empty catch block, all the exception paths lead to the next block in
-  // the normal path, and we can also have normal "taken" or "fall-through" branches to
-  // that block. Check that LocalValueNumbering::PruneNonAliasingRefsForCatch() can handle it.
-  static const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(3)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(3, 4)),
-  };
-  static const MIRDef mirs[] = {
-      DEF_INVOKE1(4, Instruction::INVOKE_STATIC, 100u),
-  };
-  PrepareBasicBlocks(bbs);
-  BasicBlock* catch_handler = cu_.mir_graph->GetBasicBlock(5u);
-  catch_handler->catch_entry = true;
-  // Add successor block info to the check block.
-  BasicBlock* check_bb = cu_.mir_graph->GetBasicBlock(3u);
-  check_bb->successor_block_list_type = kCatch;
-  SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
-      (cu_.arena.Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessors));
-  successor_block_info->block = catch_handler->id;
-  check_bb->successor_blocks.push_back(successor_block_info);
-  BasicBlock* merge_block = cu_.mir_graph->GetBasicBlock(4u);
-  std::swap(merge_block->taken, merge_block->fall_through);
-  PrepareMIRs(mirs);
-  PerformGVN();
-}
-
-TEST_F(GlobalValueNumberingTestDiamond, DivZeroCheckDiamond) {
-  static const MIRDef mirs[] = {
-      DEF_BINOP(3u, Instruction::DIV_INT, 1u, 20u, 21u),
-      DEF_BINOP(3u, Instruction::DIV_INT, 2u, 24u, 21u),
-      DEF_BINOP(3u, Instruction::DIV_INT, 3u, 20u, 23u),
-      DEF_BINOP(4u, Instruction::DIV_INT, 4u, 24u, 22u),
-      DEF_BINOP(4u, Instruction::DIV_INT, 9u, 24u, 25u),
-      DEF_BINOP(5u, Instruction::DIV_INT, 5u, 24u, 21u),
-      DEF_BINOP(5u, Instruction::DIV_INT, 10u, 24u, 26u),
-      DEF_PHI2(6u, 27u, 25u, 26u),
-      DEF_BINOP(6u, Instruction::DIV_INT, 12u, 20u, 27u),
-      DEF_BINOP(6u, Instruction::DIV_INT, 6u, 24u, 21u),
-      DEF_BINOP(6u, Instruction::DIV_INT, 7u, 20u, 23u),
-      DEF_BINOP(6u, Instruction::DIV_INT, 8u, 20u, 22u),
-  };
-
-  static const bool expected_ignore_div_zero_check[] = {
-      false,  // New divisor seen.
-      true,   // Eliminated since it has the same divisor as the first one.
-      false,  // New divisor seen.
-      false,  // New divisor seen.
-      false,  // New divisor seen.
-      true,   // Eliminated in dominating block.
-      false,  // New divisor seen.
-      false,  // Phi node.
-      true,   // Eliminated on both sides of diamond and merged via phi.
-      true,   // Eliminated in dominating block.
-      true,   // Eliminated in dominating block.
-      false,  // Only eliminated on one path of the diamond.
-  };
-
-  PrepareMIRs(mirs);
-  PerformGVN();
-  PerformGVNCodeModifications();
-  ASSERT_EQ(arraysize(expected_ignore_div_zero_check), mir_count_);
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    int expected = expected_ignore_div_zero_check[i] ? MIR_IGNORE_DIV_ZERO_CHECK : 0u;
-    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
-  }
-}
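
Two rules produce the expected flags: a divisor check is redundant when the
same divisor value name was checked in a dominating block, and a Phi of two
divisors that were each checked on their own side of the diamond is itself
treated as checked. A compact sketch of the merge half (our formulation, not
the deleted implementation):

    #include <cstdint>
    #include <set>

    using ValueName = uint16_t;

    // Checked divisors after a diamond merge: kept only if checked on both
    // sides; a Phi whose inputs were both checked is added as checked too.
    std::set<ValueName> MergeCheckedDivisors(const std::set<ValueName>& left,
                                             const std::set<ValueName>& right,
                                             ValueName phi, ValueName left_in,
                                             ValueName right_in) {
      std::set<ValueName> merged;
      for (ValueName v : left) {
        if (right.count(v) != 0) merged.insert(v);
      }
      if (left.count(left_in) != 0 && right.count(right_in) != 0) {
        merged.insert(phi);
      }
      return merged;
    }

That second rule is what eliminates the check for divisor 27u, the Phi of
divisors 25u and 26u checked in BBs #4 and #5 respectively.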
-
-TEST_F(GlobalValueNumberingTestDiamond, CheckCastDiamond) {
-  static const MIRDef mirs[] = {
-      DEF_UNOP(3u, Instruction::INSTANCE_OF, 0u, 100u),
-      DEF_UNOP(3u, Instruction::INSTANCE_OF, 1u, 200u),
-      DEF_IFZ(3u, Instruction::IF_NEZ, 0u),
-      DEF_INVOKE1(4u, Instruction::CHECK_CAST, 100u),
-      DEF_INVOKE1(5u, Instruction::CHECK_CAST, 100u),
-      DEF_INVOKE1(5u, Instruction::CHECK_CAST, 200u),
-      DEF_INVOKE1(5u, Instruction::CHECK_CAST, 100u),
-      DEF_INVOKE1(6u, Instruction::CHECK_CAST, 100u),
-  };
-
-  static const bool expected_ignore_check_cast[] = {
-      false,  // instance-of
-      false,  // instance-of
-      false,  // if-nez
-      false,  // Not eliminated, fall-through branch.
-      true,   // Eliminated.
-      false,  // Not eliminated, different value.
-      false,  // Not eliminated, different type.
-      false,  // Not eliminated, bottom block.
-  };
-
-  PrepareMIRs(mirs);
-  mirs_[0].dalvikInsn.vC = 1234;  // type for instance-of
-  mirs_[1].dalvikInsn.vC = 1234;  // type for instance-of
-  mirs_[3].dalvikInsn.vB = 1234;  // type for check-cast
-  mirs_[4].dalvikInsn.vB = 1234;  // type for check-cast
-  mirs_[5].dalvikInsn.vB = 1234;  // type for check-cast
-  mirs_[6].dalvikInsn.vB = 4321;  // type for check-cast
-  mirs_[7].dalvikInsn.vB = 1234;  // type for check-cast
-  PerformGVN();
-  PerformGVNCodeModifications();
-  ASSERT_EQ(arraysize(expected_ignore_check_cast), mir_count_);
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    int expected = expected_ignore_check_cast[i] ? MIR_IGNORE_CHECK_CAST : 0u;
-    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(GlobalValueNumberingTest, CheckCastDominators) {
-  static const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(7)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),  // Block #3, top of the diamond.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(7), DEF_PRED1(3)),     // Block #4, left side.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // Block #5, right side.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(7), DEF_PRED1(5)),     // Block #6, right side.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 6)),  // Block #7, bottom.
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNOP(3u, Instruction::INSTANCE_OF, 0u, 100u),
-      DEF_UNOP(3u, Instruction::INSTANCE_OF, 1u, 200u),
-      DEF_IFZ(3u, Instruction::IF_NEZ, 0u),
-      DEF_INVOKE1(4u, Instruction::CHECK_CAST, 100u),
-      DEF_INVOKE1(6u, Instruction::CHECK_CAST, 100u),
-      DEF_INVOKE1(6u, Instruction::CHECK_CAST, 200u),
-      DEF_INVOKE1(6u, Instruction::CHECK_CAST, 100u),
-      DEF_INVOKE1(7u, Instruction::CHECK_CAST, 100u),
-  };
-
-  static const bool expected_ignore_check_cast[] = {
-      false,  // instance-of
-      false,  // instance-of
-      false,  // if-nez
-      false,  // Not eliminated, fall-through branch.
-      true,   // Eliminated.
-      false,  // Not eliminated, different value.
-      false,  // Not eliminated, different type.
-      false,  // Not eliminated, bottom block.
-  };
-
-  PrepareBasicBlocks(bbs);
-  PrepareMIRs(mirs);
-  mirs_[0].dalvikInsn.vC = 1234;  // type for instance-of
-  mirs_[1].dalvikInsn.vC = 1234;  // type for instance-of
-  mirs_[3].dalvikInsn.vB = 1234;  // type for check-cast
-  mirs_[4].dalvikInsn.vB = 1234;  // type for check-cast
-  mirs_[5].dalvikInsn.vB = 1234;  // type for check-cast
-  mirs_[6].dalvikInsn.vB = 4321;  // type for check-cast
-  mirs_[7].dalvikInsn.vB = 1234;  // type for check-cast
-  PerformGVN();
-  PerformGVNCodeModifications();
-  ASSERT_EQ(arraysize(expected_ignore_check_cast), mir_count_);
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    int expected = expected_ignore_check_cast[i] ? MIR_IGNORE_CHECK_CAST : 0u;
-    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
-  }
-}
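
Both check-cast tests reduce to a single query: was this exact value already
cast to this exact type on a dominating path? A sketch with illustrative types
(the deleted code stores this differently):

    #include <cstdint>
    #include <set>
    #include <utility>

    using ValueName = uint16_t;
    using TypeIndex = uint32_t;
    using CheckCastKey = std::pair<ValueName, TypeIndex>;

    // Redundant only on an exact {value, type} match: a different object or a
    // different type index never matches, and only dominating casts count.
    bool IsCheckCastRedundant(const std::set<CheckCastKey>& dominating_casts,
                              ValueName value, TypeIndex type) {
      return dominating_casts.count({value, type}) != 0;
    }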
-
-}  // namespace art
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
deleted file mode 100644
index 445859c..0000000
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ /dev/null
@@ -1,1473 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <sstream>
-
-#include "gvn_dead_code_elimination.h"
-
-#include "base/arena_bit_vector.h"
-#include "base/bit_vector-inl.h"
-#include "base/macros.h"
-#include "base/allocator.h"
-#include "compiler_enums.h"
-#include "dataflow_iterator-inl.h"
-#include "dex_instruction.h"
-#include "dex/mir_graph.h"
-#include "local_value_numbering.h"
-
-namespace art {
-
-constexpr uint16_t GvnDeadCodeElimination::kNoValue;
-constexpr uint16_t GvnDeadCodeElimination::kNPos;
-
-inline uint16_t GvnDeadCodeElimination::MIRData::PrevChange(int v_reg) const {
-  DCHECK(has_def);
-  DCHECK(v_reg == vreg_def || v_reg == vreg_def + 1);
-  return (v_reg == vreg_def) ? prev_value.change : prev_value_high.change;
-}
-
-inline void GvnDeadCodeElimination::MIRData::SetPrevChange(int v_reg, uint16_t change) {
-  DCHECK(has_def);
-  DCHECK(v_reg == vreg_def || v_reg == vreg_def + 1);
-  if (v_reg == vreg_def) {
-    prev_value.change = change;
-  } else {
-    prev_value_high.change = change;
-  }
-}
-
-inline void GvnDeadCodeElimination::MIRData::RemovePrevChange(int v_reg, MIRData* prev_data) {
-  DCHECK_NE(PrevChange(v_reg), kNPos);
-  DCHECK(v_reg == prev_data->vreg_def || v_reg == prev_data->vreg_def + 1);
-  if (vreg_def == v_reg) {
-    if (prev_data->vreg_def == v_reg) {
-      prev_value = prev_data->prev_value;
-      low_def_over_high_word = prev_data->low_def_over_high_word;
-    } else {
-      prev_value = prev_data->prev_value_high;
-      low_def_over_high_word = !prev_data->high_def_over_low_word;
-    }
-  } else {
-    if (prev_data->vreg_def == v_reg) {
-      prev_value_high = prev_data->prev_value;
-      high_def_over_low_word = !prev_data->low_def_over_high_word;
-    } else {
-      prev_value_high = prev_data->prev_value_high;
-      high_def_over_low_word = prev_data->high_def_over_low_word;
-    }
-  }
-}
-
-GvnDeadCodeElimination::VRegChains::VRegChains(uint32_t num_vregs, ScopedArenaAllocator* alloc)
-    : num_vregs_(num_vregs),
-      vreg_data_(alloc->AllocArray<VRegValue>(num_vregs, kArenaAllocMisc)),
-      vreg_high_words_(false, Allocator::GetNoopAllocator(),
-                       BitVector::BitsToWords(num_vregs),
-                       alloc->AllocArray<uint32_t>(BitVector::BitsToWords(num_vregs))),
-      mir_data_(alloc->Adapter()) {
-  mir_data_.reserve(100);
-}
-
-inline void GvnDeadCodeElimination::VRegChains::Reset() {
-  DCHECK(mir_data_.empty());
-  std::fill_n(vreg_data_, num_vregs_, VRegValue());
-  vreg_high_words_.ClearAllBits();
-}
-
-void GvnDeadCodeElimination::VRegChains::AddMIRWithDef(MIR* mir, int v_reg, bool wide,
-                                                       uint16_t new_value) {
-  uint16_t pos = mir_data_.size();
-  mir_data_.emplace_back(mir);
-  MIRData* data = &mir_data_.back();
-  data->has_def = true;
-  data->wide_def = wide;
-  data->vreg_def = v_reg;
-
-  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
-  data->prev_value = vreg_data_[v_reg];
-  data->low_def_over_high_word =
-      (vreg_data_[v_reg].change != kNPos)
-      ? GetMIRData(vreg_data_[v_reg].change)->vreg_def + 1 == v_reg
-      : vreg_high_words_.IsBitSet(v_reg);
-  vreg_data_[v_reg].value = new_value;
-  vreg_data_[v_reg].change = pos;
-  vreg_high_words_.ClearBit(v_reg);
-
-  if (wide) {
-    DCHECK_LT(static_cast<size_t>(v_reg + 1), num_vregs_);
-    data->prev_value_high = vreg_data_[v_reg + 1];
-    data->high_def_over_low_word =
-        (vreg_data_[v_reg + 1].change != kNPos)
-        ? GetMIRData(vreg_data_[v_reg + 1].change)->vreg_def == v_reg + 1
-        : !vreg_high_words_.IsBitSet(v_reg + 1);
-    vreg_data_[v_reg + 1].value = new_value;
-    vreg_data_[v_reg + 1].change = pos;
-    vreg_high_words_.SetBit(v_reg + 1);
-  }
-}
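
AddMIRWithDef threads a per-vreg history through the MIR list: each definition
saves the previous {value, change} pair for the vreg(s) it writes, so the
'change' indices form a backward-linked chain. A stripped-down sketch of
walking one such chain (names ours, narrow defs only):

    #include <cstdint>
    #include <vector>

    constexpr uint16_t kNPos = 0xffffu;  // End-of-chain sentinel, as above.

    struct VRegValue { uint16_t value = 0xffffu; uint16_t change = kNPos; };
    struct Def { uint16_t vreg; VRegValue prev; };  // One entry per recorded def.

    // Visit a vreg's definitions from newest to oldest by following the
    // prev.change links stored at definition time.
    std::vector<uint16_t> History(const std::vector<Def>& defs, uint16_t last_change) {
      std::vector<uint16_t> changes;
      for (uint16_t c = last_change; c != kNPos; c = defs[c].prev.change) {
        changes.push_back(c);
      }
      return changes;
    }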
-
-inline void GvnDeadCodeElimination::VRegChains::AddMIRWithoutDef(MIR* mir) {
-  mir_data_.emplace_back(mir);
-}
-
-void GvnDeadCodeElimination::VRegChains::RemoveLastMIRData() {
-  MIRData* data = LastMIRData();
-  if (data->has_def) {
-    DCHECK_EQ(vreg_data_[data->vreg_def].change, NumMIRs() - 1u);
-    vreg_data_[data->vreg_def] = data->prev_value;
-    DCHECK(!vreg_high_words_.IsBitSet(data->vreg_def));
-    if (data->low_def_over_high_word) {
-      vreg_high_words_.SetBit(data->vreg_def);
-    }
-    if (data->wide_def) {
-      DCHECK_EQ(vreg_data_[data->vreg_def + 1].change, NumMIRs() - 1u);
-      vreg_data_[data->vreg_def + 1] = data->prev_value_high;
-      DCHECK(vreg_high_words_.IsBitSet(data->vreg_def + 1));
-      if (data->high_def_over_low_word) {
-        vreg_high_words_.ClearBit(data->vreg_def + 1);
-      }
-    }
-  }
-  mir_data_.pop_back();
-}
-
-void GvnDeadCodeElimination::VRegChains::RemoveTrailingNops() {
-  // There's at least one NOP to drop. There may be more.
-  MIRData* last_data = LastMIRData();
-  DCHECK(!last_data->must_keep && !last_data->has_def);
-  do {
-    DCHECK_EQ(static_cast<int>(last_data->mir->dalvikInsn.opcode), static_cast<int>(kMirOpNop));
-    mir_data_.pop_back();
-    if (mir_data_.empty()) {
-      break;
-    }
-    last_data = LastMIRData();
-  } while (!last_data->must_keep && !last_data->has_def);
-}
-
-inline size_t GvnDeadCodeElimination::VRegChains::NumMIRs() const {
-  return mir_data_.size();
-}
-
-inline GvnDeadCodeElimination::MIRData* GvnDeadCodeElimination::VRegChains::GetMIRData(size_t pos) {
-  DCHECK_LT(pos, mir_data_.size());
-  return &mir_data_[pos];
-}
-
-inline GvnDeadCodeElimination::MIRData* GvnDeadCodeElimination::VRegChains::LastMIRData() {
-  DCHECK(!mir_data_.empty());
-  return &mir_data_.back();
-}
-
-uint32_t GvnDeadCodeElimination::VRegChains::NumVRegs() const {
-  return num_vregs_;
-}
-
-void GvnDeadCodeElimination::VRegChains::InsertInitialValueHigh(int v_reg, uint16_t value) {
-  DCHECK_NE(value, kNoValue);
-  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
-  uint16_t change = vreg_data_[v_reg].change;
-  if (change == kNPos) {
-    vreg_data_[v_reg].value = value;
-    vreg_high_words_.SetBit(v_reg);
-  } else {
-    while (true) {
-      MIRData* data = &mir_data_[change];
-      DCHECK(data->vreg_def == v_reg || data->vreg_def + 1 == v_reg);
-      if (data->vreg_def == v_reg) {  // Low word, use prev_value.
-        if (data->prev_value.change == kNPos) {
-          DCHECK_EQ(data->prev_value.value, kNoValue);
-          data->prev_value.value = value;
-          data->low_def_over_high_word = true;
-          break;
-        }
-        change = data->prev_value.change;
-      } else {  // High word, use prev_value_high.
-        if (data->prev_value_high.change == kNPos) {
-          DCHECK_EQ(data->prev_value_high.value, kNoValue);
-          data->prev_value_high.value = value;
-          break;
-        }
-        change = data->prev_value_high.change;
-      }
-    }
-  }
-}
-
-void GvnDeadCodeElimination::VRegChains::UpdateInitialVRegValue(int v_reg, bool wide,
-                                                                const LocalValueNumbering* lvn) {
-  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
-  if (!wide) {
-    if (vreg_data_[v_reg].value == kNoValue) {
-      uint16_t old_value = lvn->GetStartingVregValueNumber(v_reg);
-      if (old_value == kNoValue) {
-        // Maybe there was a wide value in v_reg before. Do not check for wide value in v_reg-1,
-        // that will be done only if we see a definition of v_reg-1, otherwise it's unnecessary.
-        old_value = lvn->GetStartingVregValueNumberWide(v_reg);
-        if (old_value != kNoValue) {
-          InsertInitialValueHigh(v_reg + 1, old_value);
-        }
-      }
-      vreg_data_[v_reg].value = old_value;
-      DCHECK(!vreg_high_words_.IsBitSet(v_reg));  // Keep marked as low word.
-    }
-  } else {
-    DCHECK_LT(static_cast<size_t>(v_reg + 1), num_vregs_);
-    bool check_high = true;
-    if (vreg_data_[v_reg].value == kNoValue) {
-      uint16_t old_value = lvn->GetStartingVregValueNumberWide(v_reg);
-      if (old_value != kNoValue) {
-        InsertInitialValueHigh(v_reg + 1, old_value);
-        check_high = false;  // High word has been processed.
-      } else {
-        // Maybe there was a narrow value before. Do not check for wide value in v_reg-1,
-        // that will be done only if we see a definition of v_reg-1, otherwise it's unnecessary.
-        old_value = lvn->GetStartingVregValueNumber(v_reg);
-      }
-      vreg_data_[v_reg].value = old_value;
-      DCHECK(!vreg_high_words_.IsBitSet(v_reg));  // Keep marked as low word.
-    }
-    if (check_high && vreg_data_[v_reg + 1].value == kNoValue) {
-      uint16_t old_value = lvn->GetStartingVregValueNumber(v_reg + 1);
-      if (old_value == kNoValue && static_cast<size_t>(v_reg + 2) < num_vregs_) {
-        // Maybe there was a wide value before.
-        old_value = lvn->GetStartingVregValueNumberWide(v_reg + 1);
-        if (old_value != kNoValue) {
-          InsertInitialValueHigh(v_reg + 2, old_value);
-        }
-      }
-      vreg_data_[v_reg + 1].value = old_value;
-      DCHECK(!vreg_high_words_.IsBitSet(v_reg + 1));  // Keep marked as low word.
-    }
-  }
-}
-
-inline uint16_t GvnDeadCodeElimination::VRegChains::LastChange(int v_reg) {
-  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
-  return vreg_data_[v_reg].change;
-}
-
-inline uint16_t GvnDeadCodeElimination::VRegChains::CurrentValue(int v_reg) {
-  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
-  return vreg_data_[v_reg].value;
-}
-
-uint16_t GvnDeadCodeElimination::VRegChains::FindKillHead(int v_reg, uint16_t cutoff) {
-  uint16_t current_value = this->CurrentValue(v_reg);
-  DCHECK_NE(current_value, kNoValue);
-  uint16_t change = LastChange(v_reg);
-  DCHECK_LT(change, mir_data_.size());
-  DCHECK_GE(change, cutoff);
-  bool match_high_word = (mir_data_[change].vreg_def != v_reg);
-  do {
-    MIRData* data = &mir_data_[change];
-    DCHECK(data->vreg_def == v_reg || data->vreg_def + 1 == v_reg);
-    if (data->vreg_def == v_reg) {  // Low word, use prev_value.
-      if (data->prev_value.value == current_value &&
-          match_high_word == data->low_def_over_high_word) {
-        break;
-      }
-      change = data->prev_value.change;
-    } else {  // High word, use prev_value_high.
-      if (data->prev_value_high.value == current_value &&
-          match_high_word != data->high_def_over_low_word) {
-        break;
-      }
-      change = data->prev_value_high.change;
-    }
-    if (change < cutoff) {
-      change = kNPos;
-    }
-  } while (change != kNPos);
-  return change;
-}
-
-uint16_t GvnDeadCodeElimination::VRegChains::FindFirstChangeAfter(int v_reg,
-                                                                  uint16_t change) const {
-  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
-  DCHECK_LT(change, mir_data_.size());
-  uint16_t result = kNPos;
-  uint16_t search_change = vreg_data_[v_reg].change;
-  while (search_change != kNPos && search_change > change) {
-    result = search_change;
-    search_change = mir_data_[search_change].PrevChange(v_reg);
-  }
-  return result;
-}
-
-void GvnDeadCodeElimination::VRegChains::ReplaceChange(uint16_t old_change, uint16_t new_change) {
-  const MIRData* old_data = GetMIRData(old_change);
-  DCHECK(old_data->has_def);
-  int count = old_data->wide_def ? 2 : 1;
-  for (int v_reg = old_data->vreg_def, end = old_data->vreg_def + count; v_reg != end; ++v_reg) {
-    uint16_t next_change = FindFirstChangeAfter(v_reg, old_change);
-    if (next_change == kNPos) {
-      DCHECK_EQ(vreg_data_[v_reg].change, old_change);
-      vreg_data_[v_reg].change = new_change;
-      DCHECK_EQ(vreg_high_words_.IsBitSet(v_reg), v_reg == old_data->vreg_def + 1);
-      // No change in vreg_high_words_.
-    } else {
-      DCHECK_EQ(mir_data_[next_change].PrevChange(v_reg), old_change);
-      mir_data_[next_change].SetPrevChange(v_reg, new_change);
-    }
-  }
-}
-
-void GvnDeadCodeElimination::VRegChains::RemoveChange(uint16_t change) {
-  MIRData* data = &mir_data_[change];
-  DCHECK(data->has_def);
-  int count = data->wide_def ? 2 : 1;
-  for (int v_reg = data->vreg_def, end = data->vreg_def + count; v_reg != end; ++v_reg) {
-    uint16_t next_change = FindFirstChangeAfter(v_reg, change);
-    if (next_change == kNPos) {
-      DCHECK_EQ(vreg_data_[v_reg].change, change);
-      vreg_data_[v_reg] = (data->vreg_def == v_reg) ? data->prev_value : data->prev_value_high;
-      DCHECK_EQ(vreg_high_words_.IsBitSet(v_reg), v_reg == data->vreg_def + 1);
-      if (data->vreg_def == v_reg && data->low_def_over_high_word) {
-        vreg_high_words_.SetBit(v_reg);
-      } else if (data->vreg_def != v_reg && data->high_def_over_low_word) {
-        vreg_high_words_.ClearBit(v_reg);
-      }
-    } else {
-      DCHECK_EQ(mir_data_[next_change].PrevChange(v_reg), change);
-      mir_data_[next_change].RemovePrevChange(v_reg, data);
-    }
-  }
-}
-
-inline bool GvnDeadCodeElimination::VRegChains::IsTopChange(uint16_t change) const {
-  DCHECK_LT(change, mir_data_.size());
-  const MIRData* data = &mir_data_[change];
-  DCHECK(data->has_def);
-  DCHECK_LT(data->wide_def ? data->vreg_def + 1u : data->vreg_def, num_vregs_);
-  return vreg_data_[data->vreg_def].change == change &&
-      (!data->wide_def || vreg_data_[data->vreg_def + 1u].change == change);
-}
-
-bool GvnDeadCodeElimination::VRegChains::IsSRegUsed(uint16_t first_change, uint16_t last_change,
-                                                    int s_reg) const {
-  DCHECK_LE(first_change, last_change);
-  DCHECK_LE(last_change, mir_data_.size());
-  for (size_t c = first_change; c != last_change; ++c) {
-    SSARepresentation* ssa_rep = mir_data_[c].mir->ssa_rep;
-    for (int i = 0; i != ssa_rep->num_uses; ++i) {
-      if (ssa_rep->uses[i] == s_reg) {
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-bool GvnDeadCodeElimination::VRegChains::IsVRegUsed(uint16_t first_change, uint16_t last_change,
-                                                    int v_reg, MIRGraph* mir_graph) const {
-  DCHECK_LE(first_change, last_change);
-  DCHECK_LE(last_change, mir_data_.size());
-  for (size_t c = first_change; c != last_change; ++c) {
-    SSARepresentation* ssa_rep = mir_data_[c].mir->ssa_rep;
-    for (int i = 0; i != ssa_rep->num_uses; ++i) {
-      if (mir_graph->SRegToVReg(ssa_rep->uses[i]) == v_reg) {
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-void GvnDeadCodeElimination::VRegChains::RenameSRegUses(uint16_t first_change, uint16_t last_change,
-                                                        int old_s_reg, int new_s_reg, bool wide) {
-  for (size_t c = first_change; c != last_change; ++c) {
-    SSARepresentation* ssa_rep = mir_data_[c].mir->ssa_rep;
-    for (int i = 0; i != ssa_rep->num_uses; ++i) {
-      if (ssa_rep->uses[i] == old_s_reg) {
-        ssa_rep->uses[i] = new_s_reg;
-        if (wide) {
-          ++i;
-          DCHECK_LT(i, ssa_rep->num_uses);
-          ssa_rep->uses[i] = new_s_reg + 1;
-        }
-      }
-    }
-  }
-}
-
-void GvnDeadCodeElimination::VRegChains::RenameVRegUses(uint16_t first_change, uint16_t last_change,
-                                                    int old_s_reg, int old_v_reg,
-                                                    int new_s_reg, int new_v_reg) {
-  for (size_t c = first_change; c != last_change; ++c) {
-    MIR* mir = mir_data_[c].mir;
-    if (IsInstructionBinOp2Addr(mir->dalvikInsn.opcode) &&
-        mir->ssa_rep->uses[0] == old_s_reg && old_v_reg != new_v_reg) {
-      // Rewrite binop_2ADDR with plain binop before doing the register rename.
-      ChangeBinOp2AddrToPlainBinOp(mir);
-    }
-    uint64_t df_attr = MIRGraph::GetDataFlowAttributes(mir);
-    size_t use = 0u;
-#define REPLACE_VREG(REG) \
-    if ((df_attr & DF_U##REG) != 0) {                                         \
-      if (mir->ssa_rep->uses[use] == old_s_reg) {                             \
-        DCHECK_EQ(mir->dalvikInsn.v##REG, static_cast<uint32_t>(old_v_reg));  \
-        mir->dalvikInsn.v##REG = new_v_reg;                                   \
-        mir->ssa_rep->uses[use] = new_s_reg;                                  \
-        if ((df_attr & DF_##REG##_WIDE) != 0) {                               \
-          DCHECK_EQ(mir->ssa_rep->uses[use + 1], old_s_reg + 1);              \
-          mir->ssa_rep->uses[use + 1] = new_s_reg + 1;                        \
-        }                                                                     \
-      }                                                                       \
-      use += ((df_attr & DF_##REG##_WIDE) != 0) ? 2 : 1;                      \
-    }
-    REPLACE_VREG(A)
-    REPLACE_VREG(B)
-    REPLACE_VREG(C)
-#undef REPLACE_VREG
-    // We may encounter an out-of-order Phi, which we need to ignore; otherwise, we should
-    // only be asked to rename registers specified by DF_UA, DF_UB and DF_UC.
-    DCHECK_EQ(use,
-              static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi
-              ? 0u
-              : static_cast<size_t>(mir->ssa_rep->num_uses));
-  }
-}
-
-GvnDeadCodeElimination::GvnDeadCodeElimination(const GlobalValueNumbering* gvn,
-                                         ScopedArenaAllocator* alloc)
-    : gvn_(gvn),
-      mir_graph_(gvn_->GetMirGraph()),
-      vreg_chains_(mir_graph_->GetNumOfCodeAndTempVRs(), alloc),
-      bb_(nullptr),
-      lvn_(nullptr),
-      no_uses_all_since_(0u),
-      unused_vregs_(new (alloc) ArenaBitVector(alloc, vreg_chains_.NumVRegs(), false)),
-      vregs_to_kill_(new (alloc) ArenaBitVector(alloc, vreg_chains_.NumVRegs(), false)),
-      kill_heads_(alloc->AllocArray<uint16_t>(vreg_chains_.NumVRegs(), kArenaAllocMisc)),
-      changes_to_kill_(alloc->Adapter()),
-      dependent_vregs_(new (alloc) ArenaBitVector(alloc, vreg_chains_.NumVRegs(), false)) {
-  changes_to_kill_.reserve(16u);
-}
-
-void GvnDeadCodeElimination::Apply(BasicBlock* bb) {
-  bb_ = bb;
-  lvn_ = gvn_->GetLvn(bb->id);
-
-  RecordPass();
-  BackwardPass();
-
-  DCHECK_EQ(no_uses_all_since_, 0u);
-  lvn_ = nullptr;
-  bb_ = nullptr;
-}
-
-void GvnDeadCodeElimination::RecordPass() {
-  // Record MIRs with vreg definition data, eliminate single instructions.
-  vreg_chains_.Reset();
-  DCHECK_EQ(no_uses_all_since_, 0u);
-  for (MIR* mir = bb_->first_mir_insn; mir != nullptr; mir = mir->next) {
-    if (RecordMIR(mir)) {
-      RecordPassTryToKillOverwrittenMoveOrMoveSrc();
-      RecordPassTryToKillLastMIR();
-    }
-  }
-}
-
-void GvnDeadCodeElimination::BackwardPass() {
-  // Now process MIRs in reverse order, trying to eliminate them.
-  unused_vregs_->ClearAllBits();  // Implicitly depend on all vregs at the end of BB.
-  while (vreg_chains_.NumMIRs() != 0u) {
-    if (BackwardPassTryToKillLastMIR()) {
-      continue;
-    }
-    BackwardPassProcessLastMIR();
-  }
-}
-
-void GvnDeadCodeElimination::KillMIR(MIRData* data) {
-  DCHECK(!data->must_keep);
-  DCHECK(!data->uses_all_vregs);
-  DCHECK(data->has_def);
-  DCHECK(data->mir->ssa_rep->num_defs == 1 || data->mir->ssa_rep->num_defs == 2);
-
-  KillMIR(data->mir);
-  data->has_def = false;
-  data->is_move = false;
-  data->is_move_src = false;
-}
-
-void GvnDeadCodeElimination::KillMIR(MIR* mir) {
-  mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-  mir->ssa_rep->num_uses = 0;
-  mir->ssa_rep->num_defs = 0;
-}
-
-void GvnDeadCodeElimination::ChangeBinOp2AddrToPlainBinOp(MIR* mir) {
-  mir->dalvikInsn.vC = mir->dalvikInsn.vB;
-  mir->dalvikInsn.vB = mir->dalvikInsn.vA;
-  mir->dalvikInsn.opcode = static_cast<Instruction::Code>(
-      mir->dalvikInsn.opcode - Instruction::ADD_INT_2ADDR + Instruction::ADD_INT);
-}
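
This opcode arithmetic is valid because the Dalvik binop opcodes and their
/2addr twins are laid out in the same order: add-int is 0x90 and add-int/2addr
is 0xb0, so, for example, sub-int/2addr (0xb1) - add-int/2addr (0xb0) +
add-int (0x90) = sub-int (0x91). Semantically, "sub-int/2addr vA, vB"
(vA <- vA - vB) becomes "sub-int vA, vA, vB" via the vC/vB shuffle above.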
-
-MIR* GvnDeadCodeElimination::CreatePhi(int s_reg) {
-  int v_reg = mir_graph_->SRegToVReg(s_reg);
-  MIR* phi = mir_graph_->NewMIR();
-  phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
-  phi->dalvikInsn.vA = v_reg;
-  phi->offset = bb_->start_offset;
-  phi->m_unit_index = 0;  // Arbitrarily assign all Phi nodes to outermost method.
-
-  phi->ssa_rep = static_cast<struct SSARepresentation *>(mir_graph_->GetArena()->Alloc(
-      sizeof(SSARepresentation), kArenaAllocDFInfo));
-
-  mir_graph_->AllocateSSADefData(phi, 1);
-  phi->ssa_rep->defs[0] = s_reg;
-
-  size_t num_uses = bb_->predecessors.size();
-  mir_graph_->AllocateSSAUseData(phi, num_uses);
-  size_t idx = 0u;
-  for (BasicBlockId pred_id : bb_->predecessors) {
-    BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
-    DCHECK(pred_bb != nullptr);
-    phi->ssa_rep->uses[idx] = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
-    DCHECK_NE(phi->ssa_rep->uses[idx], INVALID_SREG);
-    idx++;
-  }
-
-  phi->meta.phi_incoming = static_cast<BasicBlockId*>(mir_graph_->GetArena()->Alloc(
-      sizeof(BasicBlockId) * num_uses, kArenaAllocDFInfo));
-  std::copy(bb_->predecessors.begin(), bb_->predecessors.end(), phi->meta.phi_incoming);
-  bb_->PrependMIR(phi);
-  return phi;
-}
-
-MIR* GvnDeadCodeElimination::RenameSRegDefOrCreatePhi(uint16_t def_change, uint16_t last_change,
-                                                      MIR* mir_to_kill) {
-  DCHECK(mir_to_kill->ssa_rep->num_defs == 1 || mir_to_kill->ssa_rep->num_defs == 2);
-  bool wide = (mir_to_kill->ssa_rep->num_defs != 1);
-  int new_s_reg = mir_to_kill->ssa_rep->defs[0];
-
-  // Just before we kill mir_to_kill, we need to replace the previous SSA reg assigned to the
-  // same dalvik reg to keep consistency with subsequent instructions. However, if there's no
-  // defining MIR for that dalvik reg, the preserved values must come from its predecessors
-  // and we need to create a new Phi (a degenerate Phi if there's only a single predecessor).
-  if (def_change == kNPos) {
-    if (wide) {
-      DCHECK_EQ(new_s_reg + 1, mir_to_kill->ssa_rep->defs[1]);
-      DCHECK_EQ(mir_graph_->SRegToVReg(new_s_reg) + 1, mir_graph_->SRegToVReg(new_s_reg + 1));
-      CreatePhi(new_s_reg + 1);  // High word Phi.
-    }
-    MIR* phi = CreatePhi(new_s_reg);
-    // If this is a degenerate Phi with all inputs being the same SSA reg, we need to
-    // rename its uses.
-    DCHECK_NE(phi->ssa_rep->num_uses, 0u);
-    int old_s_reg = phi->ssa_rep->uses[0];
-    bool all_same = true;
-    for (size_t i = 1u, num = phi->ssa_rep->num_uses; i != num; ++i) {
-      if (phi->ssa_rep->uses[i] != old_s_reg) {
-        all_same = false;
-        break;
-      }
-    }
-    if (all_same) {
-      vreg_chains_.RenameSRegUses(0u, last_change, old_s_reg, new_s_reg, wide);
-    }
-    return phi;
-  } else {
-    DCHECK_LT(def_change, last_change);
-    DCHECK_LE(last_change, vreg_chains_.NumMIRs());
-    MIRData* def_data = vreg_chains_.GetMIRData(def_change);
-    DCHECK(def_data->has_def);
-    int old_s_reg = def_data->mir->ssa_rep->defs[0];
-    DCHECK_NE(old_s_reg, new_s_reg);
-    DCHECK_EQ(mir_graph_->SRegToVReg(old_s_reg), mir_graph_->SRegToVReg(new_s_reg));
-    def_data->mir->ssa_rep->defs[0] = new_s_reg;
-    if (wide) {
-      if (static_cast<int>(def_data->mir->dalvikInsn.opcode) == kMirOpPhi) {
-        // Currently the high word Phi is always located after the low word Phi.
-        MIR* phi_high = def_data->mir->next;
-        DCHECK(phi_high != nullptr && static_cast<int>(phi_high->dalvikInsn.opcode) == kMirOpPhi);
-        DCHECK_EQ(phi_high->ssa_rep->defs[0], old_s_reg + 1);
-        phi_high->ssa_rep->defs[0] = new_s_reg + 1;
-      } else {
-        DCHECK_EQ(def_data->mir->ssa_rep->defs[1], old_s_reg + 1);
-        def_data->mir->ssa_rep->defs[1] = new_s_reg + 1;
-      }
-    }
-    vreg_chains_.RenameSRegUses(def_change + 1u, last_change, old_s_reg, new_s_reg, wide);
-    return nullptr;
-  }
-}
-
-
-void GvnDeadCodeElimination::BackwardPassProcessLastMIR() {
-  MIRData* data = vreg_chains_.LastMIRData();
-  if (data->uses_all_vregs) {
-    DCHECK(data->must_keep);
-    unused_vregs_->ClearAllBits();
-    DCHECK_EQ(no_uses_all_since_, vreg_chains_.NumMIRs());
-    --no_uses_all_since_;
-    while (no_uses_all_since_ != 0u &&
-        !vreg_chains_.GetMIRData(no_uses_all_since_ - 1u)->uses_all_vregs) {
-      --no_uses_all_since_;
-    }
-  } else {
-    if (data->has_def) {
-      unused_vregs_->SetBit(data->vreg_def);
-      if (data->wide_def) {
-        unused_vregs_->SetBit(data->vreg_def + 1);
-      }
-    }
-    for (int i = 0, num_uses = data->mir->ssa_rep->num_uses; i != num_uses; ++i) {
-      int v_reg = mir_graph_->SRegToVReg(data->mir->ssa_rep->uses[i]);
-      unused_vregs_->ClearBit(v_reg);
-    }
-  }
-  vreg_chains_.RemoveLastMIRData();
-}
-
-void GvnDeadCodeElimination::RecordPassKillMoveByRenamingSrcDef(uint16_t src_change,
-                                                                uint16_t move_change) {
-  DCHECK_LT(src_change, move_change);
-  MIRData* src_data = vreg_chains_.GetMIRData(src_change);
-  MIRData* move_data = vreg_chains_.GetMIRData(move_change);
-  DCHECK(src_data->is_move_src);
-  DCHECK_EQ(src_data->wide_def, move_data->wide_def);
-  DCHECK(move_data->prev_value.change == kNPos || move_data->prev_value.change <= src_change);
-  DCHECK(!move_data->wide_def || move_data->prev_value_high.change == kNPos ||
-         move_data->prev_value_high.change <= src_change);
-
-  int old_s_reg = src_data->mir->ssa_rep->defs[0];
-  // NOTE: old_s_reg may differ from move_data->mir->ssa_rep->uses[0]; value names must match.
-  int new_s_reg = move_data->mir->ssa_rep->defs[0];
-  DCHECK_NE(old_s_reg, new_s_reg);
-
-  if (IsInstructionBinOp2Addr(src_data->mir->dalvikInsn.opcode) &&
-      src_data->vreg_def != move_data->vreg_def) {
-    // Rewrite binop_2ADDR with plain binop before doing the register rename.
-    ChangeBinOp2AddrToPlainBinOp(src_data->mir);
-  }
-  // Remove src_change from the vreg chain(s).
-  vreg_chains_.RemoveChange(src_change);
-  // Replace the move_change with the src_change, copying all necessary data.
-  src_data->is_move_src = move_data->is_move_src;
-  src_data->low_def_over_high_word = move_data->low_def_over_high_word;
-  src_data->high_def_over_low_word = move_data->high_def_over_low_word;
-  src_data->vreg_def = move_data->vreg_def;
-  src_data->prev_value = move_data->prev_value;
-  src_data->prev_value_high = move_data->prev_value_high;
-  src_data->mir->dalvikInsn.vA = move_data->vreg_def;
-  src_data->mir->ssa_rep->defs[0] = new_s_reg;
-  if (move_data->wide_def) {
-    DCHECK_EQ(src_data->mir->ssa_rep->defs[1], old_s_reg + 1);
-    src_data->mir->ssa_rep->defs[1] = new_s_reg + 1;
-  }
-  vreg_chains_.ReplaceChange(move_change, src_change);
-
-  // Rename uses and kill the move.
-  vreg_chains_.RenameVRegUses(src_change + 1u, vreg_chains_.NumMIRs(),
-                              old_s_reg, mir_graph_->SRegToVReg(old_s_reg),
-                              new_s_reg, mir_graph_->SRegToVReg(new_s_reg));
-  KillMIR(move_data);
-}
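
A concrete illustration (our example, not one of the tests): given
"add-int v1, v2, v3" followed by "move v0, v1", when v1's new value is unused
after the move, the add's destination is renamed to produce
"add-int v0, v2, v3", the move's slot in the vreg chains is replaced by the
renamed add, and the move itself is NOP'd.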
-
-void GvnDeadCodeElimination::RecordPassTryToKillOverwrittenMoveOrMoveSrc(uint16_t check_change) {
-  MIRData* data = vreg_chains_.GetMIRData(check_change);
-  DCHECK(data->is_move || data->is_move_src);
-  int32_t dest_s_reg = data->mir->ssa_rep->defs[0];
-
-  if (data->is_move) {
-    // Check if source vreg has changed since the MOVE.
-    int32_t src_s_reg = data->mir->ssa_rep->uses[0];
-    uint32_t src_v_reg = mir_graph_->SRegToVReg(src_s_reg);
-    uint16_t src_change = vreg_chains_.FindFirstChangeAfter(src_v_reg, check_change);
-    bool wide = data->wide_def;
-    if (wide) {
-      uint16_t src_change_high = vreg_chains_.FindFirstChangeAfter(src_v_reg + 1, check_change);
-      if (src_change_high != kNPos && (src_change == kNPos || src_change_high < src_change)) {
-        src_change = src_change_high;
-      }
-    }
-    if (src_change == kNPos ||
-        !vreg_chains_.IsSRegUsed(src_change + 1u, vreg_chains_.NumMIRs(), dest_s_reg)) {
-      // We can simply change all uses of dest to src.
-      size_t rename_end = (src_change != kNPos) ? src_change + 1u : vreg_chains_.NumMIRs();
-      vreg_chains_.RenameVRegUses(check_change + 1u, rename_end,
-                                  dest_s_reg, mir_graph_->SRegToVReg(dest_s_reg),
-                                  src_s_reg,  mir_graph_->SRegToVReg(src_s_reg));
-
-      // Now, remove the MOVE from the vreg chain(s) and kill it.
-      vreg_chains_.RemoveChange(check_change);
-      KillMIR(data);
-      return;
-    }
-  }
-
-  if (data->is_move_src) {
-    // Try to find a MOVE to a vreg that wasn't changed since check_change.
-    uint16_t value_name =
-        data->wide_def ? lvn_->GetSregValueWide(dest_s_reg) : lvn_->GetSregValue(dest_s_reg);
-    uint32_t dest_v_reg = mir_graph_->SRegToVReg(dest_s_reg);
-    for (size_t c = check_change + 1u, size = vreg_chains_.NumMIRs(); c != size; ++c) {
-      MIRData* d = vreg_chains_.GetMIRData(c);
-      if (d->is_move && d->wide_def == data->wide_def &&
-          (d->prev_value.change == kNPos || d->prev_value.change <= check_change) &&
-          (!d->wide_def ||
-           d->prev_value_high.change == kNPos || d->prev_value_high.change <= check_change)) {
-        // Compare value names to find a move whose source carries the same value.
-        int32_t src_s_reg = d->mir->ssa_rep->uses[0];
-        uint16_t src_name =
-            (d->wide_def ? lvn_->GetSregValueWide(src_s_reg) : lvn_->GetSregValue(src_s_reg));
-        if (value_name == src_name) {
-          // Check if the move's destination vreg is unused between check_change and the move.
-          uint32_t new_dest_v_reg = mir_graph_->SRegToVReg(d->mir->ssa_rep->defs[0]);
-          if (!vreg_chains_.IsVRegUsed(check_change + 1u, c, new_dest_v_reg, mir_graph_) &&
-              (!d->wide_def ||
-               !vreg_chains_.IsVRegUsed(check_change + 1u, c, new_dest_v_reg + 1, mir_graph_))) {
-            // If the move's destination vreg changed, check if the vreg we're trying
-            // to rename is unused after that change.
-            uint16_t dest_change = vreg_chains_.FindFirstChangeAfter(new_dest_v_reg, c);
-            if (d->wide_def) {
-              uint16_t dest_change_high = vreg_chains_.FindFirstChangeAfter(new_dest_v_reg + 1, c);
-              if (dest_change_high != kNPos &&
-                  (dest_change == kNPos || dest_change_high < dest_change)) {
-                dest_change = dest_change_high;
-              }
-            }
-            if (dest_change == kNPos ||
-                !vreg_chains_.IsVRegUsed(dest_change + 1u, size, dest_v_reg, mir_graph_)) {
-              RecordPassKillMoveByRenamingSrcDef(check_change, c);
-              return;
-            }
-          }
-        }
-      }
-    }
-  }
-}
-
-void GvnDeadCodeElimination::RecordPassTryToKillOverwrittenMoveOrMoveSrc() {
-  // Check if we're overwriting the result of a move or the definition of a move's source.
-  // For MOVE_WIDE, we may be overwriting it partially; if that's the case, check that the
-  // other word wasn't previously overwritten - we would have tried to rename back then.
-  MIRData* data = vreg_chains_.LastMIRData();
-  if (!data->has_def) {
-    return;
-  }
-  // NOTE: Instructions such as new-array implicitly use all vregs (if they throw) but they can
-  // define a move source which can be renamed. Therefore we allow the checked change to be the
-  // change before no_uses_all_since_. This has no effect on moves as they never use all vregs.
-  if (data->prev_value.change != kNPos && data->prev_value.change + 1u >= no_uses_all_since_) {
-    MIRData* check_data = vreg_chains_.GetMIRData(data->prev_value.change);
-    bool try_to_kill = false;
-    if (!check_data->is_move && !check_data->is_move_src) {
-      DCHECK(!try_to_kill);
-    } else if (!check_data->wide_def) {
-      // Narrow move; always fully overwritten by the last MIR.
-      try_to_kill = true;
-    } else if (data->low_def_over_high_word) {
-      // Overwriting only the high word; is the low word still valid?
-      DCHECK_EQ(check_data->vreg_def + 1u, data->vreg_def);
-      if (vreg_chains_.LastChange(check_data->vreg_def) == data->prev_value.change) {
-        try_to_kill = true;
-      }
-    } else if (!data->wide_def) {
-      // Overwriting only the low word; is the high word still valid?
-      if (vreg_chains_.LastChange(data->vreg_def + 1) == data->prev_value.change) {
-        try_to_kill = true;
-      }
-    } else {
-      // Overwriting both words; was the high word still from the same move?
-      if (data->prev_value_high.change == data->prev_value.change) {
-        try_to_kill = true;
-      }
-    }
-    if (try_to_kill) {
-      RecordPassTryToKillOverwrittenMoveOrMoveSrc(data->prev_value.change);
-    }
-  }
-  if (data->wide_def && data->high_def_over_low_word &&
-      data->prev_value_high.change != kNPos &&
-      data->prev_value_high.change + 1u >= no_uses_all_since_) {
-    MIRData* check_data = vreg_chains_.GetMIRData(data->prev_value_high.change);
-    bool try_to_kill = false;
-    if (!check_data->is_move && !check_data->is_move_src) {
-      DCHECK(!try_to_kill);
-    } else if (!check_data->wide_def) {
-      // Narrow move; always fully overwritten by the last MIR.
-      try_to_kill = true;
-    } else if (vreg_chains_.LastChange(check_data->vreg_def + 1) ==
-        data->prev_value_high.change) {
-      // High word is still valid.
-      try_to_kill = true;
-    }
-    if (try_to_kill) {
-      RecordPassTryToKillOverwrittenMoveOrMoveSrc(data->prev_value_high.change);
-    }
-  }
-}
-
-void GvnDeadCodeElimination::RecordPassTryToKillLastMIR() {
-  MIRData* last_data = vreg_chains_.LastMIRData();
-  if (last_data->must_keep) {
-    return;
-  }
-  if (UNLIKELY(!last_data->has_def)) {
-    // Must be an eliminated MOVE. Drop its data and data of all eliminated MIRs before it.
-    vreg_chains_.RemoveTrailingNops();
-    return;
-  }
-
-  // Try to kill a sequence of consecutive definitions of the same vreg. Allow mixing
-  // wide and non-wide defs; consider high word dead if low word has been overwritten.
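-  // E.g. for a (hypothetical) sequence "const v0, #1; const v0, #2; const v0, #1", the last
-  // two defs revert v0 to its previous value name, so both of them can be killed.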
-  uint16_t current_value = vreg_chains_.CurrentValue(last_data->vreg_def);
-  uint16_t change = vreg_chains_.NumMIRs() - 1u;
-  MIRData* data = last_data;
-  while (data->prev_value.value != current_value) {
-    --change;
-    if (data->prev_value.change == kNPos || data->prev_value.change != change) {
-      return;
-    }
-    data = vreg_chains_.GetMIRData(data->prev_value.change);
-    if (data->must_keep || !data->has_def || data->vreg_def != last_data->vreg_def) {
-      return;
-    }
-  }
-
-  bool wide = last_data->wide_def;
-  if (wide) {
-    // Check that the low word is valid.
-    if (data->low_def_over_high_word) {
-      return;
-    }
-    // Check that the high word is valid.
-    MIRData* high_data = data;
-    if (!high_data->wide_def) {
-      uint16_t high_change = vreg_chains_.FindFirstChangeAfter(data->vreg_def + 1, change);
-      DCHECK_NE(high_change, kNPos);
-      high_data = vreg_chains_.GetMIRData(high_change);
-      DCHECK_EQ(high_data->vreg_def, data->vreg_def);
-    }
-    if (high_data->prev_value_high.value != current_value || high_data->high_def_over_low_word) {
-      return;
-    }
-  }
-
-  MIR* phi = RenameSRegDefOrCreatePhi(data->prev_value.change, change, last_data->mir);
-  for (size_t i = 0, count = vreg_chains_.NumMIRs() - change; i != count; ++i) {
-    KillMIR(vreg_chains_.LastMIRData()->mir);
-    vreg_chains_.RemoveLastMIRData();
-  }
-  if (phi != nullptr) {
-    // Though the Phi has been added to the beginning, we can put the MIRData at the end.
-    vreg_chains_.AddMIRWithDef(phi, phi->dalvikInsn.vA, wide, current_value);
-    // Reset the previous value to avoid eventually eliminating the Phi itself (unless unused).
-    last_data = vreg_chains_.LastMIRData();
-    last_data->prev_value.value = kNoValue;
-    last_data->prev_value_high.value = kNoValue;
-  }
-}
-
-uint16_t GvnDeadCodeElimination::FindChangesToKill(uint16_t first_change, uint16_t last_change) {
-  // Process dependencies for changes in range [first_change, last_change) and record all
-  // changes that we need to kill. Return kNPos if there's a dependent change that must be
-  // kept unconditionally. Otherwise, return the end of the range processed before
-  // encountering a change that defines a dalvik reg we need to keep (this is last_change
-  // on full success).
-  changes_to_kill_.clear();
-  dependent_vregs_->ClearAllBits();
-  for (size_t change = first_change; change != last_change; ++change) {
-    MIRData* data = vreg_chains_.GetMIRData(change);
-    DCHECK(!data->uses_all_vregs);
-    bool must_not_depend = data->must_keep;
-    bool depends = false;
-    // Check if the MIR defines a vreg we're trying to eliminate.
-    if (data->has_def && vregs_to_kill_->IsBitSet(data->vreg_def)) {
-      if (change < kill_heads_[data->vreg_def]) {
-        must_not_depend = true;
-      } else {
-        depends = true;
-      }
-    }
-    if (data->has_def && data->wide_def && vregs_to_kill_->IsBitSet(data->vreg_def + 1)) {
-      if (change < kill_heads_[data->vreg_def + 1]) {
-        must_not_depend = true;
-      } else {
-        depends = true;
-      }
-    }
-    if (!depends) {
-      // Check for dependency through SSA reg uses.
-      SSARepresentation* ssa_rep = data->mir->ssa_rep;
-      for (int i = 0; i != ssa_rep->num_uses; ++i) {
-        if (dependent_vregs_->IsBitSet(mir_graph_->SRegToVReg(ssa_rep->uses[i]))) {
-          depends = true;
-          break;
-        }
-      }
-    }
-    // Now check if we can eliminate the insn if we need to.
-    if (depends && must_not_depend) {
-      return kNPos;
-    }
-    if (depends && data->has_def &&
-        vreg_chains_.IsTopChange(change) && !vregs_to_kill_->IsBitSet(data->vreg_def) &&
-        !unused_vregs_->IsBitSet(data->vreg_def) &&
-        (!data->wide_def || !unused_vregs_->IsBitSet(data->vreg_def + 1))) {
-      // This is a top change but neither unnecessary nor one of the top kill changes.
-      return change;
-    }
-    // Finally, update the data.
-    if (depends) {
-      changes_to_kill_.push_back(change);
-      if (data->has_def) {
-        dependent_vregs_->SetBit(data->vreg_def);
-        if (data->wide_def) {
-          dependent_vregs_->SetBit(data->vreg_def + 1);
-        }
-      }
-    } else {
-      if (data->has_def) {
-        dependent_vregs_->ClearBit(data->vreg_def);
-        if (data->wide_def) {
-          dependent_vregs_->ClearBit(data->vreg_def + 1);
-        }
-      }
-    }
-  }
-  return last_change;
-}
-
-void GvnDeadCodeElimination::BackwardPassTryToKillRevertVRegs() {
-}
-
-bool GvnDeadCodeElimination::BackwardPassTryToKillLastMIR() {
-  MIRData* last_data = vreg_chains_.LastMIRData();
-  if (last_data->must_keep) {
-    return false;
-  }
-  DCHECK(!last_data->uses_all_vregs);
-  if (!last_data->has_def) {
-    // Previously eliminated.
-    DCHECK_EQ(static_cast<int>(last_data->mir->dalvikInsn.opcode), static_cast<int>(kMirOpNop));
-    vreg_chains_.RemoveTrailingNops();
-    return true;
-  }
-  if (unused_vregs_->IsBitSet(last_data->vreg_def) ||
-      (last_data->wide_def && unused_vregs_->IsBitSet(last_data->vreg_def + 1))) {
-    if (last_data->wide_def) {
-      // For wide defs, one of the vregs may still be considered needed; fix that.
-      unused_vregs_->SetBit(last_data->vreg_def);
-      unused_vregs_->SetBit(last_data->vreg_def + 1);
-    }
-    KillMIR(last_data->mir);
-    vreg_chains_.RemoveLastMIRData();
-    return true;
-  }
-
-  vregs_to_kill_->ClearAllBits();
-  size_t num_mirs = vreg_chains_.NumMIRs();
-  DCHECK_NE(num_mirs, 0u);
-  uint16_t kill_change = num_mirs - 1u;
-  uint16_t start = num_mirs;
-  size_t num_killed_top_changes = 0u;
-  while (num_killed_top_changes != kMaxNumTopChangesToKill &&
-      kill_change != kNPos && kill_change != num_mirs) {
-    ++num_killed_top_changes;
-
-    DCHECK(vreg_chains_.IsTopChange(kill_change));
-    MIRData* data = vreg_chains_.GetMIRData(kill_change);
-    int count = data->wide_def ? 2 : 1;
-    for (int v_reg = data->vreg_def, end = data->vreg_def + count; v_reg != end; ++v_reg) {
-      uint16_t kill_head = vreg_chains_.FindKillHead(v_reg, no_uses_all_since_);
-      if (kill_head == kNPos) {
-        return false;
-      }
-      kill_heads_[v_reg] = kill_head;
-      vregs_to_kill_->SetBit(v_reg);
-      start = std::min(start, kill_head);
-    }
-    DCHECK_LT(start, vreg_chains_.NumMIRs());
-
-    kill_change = FindChangesToKill(start, num_mirs);
-  }
-
-  if (kill_change != num_mirs) {
-    return false;
-  }
-
-  // Kill all MIRs marked as dependent.
-  for (uint32_t v_reg : vregs_to_kill_->Indexes()) {
-    // Rename s_regs or create Phi only once for each MIR (only for low word).
-    MIRData* data = vreg_chains_.GetMIRData(vreg_chains_.LastChange(v_reg));
-    DCHECK(data->has_def);
-    if (data->vreg_def == v_reg) {
-      MIRData* kill_head_data = vreg_chains_.GetMIRData(kill_heads_[v_reg]);
-      RenameSRegDefOrCreatePhi(kill_head_data->PrevChange(v_reg), num_mirs, data->mir);
-    } else {
-      DCHECK_EQ(data->vreg_def + 1u, v_reg);
-      DCHECK_EQ(vreg_chains_.GetMIRData(kill_heads_[v_reg - 1u])->PrevChange(v_reg - 1u),
-                vreg_chains_.GetMIRData(kill_heads_[v_reg])->PrevChange(v_reg));
-    }
-  }
-  for (auto it = changes_to_kill_.rbegin(), end = changes_to_kill_.rend(); it != end; ++it) {
-    MIRData* data = vreg_chains_.GetMIRData(*it);
-    DCHECK(!data->must_keep);
-    DCHECK(data->has_def);
-    vreg_chains_.RemoveChange(*it);
-    KillMIR(data);
-  }
-
-  // Each dependent register not in vregs_to_kill_ is either already marked unused or
-  // it's one word of a wide register where the other word has been overwritten.
-  unused_vregs_->UnionIfNotIn(dependent_vregs_, vregs_to_kill_);
-
-  vreg_chains_.RemoveTrailingNops();
-  return true;
-}
-
-bool GvnDeadCodeElimination::RecordMIR(MIR* mir) {
-  bool must_keep = false;
-  bool uses_all_vregs = false;
-  bool is_move = false;
-  uint16_t opcode = mir->dalvikInsn.opcode;
-  switch (opcode) {
-    case kMirOpPhi: {
-      // Determine if this Phi is merging wide regs.
-      RegLocation raw_dest = gvn_->GetMirGraph()->GetRawDest(mir);
-      if (raw_dest.high_word) {
-        // This is the high part of a wide reg. Ignore the Phi.
-        return false;
-      }
-      bool wide = raw_dest.wide;
-      // Record the value.
-      DCHECK_EQ(mir->ssa_rep->num_defs, 1);
-      int s_reg = mir->ssa_rep->defs[0];
-      uint16_t new_value = wide ? lvn_->GetSregValueWide(s_reg) : lvn_->GetSregValue(s_reg);
-
-      int v_reg = mir_graph_->SRegToVReg(s_reg);
-      DCHECK_EQ(vreg_chains_.CurrentValue(v_reg), kNoValue);  // No previous def for v_reg.
-      if (wide) {
-        DCHECK_EQ(vreg_chains_.CurrentValue(v_reg + 1), kNoValue);
-      }
-      vreg_chains_.AddMIRWithDef(mir, v_reg, wide, new_value);
-      return true;  // Avoid the common processing.
-    }
-
-    case kMirOpNop:
-    case Instruction::NOP:
-      // Don't record NOPs.
-      return false;
-
-    case kMirOpCheck:
-      must_keep = true;
-      uses_all_vregs = true;
-      break;
-
-    case Instruction::RETURN_VOID:
-    case Instruction::RETURN:
-    case Instruction::RETURN_OBJECT:
-    case Instruction::RETURN_WIDE:
-    case Instruction::GOTO:
-    case Instruction::GOTO_16:
-    case Instruction::GOTO_32:
-    case Instruction::PACKED_SWITCH:
-    case Instruction::SPARSE_SWITCH:
-    case Instruction::IF_EQ:
-    case Instruction::IF_NE:
-    case Instruction::IF_LT:
-    case Instruction::IF_GE:
-    case Instruction::IF_GT:
-    case Instruction::IF_LE:
-    case Instruction::IF_EQZ:
-    case Instruction::IF_NEZ:
-    case Instruction::IF_LTZ:
-    case Instruction::IF_GEZ:
-    case Instruction::IF_GTZ:
-    case Instruction::IF_LEZ:
-    case kMirOpFusedCmplFloat:
-    case kMirOpFusedCmpgFloat:
-    case kMirOpFusedCmplDouble:
-    case kMirOpFusedCmpgDouble:
-    case kMirOpFusedCmpLong:
-      must_keep = true;
-      uses_all_vregs = true;  // Keep the implicit dependencies on all vregs.
-      break;
-
-    case Instruction::CONST_CLASS:
-    case Instruction::CONST_STRING:
-    case Instruction::CONST_STRING_JUMBO:
-      // NOTE: We currently treat CONST_CLASS, CONST_STRING and CONST_STRING_JUMBO as
-      // throwing, but we could conceivably try to eliminate those exceptions if we're
-      // retrieving the class/string repeatedly.
-      must_keep = true;
-      uses_all_vregs = true;
-      break;
-
-    case Instruction::MONITOR_ENTER:
-    case Instruction::MONITOR_EXIT:
-      // We can actually try to optimize across the acquire operation of MONITOR_ENTER;
-      // the value names provided by GVN reflect the possible changes to memory visibility.
-      // NOTE: In ART, MONITOR_ENTER and MONITOR_EXIT can throw only NPE.
-      must_keep = true;
-      uses_all_vregs = (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0;
-      break;
-
-    case Instruction::INVOKE_DIRECT:
-    case Instruction::INVOKE_DIRECT_RANGE:
-    case Instruction::INVOKE_VIRTUAL:
-    case Instruction::INVOKE_VIRTUAL_RANGE:
-    case Instruction::INVOKE_SUPER:
-    case Instruction::INVOKE_SUPER_RANGE:
-    case Instruction::INVOKE_INTERFACE:
-    case Instruction::INVOKE_INTERFACE_RANGE:
-    case Instruction::INVOKE_STATIC:
-    case Instruction::INVOKE_STATIC_RANGE:
-    case Instruction::THROW:
-    case Instruction::FILLED_NEW_ARRAY:
-    case Instruction::FILLED_NEW_ARRAY_RANGE:
-    case Instruction::FILL_ARRAY_DATA:
-      must_keep = true;
-      uses_all_vregs = true;
-      break;
-
-    case Instruction::NEW_INSTANCE:
-    case Instruction::NEW_ARRAY:
-      must_keep = true;
-      uses_all_vregs = true;
-      break;
-
-    case Instruction::CHECK_CAST:
-      DCHECK_EQ(mir->ssa_rep->num_uses, 1);
-      must_keep = true;  // Keep for type information even if MIR_IGNORE_CHECK_CAST.
-      uses_all_vregs = (mir->optimization_flags & MIR_IGNORE_CHECK_CAST) == 0;
-      break;
-
-    case kMirOpNullCheck:
-      DCHECK_EQ(mir->ssa_rep->num_uses, 1);
-      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) {
-        mir->ssa_rep->num_uses = 0;
-        mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-        return false;
-      }
-      must_keep = true;
-      uses_all_vregs = true;
-      break;
-
-    case Instruction::MOVE_RESULT:
-    case Instruction::MOVE_RESULT_OBJECT:
-    case Instruction::MOVE_RESULT_WIDE:
-      break;
-
-    case Instruction::INSTANCE_OF:
-      break;
-
-    case Instruction::MOVE_EXCEPTION:
-      must_keep = true;
-      break;
-
-    case kMirOpCopy:
-    case Instruction::MOVE:
-    case Instruction::MOVE_FROM16:
-    case Instruction::MOVE_16:
-    case Instruction::MOVE_WIDE:
-    case Instruction::MOVE_WIDE_FROM16:
-    case Instruction::MOVE_WIDE_16:
-    case Instruction::MOVE_OBJECT:
-    case Instruction::MOVE_OBJECT_FROM16:
-    case Instruction::MOVE_OBJECT_16: {
-      is_move = true;
-      // If the MIR defining src vreg is known, allow renaming all uses of src vreg to dest vreg
-      // while updating the defining MIR to directly define dest vreg. However, changing Phi's
-      // def this way doesn't work without changing MIRs in other BBs.
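-      // E.g. (hypothetical) for "add-int v5, v1, v2; move v0, v5", we may later rewrite the
-      // add to define v0 directly and kill the move, provided v5 isn't used afterwards.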
-      int src_v_reg = mir_graph_->SRegToVReg(mir->ssa_rep->uses[0]);
-      int src_change = vreg_chains_.LastChange(src_v_reg);
-      if (src_change != kNPos) {
-        MIRData* src_data = vreg_chains_.GetMIRData(src_change);
-        if (static_cast<int>(src_data->mir->dalvikInsn.opcode) != kMirOpPhi) {
-          src_data->is_move_src = true;
-        }
-      }
-      break;
-    }
-
-    case Instruction::CONST_4:
-    case Instruction::CONST_16:
-    case Instruction::CONST:
-    case Instruction::CONST_HIGH16:
-    case Instruction::CONST_WIDE_16:
-    case Instruction::CONST_WIDE_32:
-    case Instruction::CONST_WIDE:
-    case Instruction::CONST_WIDE_HIGH16:
-    case Instruction::CMPL_FLOAT:
-    case Instruction::CMPG_FLOAT:
-    case Instruction::CMPL_DOUBLE:
-    case Instruction::CMPG_DOUBLE:
-    case Instruction::CMP_LONG:
-    case Instruction::NEG_INT:
-    case Instruction::NOT_INT:
-    case Instruction::NEG_LONG:
-    case Instruction::NOT_LONG:
-    case Instruction::NEG_FLOAT:
-    case Instruction::NEG_DOUBLE:
-    case Instruction::INT_TO_LONG:
-    case Instruction::INT_TO_FLOAT:
-    case Instruction::INT_TO_DOUBLE:
-    case Instruction::LONG_TO_INT:
-    case Instruction::LONG_TO_FLOAT:
-    case Instruction::LONG_TO_DOUBLE:
-    case Instruction::FLOAT_TO_INT:
-    case Instruction::FLOAT_TO_LONG:
-    case Instruction::FLOAT_TO_DOUBLE:
-    case Instruction::DOUBLE_TO_INT:
-    case Instruction::DOUBLE_TO_LONG:
-    case Instruction::DOUBLE_TO_FLOAT:
-    case Instruction::INT_TO_BYTE:
-    case Instruction::INT_TO_CHAR:
-    case Instruction::INT_TO_SHORT:
-    case Instruction::ADD_INT:
-    case Instruction::SUB_INT:
-    case Instruction::MUL_INT:
-    case Instruction::AND_INT:
-    case Instruction::OR_INT:
-    case Instruction::XOR_INT:
-    case Instruction::SHL_INT:
-    case Instruction::SHR_INT:
-    case Instruction::USHR_INT:
-    case Instruction::ADD_LONG:
-    case Instruction::SUB_LONG:
-    case Instruction::MUL_LONG:
-    case Instruction::AND_LONG:
-    case Instruction::OR_LONG:
-    case Instruction::XOR_LONG:
-    case Instruction::SHL_LONG:
-    case Instruction::SHR_LONG:
-    case Instruction::USHR_LONG:
-    case Instruction::ADD_FLOAT:
-    case Instruction::SUB_FLOAT:
-    case Instruction::MUL_FLOAT:
-    case Instruction::DIV_FLOAT:
-    case Instruction::REM_FLOAT:
-    case Instruction::ADD_DOUBLE:
-    case Instruction::SUB_DOUBLE:
-    case Instruction::MUL_DOUBLE:
-    case Instruction::DIV_DOUBLE:
-    case Instruction::REM_DOUBLE:
-    case Instruction::ADD_INT_2ADDR:
-    case Instruction::SUB_INT_2ADDR:
-    case Instruction::MUL_INT_2ADDR:
-    case Instruction::AND_INT_2ADDR:
-    case Instruction::OR_INT_2ADDR:
-    case Instruction::XOR_INT_2ADDR:
-    case Instruction::SHL_INT_2ADDR:
-    case Instruction::SHR_INT_2ADDR:
-    case Instruction::USHR_INT_2ADDR:
-    case Instruction::ADD_LONG_2ADDR:
-    case Instruction::SUB_LONG_2ADDR:
-    case Instruction::MUL_LONG_2ADDR:
-    case Instruction::AND_LONG_2ADDR:
-    case Instruction::OR_LONG_2ADDR:
-    case Instruction::XOR_LONG_2ADDR:
-    case Instruction::SHL_LONG_2ADDR:
-    case Instruction::SHR_LONG_2ADDR:
-    case Instruction::USHR_LONG_2ADDR:
-    case Instruction::ADD_FLOAT_2ADDR:
-    case Instruction::SUB_FLOAT_2ADDR:
-    case Instruction::MUL_FLOAT_2ADDR:
-    case Instruction::DIV_FLOAT_2ADDR:
-    case Instruction::REM_FLOAT_2ADDR:
-    case Instruction::ADD_DOUBLE_2ADDR:
-    case Instruction::SUB_DOUBLE_2ADDR:
-    case Instruction::MUL_DOUBLE_2ADDR:
-    case Instruction::DIV_DOUBLE_2ADDR:
-    case Instruction::REM_DOUBLE_2ADDR:
-    case Instruction::ADD_INT_LIT16:
-    case Instruction::RSUB_INT:
-    case Instruction::MUL_INT_LIT16:
-    case Instruction::AND_INT_LIT16:
-    case Instruction::OR_INT_LIT16:
-    case Instruction::XOR_INT_LIT16:
-    case Instruction::ADD_INT_LIT8:
-    case Instruction::RSUB_INT_LIT8:
-    case Instruction::MUL_INT_LIT8:
-    case Instruction::AND_INT_LIT8:
-    case Instruction::OR_INT_LIT8:
-    case Instruction::XOR_INT_LIT8:
-    case Instruction::SHL_INT_LIT8:
-    case Instruction::SHR_INT_LIT8:
-    case Instruction::USHR_INT_LIT8:
-      break;
-
-    case Instruction::DIV_INT:
-    case Instruction::REM_INT:
-    case Instruction::DIV_LONG:
-    case Instruction::REM_LONG:
-    case Instruction::DIV_INT_2ADDR:
-    case Instruction::REM_INT_2ADDR:
-    case Instruction::DIV_LONG_2ADDR:
-    case Instruction::REM_LONG_2ADDR:
-      if ((mir->optimization_flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
-        must_keep = true;
-        uses_all_vregs = true;
-      }
-      break;
-
-    case Instruction::DIV_INT_LIT16:
-    case Instruction::REM_INT_LIT16:
-    case Instruction::DIV_INT_LIT8:
-    case Instruction::REM_INT_LIT8:
-      if (mir->dalvikInsn.vC == 0) {  // Explicit division by 0?
-        must_keep = true;
-        uses_all_vregs = true;
-      }
-      break;
-
-    case Instruction::ARRAY_LENGTH:
-      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0) {
-        must_keep = true;
-        uses_all_vregs = true;
-      }
-      break;
-
-    case Instruction::AGET_OBJECT:
-    case Instruction::AGET:
-    case Instruction::AGET_WIDE:
-    case Instruction::AGET_BOOLEAN:
-    case Instruction::AGET_BYTE:
-    case Instruction::AGET_CHAR:
-    case Instruction::AGET_SHORT:
-      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 ||
-          (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) == 0) {
-        must_keep = true;
-        uses_all_vregs = true;
-      }
-      break;
-
-    case Instruction::APUT_OBJECT:
-    case Instruction::APUT:
-    case Instruction::APUT_WIDE:
-    case Instruction::APUT_BYTE:
-    case Instruction::APUT_BOOLEAN:
-    case Instruction::APUT_SHORT:
-    case Instruction::APUT_CHAR:
-      must_keep = true;
-      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 ||
-          (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) == 0) {
-        uses_all_vregs = true;
-      }
-      break;
-
-    case Instruction::IGET_OBJECT:
-    case Instruction::IGET:
-    case Instruction::IGET_WIDE:
-    case Instruction::IGET_BOOLEAN:
-    case Instruction::IGET_BYTE:
-    case Instruction::IGET_CHAR:
-    case Instruction::IGET_SHORT: {
-      const MirIFieldLoweringInfo& info = mir_graph_->GetIFieldLoweringInfo(mir);
-      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 ||
-          !info.IsResolved() || !info.FastGet()) {
-        must_keep = true;
-        uses_all_vregs = true;
-      } else if (info.IsVolatile()) {
-        must_keep = true;
-      }
-      break;
-    }
-
-    case Instruction::IPUT_OBJECT:
-    case Instruction::IPUT:
-    case Instruction::IPUT_WIDE:
-    case Instruction::IPUT_BOOLEAN:
-    case Instruction::IPUT_BYTE:
-    case Instruction::IPUT_CHAR:
-    case Instruction::IPUT_SHORT: {
-      must_keep = true;
-      const MirIFieldLoweringInfo& info = mir_graph_->GetIFieldLoweringInfo(mir);
-      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 ||
-          !info.IsResolved() || !info.FastPut()) {
-        uses_all_vregs = true;
-      }
-      break;
-    }
-
-    case Instruction::SGET_OBJECT:
-    case Instruction::SGET:
-    case Instruction::SGET_WIDE:
-    case Instruction::SGET_BOOLEAN:
-    case Instruction::SGET_BYTE:
-    case Instruction::SGET_CHAR:
-    case Instruction::SGET_SHORT: {
-      const MirSFieldLoweringInfo& info = mir_graph_->GetSFieldLoweringInfo(mir);
-      if ((mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0 ||
-          !info.IsResolved() || !info.FastGet()) {
-        must_keep = true;
-        uses_all_vregs = true;
-      } else if (info.IsVolatile()) {
-        must_keep = true;
-      }
-      break;
-    }
-
-    case Instruction::SPUT_OBJECT:
-    case Instruction::SPUT:
-    case Instruction::SPUT_WIDE:
-    case Instruction::SPUT_BOOLEAN:
-    case Instruction::SPUT_BYTE:
-    case Instruction::SPUT_CHAR:
-    case Instruction::SPUT_SHORT: {
-      must_keep = true;
-      const MirSFieldLoweringInfo& info = mir_graph_->GetSFieldLoweringInfo(mir);
-      if ((mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0 ||
-          !info.IsResolved() || !info.FastPut()) {
-        uses_all_vregs = true;
-      }
-      break;
-    }
-
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
-      UNREACHABLE();
-  }
-
-  if (mir->ssa_rep->num_defs != 0) {
-    DCHECK(mir->ssa_rep->num_defs == 1 || mir->ssa_rep->num_defs == 2);
-    bool wide = (mir->ssa_rep->num_defs == 2);
-    int s_reg = mir->ssa_rep->defs[0];
-    int v_reg = mir_graph_->SRegToVReg(s_reg);
-    uint16_t new_value = wide ? lvn_->GetSregValueWide(s_reg) : lvn_->GetSregValue(s_reg);
-    DCHECK_NE(new_value, kNoValue);
-
-    vreg_chains_.UpdateInitialVRegValue(v_reg, wide, lvn_);
-    vreg_chains_.AddMIRWithDef(mir, v_reg, wide, new_value);
-    if (is_move) {
-      // Allow renaming all uses of dest vreg to src vreg.
-      vreg_chains_.LastMIRData()->is_move = true;
-    }
-  } else {
-    vreg_chains_.AddMIRWithoutDef(mir);
-    DCHECK(!is_move) << opcode;
-  }
-
-  if (must_keep) {
-    MIRData* last_data = vreg_chains_.LastMIRData();
-    last_data->must_keep = true;
-    if (uses_all_vregs) {
-      last_data->uses_all_vregs = true;
-      no_uses_all_since_ = vreg_chains_.NumMIRs();
-    }
-  } else {
-    DCHECK_NE(mir->ssa_rep->num_defs, 0) << opcode;
-    DCHECK(!uses_all_vregs) << opcode;
-  }
-  return true;
-}
-
-}  // namespace art
diff --git a/compiler/dex/gvn_dead_code_elimination.h b/compiler/dex/gvn_dead_code_elimination.h
deleted file mode 100644
index 06022db..0000000
--- a/compiler/dex/gvn_dead_code_elimination.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_GVN_DEAD_CODE_ELIMINATION_H_
-#define ART_COMPILER_DEX_GVN_DEAD_CODE_ELIMINATION_H_
-
-#include "base/arena_object.h"
-#include "base/scoped_arena_containers.h"
-#include "global_value_numbering.h"
-
-namespace art {
-
-class ArenaBitVector;
-class BasicBlock;
-class LocalValueNumbering;
-class MIR;
-class MIRGraph;
-
-/**
- * @class GvnDeadCodeElimination
- * @details Eliminate dead code based on the results of global value numbering.
- * Also get rid of MOVE insns when we can use the source instead of the destination
- * without affecting the vreg values at safepoints; this is useful in methods
- * with a large number of vregs that frequently move values to and from low vregs
- * to accommodate insns that can work only with the low 16 or 256 vregs.
- */
-class GvnDeadCodeElimination : public DeletableArenaObject<kArenaAllocMisc> {
- public:
-  GvnDeadCodeElimination(const GlobalValueNumbering* gvn, ScopedArenaAllocator* alloc);
-
-  // Apply the DCE to a basic block.
-  void Apply(BasicBlock* bb);
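-  // Typical use (cf. gvn_dead_code_elimination_test.cc): run GVN to completion first,
-  // then construct this pass and call Apply() on each kDalvikByteCode block.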
-
- private:
-  static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
-  static constexpr uint16_t kNPos = 0xffffu;
-  static constexpr size_t kMaxNumTopChangesToKill = 2;
-
-  struct VRegValue {
-    VRegValue() : value(kNoValue), change(kNPos) { }
-
-    // Value name as reported by GVN, kNoValue if not available.
-    uint16_t value;
-    // Index of the change in mir_data_ that defined the value, kNPos if initial value for the BB.
-    uint16_t change;
-  };
-
-  struct MIRData {
-    explicit MIRData(MIR* m)
-        : mir(m), uses_all_vregs(false), must_keep(false), is_move(false), is_move_src(false),
-          has_def(false), wide_def(false),
-          low_def_over_high_word(false), high_def_over_low_word(false), vreg_def(0u),
-          prev_value(), prev_value_high() {
-    }
-
-    uint16_t PrevChange(int v_reg) const;
-    void SetPrevChange(int v_reg, uint16_t change);
-    void RemovePrevChange(int v_reg, MIRData* prev_data);
-
-    MIR* mir;
-    bool uses_all_vregs : 1;  // If mir uses all vregs, uses in mir->ssa_rep are irrelevant.
-    bool must_keep : 1;
-    bool is_move : 1;
-    bool is_move_src : 1;
-    bool has_def : 1;
-    bool wide_def : 1;
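-    // The two flags below record partial overlap with wide values: this def's low word
-    // landing on an older value's high word, and vice versa.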
-    bool low_def_over_high_word : 1;
-    bool high_def_over_low_word : 1;
-    uint16_t vreg_def;
-    VRegValue prev_value;
-    VRegValue prev_value_high;   // For wide defs.
-  };
-
-  class VRegChains {
-   public:
-    VRegChains(uint32_t num_vregs, ScopedArenaAllocator* alloc);
-
-    void Reset();
-
-    void AddMIRWithDef(MIR* mir, int v_reg, bool wide, uint16_t new_value);
-    void AddMIRWithoutDef(MIR* mir);
-    void RemoveLastMIRData();
-    void RemoveTrailingNops();
-
-    size_t NumMIRs() const;
-    MIRData* GetMIRData(size_t pos);
-    MIRData* LastMIRData();
-
-    uint32_t NumVRegs() const;
-    void InsertInitialValueHigh(int v_reg, uint16_t value);
-    void UpdateInitialVRegValue(int v_reg, bool wide, const LocalValueNumbering* lvn);
-    uint16_t LastChange(int v_reg);
-    uint16_t CurrentValue(int v_reg);
-
-    uint16_t FindKillHead(int v_reg, uint16_t cutoff);
-    uint16_t FindFirstChangeAfter(int v_reg, uint16_t change) const;
-    void ReplaceChange(uint16_t old_change, uint16_t new_change);
-    void RemoveChange(uint16_t change);
-    bool IsTopChange(uint16_t change) const;
-    bool IsSRegUsed(uint16_t first_change, uint16_t last_change, int s_reg) const;
-    bool IsVRegUsed(uint16_t first_change, uint16_t last_change, int v_reg,
-                    MIRGraph* mir_graph) const;
-    void RenameSRegUses(uint16_t first_change, uint16_t last_change,
-                        int old_s_reg, int new_s_reg, bool wide);
-    void RenameVRegUses(uint16_t first_change, uint16_t last_change,
-                        int old_s_reg, int old_v_reg, int new_s_reg, int new_v_reg);
-
-   private:
-    const uint32_t num_vregs_;
-    VRegValue* const vreg_data_;
-    BitVector vreg_high_words_;
-    ScopedArenaVector<MIRData> mir_data_;
-  };
-
-  void RecordPass();
-  void BackwardPass();
-
-  void KillMIR(MIRData* data);
-  static void KillMIR(MIR* mir);
-  static void ChangeBinOp2AddrToPlainBinOp(MIR* mir);
-  MIR* CreatePhi(int s_reg);
-  MIR* RenameSRegDefOrCreatePhi(uint16_t def_change, uint16_t last_change, MIR* mir_to_kill);
-
-  // Update state variables going backwards through a MIR.
-  void BackwardPassProcessLastMIR();
-
-  uint16_t FindChangesToKill(uint16_t first_change, uint16_t last_change);
-  void BackwardPassTryToKillRevertVRegs();
-  bool BackwardPassTryToKillLastMIR();
-
-  void RecordPassKillMoveByRenamingSrcDef(uint16_t src_change, uint16_t move_change);
-  void RecordPassTryToKillOverwrittenMoveOrMoveSrc(uint16_t check_change);
-  void RecordPassTryToKillOverwrittenMoveOrMoveSrc();
-  void RecordPassTryToKillLastMIR();
-
-  bool RecordMIR(MIR* mir);
-
-  const GlobalValueNumbering* const gvn_;
-  MIRGraph* const mir_graph_;
-
-  VRegChains vreg_chains_;
-  BasicBlock* bb_;
-  const LocalValueNumbering* lvn_;
-  size_t no_uses_all_since_;  // The change index after the last change with uses_all_vregs set.
-
-  // Data used when processing MIRs in reverse order.
-  ArenaBitVector* unused_vregs_;              // vregs that are not needed later.
-  ArenaBitVector* vregs_to_kill_;             // vregs that revert to a previous value.
-  uint16_t* kill_heads_;  // For each vreg in vregs_to_kill_, the first change to kill.
-  ScopedArenaVector<uint16_t> changes_to_kill_;
-  ArenaBitVector* dependent_vregs_;
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_GVN_DEAD_CODE_ELIMINATION_H_
diff --git a/compiler/dex/gvn_dead_code_elimination_test.cc b/compiler/dex/gvn_dead_code_elimination_test.cc
deleted file mode 100644
index 22fb835..0000000
--- a/compiler/dex/gvn_dead_code_elimination_test.cc
+++ /dev/null
@@ -1,2201 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "dataflow_iterator-inl.h"
-#include "dex/mir_field_info.h"
-#include "global_value_numbering.h"
-#include "gvn_dead_code_elimination.h"
-#include "local_value_numbering.h"
-#include "gtest/gtest.h"
-
-namespace art {
-
-class GvnDeadCodeEliminationTest : public testing::Test {
- protected:
-  static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
-
-  struct IFieldDef {
-    uint16_t field_idx;
-    uintptr_t declaring_dex_file;
-    uint16_t declaring_field_idx;
-    bool is_volatile;
-    DexMemAccessType type;
-  };
-
-  struct SFieldDef {
-    uint16_t field_idx;
-    uintptr_t declaring_dex_file;
-    uint16_t declaring_field_idx;
-    bool is_volatile;
-    DexMemAccessType type;
-  };
-
-  struct BBDef {
-    static constexpr size_t kMaxSuccessors = 4;
-    static constexpr size_t kMaxPredecessors = 4;
-
-    BBType type;
-    size_t num_successors;
-    BasicBlockId successors[kMaxSuccessors];
-    size_t num_predecessors;
-    BasicBlockId predecessors[kMaxPredecessors];
-  };
-
-  struct MIRDef {
-    static constexpr size_t kMaxSsaDefs = 2;
-    static constexpr size_t kMaxSsaUses = 4;
-
-    BasicBlockId bbid;
-    Instruction::Code opcode;
-    int64_t value;
-    uint32_t field_info;
-    size_t num_uses;
-    int32_t uses[kMaxSsaUses];
-    size_t num_defs;
-    int32_t defs[kMaxSsaDefs];
-  };
-
-#define DEF_SUCC0() \
-    0u, { }
-#define DEF_SUCC1(s1) \
-    1u, { s1 }
-#define DEF_SUCC2(s1, s2) \
-    2u, { s1, s2 }
-#define DEF_SUCC3(s1, s2, s3) \
-    3u, { s1, s2, s3 }
-#define DEF_SUCC4(s1, s2, s3, s4) \
-    4u, { s1, s2, s3, s4 }
-#define DEF_PRED0() \
-    0u, { }
-#define DEF_PRED1(p1) \
-    1u, { p1 }
-#define DEF_PRED2(p1, p2) \
-    2u, { p1, p2 }
-#define DEF_PRED3(p1, p2, p3) \
-    3u, { p1, p2, p3 }
-#define DEF_PRED4(p1, p2, p3, p4) \
-    4u, { p1, p2, p3, p4 }
-#define DEF_BB(type, succ, pred) \
-    { type, succ, pred }
-
-#define DEF_CONST(bb, opcode, reg, value) \
-    { bb, opcode, value, 0u, 0, { }, 1, { reg } }
-#define DEF_CONST_WIDE(bb, opcode, reg, value) \
-    { bb, opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_CONST_STRING(bb, opcode, reg, index) \
-    { bb, opcode, index, 0u, 0, { }, 1, { reg } }
-#define DEF_IGET(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 1, { obj }, 1, { reg } }
-#define DEF_IGET_WIDE(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
-#define DEF_IPUT(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
-#define DEF_IPUT_WIDE(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
-#define DEF_SGET(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 0, { }, 1, { reg } }
-#define DEF_SGET_WIDE(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_SPUT(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 1, { reg }, 0, { } }
-#define DEF_SPUT_WIDE(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
-#define DEF_AGET(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 2, { obj, idx }, 1, { reg } }
-#define DEF_AGET_WIDE(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 2, { obj, idx }, 2, { reg, reg + 1 } }
-#define DEF_APUT(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 3, { reg, obj, idx }, 0, { } }
-#define DEF_APUT_WIDE(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }
-#define DEF_INVOKE1(bb, opcode, reg) \
-    { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
-#define DEF_UNIQUE_REF(bb, opcode, reg) \
-    { bb, opcode, 0u, 0u, 0, { }, 1, { reg } }  // CONST_CLASS, CONST_STRING, NEW_ARRAY, ...
-#define DEF_IFZ(bb, opcode, reg) \
-    { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
-#define DEF_MOVE(bb, opcode, reg, src) \
-    { bb, opcode, 0u, 0u, 1, { src }, 1, { reg } }
-#define DEF_MOVE_WIDE(bb, opcode, reg, src) \
-    { bb, opcode, 0u, 0u, 2, { src, src + 1 }, 2, { reg, reg + 1 } }
-#define DEF_PHI2(bb, reg, src1, src2) \
-    { bb, static_cast<Instruction::Code>(kMirOpPhi), 0, 0u, 2u, { src1, src2 }, 1, { reg } }
-#define DEF_UNOP(bb, opcode, result, src1) \
-    { bb, opcode, 0u, 0u, 1, { src1 }, 1, { result } }
-#define DEF_BINOP(bb, opcode, result, src1, src2) \
-    { bb, opcode, 0u, 0u, 2, { src1, src2 }, 1, { result } }
-#define DEF_BINOP_WIDE(bb, opcode, result, src1, src2) \
-    { bb, opcode, 0u, 0u, 4, { src1, src1 + 1, src2, src2 + 1 }, 2, { result, result + 1 } }
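-  // E.g. DEF_MOVE(3, Instruction::MOVE_OBJECT, 2u, 0u) describes a move in block #3 that
-  // uses s_reg 0 and defines s_reg 2.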
-
-  void DoPrepareIFields(const IFieldDef* defs, size_t count) {
-    cu_.mir_graph->ifield_lowering_infos_.clear();
-    cu_.mir_graph->ifield_lowering_infos_.reserve(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const IFieldDef* def = &defs[i];
-      MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
-      if (def->declaring_dex_file != 0u) {
-        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
-        field_info.declaring_field_idx_ = def->declaring_field_idx;
-        field_info.flags_ =
-            MirIFieldLoweringInfo::kFlagFastGet | MirIFieldLoweringInfo::kFlagFastPut |
-            (field_info.flags_ & ~(def->is_volatile ? 0u : MirIFieldLoweringInfo::kFlagIsVolatile));
-      }
-      cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
-    }
-  }
-
-  template <size_t count>
-  void PrepareIFields(const IFieldDef (&defs)[count]) {
-    DoPrepareIFields(defs, count);
-  }
-
-  void DoPrepareSFields(const SFieldDef* defs, size_t count) {
-    cu_.mir_graph->sfield_lowering_infos_.clear();
-    cu_.mir_graph->sfield_lowering_infos_.reserve(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const SFieldDef* def = &defs[i];
-      MirSFieldLoweringInfo field_info(def->field_idx, def->type);
-      // Mark even unresolved fields as initialized.
-      field_info.flags_ |= MirSFieldLoweringInfo::kFlagClassIsInitialized;
-      // NOTE: MirSFieldLoweringInfo::kFlagClassIsInDexCache isn't used by GVN.
-      if (def->declaring_dex_file != 0u) {
-        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
-        field_info.declaring_field_idx_ = def->declaring_field_idx;
-        field_info.flags_ =
-            MirSFieldLoweringInfo::kFlagFastGet | MirSFieldLoweringInfo::kFlagFastPut |
-            (field_info.flags_ & ~(def->is_volatile ? 0u : MirSFieldLoweringInfo::kFlagIsVolatile));
-      }
-      cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
-    }
-  }
-
-  template <size_t count>
-  void PrepareSFields(const SFieldDef (&defs)[count]) {
-    DoPrepareSFields(defs, count);
-  }
-
-  void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
-    cu_.mir_graph->block_id_map_.clear();
-    cu_.mir_graph->block_list_.clear();
-    ASSERT_LT(3u, count);  // null, entry, exit and at least one bytecode block.
-    ASSERT_EQ(kNullBlock, defs[0].type);
-    ASSERT_EQ(kEntryBlock, defs[1].type);
-    ASSERT_EQ(kExitBlock, defs[2].type);
-    for (size_t i = 0u; i != count; ++i) {
-      const BBDef* def = &defs[i];
-      BasicBlock* bb = cu_.mir_graph->CreateNewBB(def->type);
-      if (def->num_successors <= 2) {
-        bb->successor_block_list_type = kNotUsed;
-        bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
-        bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
-      } else {
-        bb->successor_block_list_type = kPackedSwitch;
-        bb->fall_through = 0u;
-        bb->taken = 0u;
-        bb->successor_blocks.reserve(def->num_successors);
-        for (size_t j = 0u; j != def->num_successors; ++j) {
-          SuccessorBlockInfo* successor_block_info =
-              static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
-                                                               kArenaAllocSuccessors));
-          successor_block_info->block = j;
-          successor_block_info->key = 0u;  // Not used in this test.
-          bb->successor_blocks.push_back(successor_block_info);
-        }
-      }
-      bb->predecessors.assign(def->predecessors, def->predecessors + def->num_predecessors);
-      if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
-        bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
-            cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
-        bb->data_flow_info->live_in_v = live_in_v_;
-        bb->data_flow_info->vreg_to_ssa_map_exit = nullptr;
-      }
-    }
-    ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
-    cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
-    ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
-    cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_[2];
-    ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
-  }
-
-  template <size_t count>
-  void PrepareBasicBlocks(const BBDef (&defs)[count]) {
-    DoPrepareBasicBlocks(defs, count);
-  }
-
-  int SRegToVReg(int32_t s_reg, bool wide) {
-    int v_reg = cu_.mir_graph->SRegToVReg(s_reg);
-    CHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
-    if (wide) {
-      CHECK_LT(static_cast<size_t>(v_reg + 1), num_vregs_);
-    }
-    return v_reg;
-  }
-
-  int SRegToVReg(int32_t* uses, size_t* use, bool wide) {
-    int v_reg = SRegToVReg(uses[*use], wide);
-    if (wide) {
-      CHECK_EQ(uses[*use] + 1, uses[*use + 1]);
-      *use += 2u;
-    } else {
-      *use += 1u;
-    }
-    return v_reg;
-  }
-
-  void DoPrepareMIRs(const MIRDef* defs, size_t count) {
-    mir_count_ = count;
-    mirs_ = reinterpret_cast<MIR*>(cu_.arena.Alloc(sizeof(MIR) * count, kArenaAllocMIR));
-    ssa_reps_.resize(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const MIRDef* def = &defs[i];
-      MIR* mir = &mirs_[i];
-      ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.size());
-      BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
-      bb->AppendMIR(mir);
-      mir->dalvikInsn.opcode = def->opcode;
-      mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
-      mir->dalvikInsn.vB_wide = def->value;
-      if (IsInstructionIGetOrIPut(def->opcode)) {
-        ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.size());
-        mir->meta.ifield_lowering_info = def->field_info;
-        ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->field_info].MemAccessType(),
-                  IGetOrIPutMemAccessType(def->opcode));
-      } else if (IsInstructionSGetOrSPut(def->opcode)) {
-        ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.size());
-        mir->meta.sfield_lowering_info = def->field_info;
-        ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->field_info].MemAccessType(),
-                  SGetOrSPutMemAccessType(def->opcode));
-      } else if (def->opcode == static_cast<Instruction::Code>(kMirOpPhi)) {
-        mir->meta.phi_incoming =
-            allocator_->AllocArray<BasicBlockId>(def->num_uses, kArenaAllocDFInfo);
-        ASSERT_EQ(def->num_uses, bb->predecessors.size());
-        std::copy(bb->predecessors.begin(), bb->predecessors.end(), mir->meta.phi_incoming);
-      }
-      mir->ssa_rep = &ssa_reps_[i];
-      cu_.mir_graph->AllocateSSAUseData(mir, def->num_uses);
-      std::copy_n(def->uses, def->num_uses, mir->ssa_rep->uses);
-      // Keep mir->ssa_rep->fp_use[.] zero-initialized (false). Not used by DCE, only copied.
-      cu_.mir_graph->AllocateSSADefData(mir, def->num_defs);
-      std::copy_n(def->defs, def->num_defs, mir->ssa_rep->defs);
-      // Keep mir->ssa_rep->fp_def[.] zero-initialized (false). Not used by DCE, only copied.
-      mir->dalvikInsn.opcode = def->opcode;
-      mir->offset = i;  // LVN uses offset only for debug output
-      mir->optimization_flags = 0u;
-      uint64_t df_attrs = MIRGraph::GetDataFlowAttributes(mir);
-      if ((df_attrs & DF_DA) != 0) {
-        CHECK_NE(def->num_defs, 0u);
-        mir->dalvikInsn.vA = SRegToVReg(def->defs[0], (df_attrs & DF_A_WIDE) != 0);
-        bb->data_flow_info->vreg_to_ssa_map_exit[mir->dalvikInsn.vA] = def->defs[0];
-        if ((df_attrs & DF_A_WIDE) != 0) {
-          CHECK_EQ(def->defs[0] + 1, def->defs[1]);
-          bb->data_flow_info->vreg_to_ssa_map_exit[mir->dalvikInsn.vA + 1u] = def->defs[0] + 1;
-        }
-      }
-      if ((df_attrs & (DF_UA | DF_UB | DF_UC)) != 0) {
-        size_t use = 0;
-        if ((df_attrs & DF_UA) != 0) {
-          mir->dalvikInsn.vA = SRegToVReg(mir->ssa_rep->uses, &use, (df_attrs & DF_A_WIDE) != 0);
-        }
-        if ((df_attrs & DF_UB) != 0) {
-          mir->dalvikInsn.vB = SRegToVReg(mir->ssa_rep->uses, &use, (df_attrs & DF_B_WIDE) != 0);
-        }
-        if ((df_attrs & DF_UC) != 0) {
-          mir->dalvikInsn.vC = SRegToVReg(mir->ssa_rep->uses, &use, (df_attrs & DF_C_WIDE) != 0);
-        }
-        DCHECK_EQ(def->num_uses, use);
-      }
-    }
-    DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(
-        cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
-    code_item->insns_size_in_code_units_ = 2u * count;
-    code_item->registers_size_ = kMaxVRegs;
-    cu_.mir_graph->current_code_item_ = code_item;
-  }
-
-  template <size_t count>
-  void PrepareMIRs(const MIRDef (&defs)[count]) {
-    DoPrepareMIRs(defs, count);
-  }
-
-  template <size_t count>
-  void PrepareSRegToVRegMap(const int (&map)[count]) {
-    cu_.mir_graph->ssa_base_vregs_.assign(map, map + count);
-    num_vregs_ = *std::max_element(map, map + count) + 1u;
-    AllNodesIterator iterator(cu_.mir_graph.get());
-    for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
-      if (bb->data_flow_info != nullptr) {
-        bb->data_flow_info->vreg_to_ssa_map_exit = static_cast<int32_t*>(
-            cu_.arena.Alloc(sizeof(int32_t) * num_vregs_, kArenaAllocDFInfo));
-        std::fill_n(bb->data_flow_info->vreg_to_ssa_map_exit, num_vregs_, INVALID_SREG);
-      }
-    }
-  }
-
-  void PerformGVN() {
-    cu_.mir_graph->SSATransformationStart();
-    cu_.mir_graph->ComputeDFSOrders();
-    cu_.mir_graph->ComputeDominators();
-    cu_.mir_graph->ComputeTopologicalSortOrder();
-    cu_.mir_graph->SSATransformationEnd();
-    cu_.mir_graph->temp_.gvn.ifield_ids = GlobalValueNumbering::PrepareGvnFieldIds(
-        allocator_.get(), cu_.mir_graph->ifield_lowering_infos_);
-    cu_.mir_graph->temp_.gvn.sfield_ids = GlobalValueNumbering::PrepareGvnFieldIds(
-        allocator_.get(), cu_.mir_graph->sfield_lowering_infos_);
-    ASSERT_TRUE(gvn_ == nullptr);
-    gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get(),
-                                                           GlobalValueNumbering::kModeGvn));
-    value_names_.resize(mir_count_, 0xffffu);
-    LoopRepeatingTopologicalSortIterator iterator(cu_.mir_graph.get());
-    bool change = false;
-    for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
-      LocalValueNumbering* lvn = gvn_->PrepareBasicBlock(bb);
-      if (lvn != nullptr) {
-        for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-          value_names_[mir - mirs_] = lvn->GetValueNumber(mir);
-        }
-      }
-      change = (lvn != nullptr) && gvn_->FinishBasicBlock(bb);
-      ASSERT_TRUE(gvn_->Good());
-    }
-  }
-
-  void PerformGVNCodeModifications() {
-    ASSERT_TRUE(gvn_ != nullptr);
-    ASSERT_TRUE(gvn_->Good());
-    gvn_->StartPostProcessing();
-    TopologicalSortIterator iterator(cu_.mir_graph.get());
-    for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
-      LocalValueNumbering* lvn = gvn_->PrepareBasicBlock(bb);
-      if (lvn != nullptr) {
-        for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-          uint16_t value_name = lvn->GetValueNumber(mir);
-          ASSERT_EQ(value_name, value_names_[mir - mirs_]);
-        }
-      }
-      bool change = (lvn != nullptr) && gvn_->FinishBasicBlock(bb);
-      ASSERT_FALSE(change);
-      ASSERT_TRUE(gvn_->Good());
-    }
-  }
-
-  void FillVregToSsaRegExitMaps() {
-    // Fill in vreg_to_ssa_map_exit for each BB.
-    PreOrderDfsIterator iterator(cu_.mir_graph.get());
-    for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
-      if (bb->block_type == kDalvikByteCode) {
-        CHECK(!bb->predecessors.empty());
-        BasicBlock* pred_bb = cu_.mir_graph->GetBasicBlock(bb->predecessors[0]);
-        for (size_t v_reg = 0; v_reg != num_vregs_; ++v_reg) {
-          if (bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] == INVALID_SREG) {
-            bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] =
-                pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
-          }
-        }
-      }
-    }
-  }
-
-  template <size_t count>
-  void MarkAsWideSRegs(const int32_t (&sregs)[count]) {
-    for (int32_t sreg : sregs) {
-      cu_.mir_graph->reg_location_[sreg].wide = true;
-      cu_.mir_graph->reg_location_[sreg + 1].wide = true;
-      cu_.mir_graph->reg_location_[sreg + 1].high_word = true;
-    }
-  }
-
-  void PerformDCE() {
-    FillVregToSsaRegExitMaps();
-    cu_.mir_graph->GetNumOfCodeAndTempVRs();
-    dce_.reset(new (allocator_.get()) GvnDeadCodeElimination(gvn_.get(), allocator_.get()));
-    PreOrderDfsIterator iterator(cu_.mir_graph.get());
-    for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
-      if (bb->block_type == kDalvikByteCode) {
-        dce_->Apply(bb);
-      }
-    }
-  }
-
-  void PerformGVN_DCE() {
-    PerformGVN();
-    PerformGVNCodeModifications();  // Eliminate null/range checks.
-    PerformDCE();
-  }
-
-  template <size_t count>
-  void ExpectValueNamesNE(const size_t (&indexes)[count]) {
-    for (size_t i1 = 0; i1 != count; ++i1) {
-      size_t idx1 = indexes[i1];
-      for (size_t i2 = i1 + 1; i2 != count; ++i2) {
-        size_t idx2 = indexes[i2];
-        EXPECT_NE(value_names_[idx1], value_names_[idx2]) << idx1 << " " << idx2;
-      }
-    }
-  }
-
-  template <size_t count>
-  void ExpectNoNullCheck(const size_t (&indexes)[count]) {
-    for (size_t i = 0; i != count; ++i) {
-      size_t idx = indexes[i];
-      EXPECT_EQ(MIR_IGNORE_NULL_CHECK, mirs_[idx].optimization_flags & MIR_IGNORE_NULL_CHECK)
-          << idx;
-    }
-    size_t num_no_null_ck = 0u;
-    for (size_t i = 0; i != mir_count_; ++i) {
-      if ((mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) {
-        ++num_no_null_ck;
-      }
-    }
-    EXPECT_EQ(count, num_no_null_ck);
-  }
-
-  GvnDeadCodeEliminationTest()
-      : pool_(),
-        cu_(&pool_, kRuntimeISA, nullptr, nullptr),
-        num_vregs_(0u),
-        mir_count_(0u),
-        mirs_(nullptr),
-        ssa_reps_(),
-        allocator_(),
-        gvn_(),
-        dce_(),
-        value_names_(),
-        live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false)) {
-    cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
-    cu_.access_flags = kAccStatic;  // Don't let "this" interfere with this test.
-    allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
-    // By default, the zero-initialized reg_location_[.] with ref == false tells LVN that
-    // 0 constants are integral, not references, and the values are all narrow.
-    // Nothing else is used by LVN/GVN. Tests can override the default values as needed.
-    cu_.mir_graph->reg_location_ = static_cast<RegLocation*>(cu_.arena.Alloc(
-        kMaxSsaRegs * sizeof(cu_.mir_graph->reg_location_[0]), kArenaAllocRegAlloc));
-    cu_.mir_graph->num_ssa_regs_ = kMaxSsaRegs;
-    // Bind all possible sregs to live vregs for test purposes.
-    live_in_v_->SetInitialBits(kMaxSsaRegs);
-    cu_.mir_graph->ssa_base_vregs_.reserve(kMaxSsaRegs);
-    cu_.mir_graph->ssa_subscripts_.reserve(kMaxSsaRegs);
-    for (unsigned int i = 0; i < kMaxSsaRegs; i++) {
-      cu_.mir_graph->ssa_base_vregs_.push_back(i);
-      cu_.mir_graph->ssa_subscripts_.push_back(0);
-    }
-    // Set shorty for a void-returning method without arguments.
-    cu_.shorty = "V";
-  }
-
-  static constexpr size_t kMaxSsaRegs = 16384u;
-  static constexpr size_t kMaxVRegs = 256u;
-
-  ArenaPool pool_;
-  CompilationUnit cu_;
-  size_t num_vregs_;
-  size_t mir_count_;
-  MIR* mirs_;
-  std::vector<SSARepresentation> ssa_reps_;
-  std::unique_ptr<ScopedArenaAllocator> allocator_;
-  std::unique_ptr<GlobalValueNumbering> gvn_;
-  std::unique_ptr<GvnDeadCodeElimination> dce_;
-  std::vector<uint16_t> value_names_;
-  ArenaBitVector* live_in_v_;
-};
-
-constexpr uint16_t GvnDeadCodeEliminationTest::kNoValue;
-
-class GvnDeadCodeEliminationTestSimple : public GvnDeadCodeEliminationTest {
- public:
-  GvnDeadCodeEliminationTestSimple();
-
- private:
-  static const BBDef kSimpleBbs[];
-};
-
-const GvnDeadCodeEliminationTest::BBDef GvnDeadCodeEliminationTestSimple::kSimpleBbs[] = {
-    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(3)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(1)),
-};
-
-GvnDeadCodeEliminationTestSimple::GvnDeadCodeEliminationTestSimple()
-    : GvnDeadCodeEliminationTest() {
-  PrepareBasicBlocks(kSimpleBbs);
-}
-
-class GvnDeadCodeEliminationTestDiamond : public GvnDeadCodeEliminationTest {
- public:
-  GvnDeadCodeEliminationTestDiamond();
-
- private:
-  static const BBDef kDiamondBbs[];
-};
-
-const GvnDeadCodeEliminationTest::BBDef GvnDeadCodeEliminationTestDiamond::kDiamondBbs[] = {
-    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),  // Block #3, top of the diamond.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // Block #4, left side.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // Block #5, right side.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),  // Block #6, bottom.
-};
-
-GvnDeadCodeEliminationTestDiamond::GvnDeadCodeEliminationTestDiamond()
-    : GvnDeadCodeEliminationTest() {
-  PrepareBasicBlocks(kDiamondBbs);
-}
-
-class GvnDeadCodeEliminationTestLoop : public GvnDeadCodeEliminationTest {
- public:
-  GvnDeadCodeEliminationTestLoop();
-
- private:
-  static const BBDef kLoopBbs[];
-};
-
-const GvnDeadCodeEliminationTest::BBDef GvnDeadCodeEliminationTestLoop::kLoopBbs[] = {
-    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-    DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)),  // "taken" loops to self.
-    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
-};
-
-GvnDeadCodeEliminationTestLoop::GvnDeadCodeEliminationTestLoop()
-    : GvnDeadCodeEliminationTest() {
-  PrepareBasicBlocks(kLoopBbs);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename1) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 2u, 0u),
-      DEF_IGET(3, Instruction::IGET, 3u, 2u, 1u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 2 };
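-  // Note that s_regs 2 and 3 both map to v_reg 2, so the second IGET overwrites the move's dest.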
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 3 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  const size_t no_null_ck_indexes[] = { 1, 3 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, true, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the IGET uses the s_reg 0, v_reg 0, defined by mirs_[0].
-  ASSERT_EQ(1, mirs_[3].ssa_rep->num_uses);
-  EXPECT_EQ(0, mirs_[3].ssa_rep->uses[0]);
-  EXPECT_EQ(0u, mirs_[3].dalvikInsn.vB);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename2) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 2u, 0u),
-      DEF_IGET(3, Instruction::IGET, 3u, 2u, 1u),
-      DEF_CONST(3, Instruction::CONST, 4u, 1000),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 3, 4 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  const size_t no_null_ck_indexes[] = { 1, 3 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, true, false, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the IGET uses the s_reg 0, v_reg 0, defined by mirs_[0].
-  ASSERT_EQ(1, mirs_[3].ssa_rep->num_uses);
-  EXPECT_EQ(0, mirs_[3].ssa_rep->uses[0]);
-  EXPECT_EQ(0u, mirs_[3].dalvikInsn.vB);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename3) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 2u, 0u),
-      DEF_IGET(3, Instruction::IGET, 3u, 2u, 1u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 0 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 3 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  const size_t no_null_ck_indexes[] = { 1, 3 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, true, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the NEW_INSTANCE defines the s_reg 2, v_reg 2, originally defined by the move.
-  ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
-  EXPECT_EQ(2, mirs_[0].ssa_rep->defs[0]);
-  EXPECT_EQ(2u, mirs_[0].dalvikInsn.vA);
-  // Check that the first IGET is using the s_reg 2, v_reg 2.
-  ASSERT_EQ(1, mirs_[1].ssa_rep->num_uses);
-  EXPECT_EQ(2, mirs_[1].ssa_rep->uses[0]);
-  EXPECT_EQ(2u, mirs_[1].dalvikInsn.vB);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename4) {
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 1u, 0u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 2u, 1u),
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 3u, 1000u),
-  };
-
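-  // A wide value occupies two consecutive s_regs and v_regs (the second entry is
-  // the high word), so the CONST_WIDE's pair {3, 4} maps to v_regs {0, 1};
-  // MarkAsWideSRegs() below tags s_reg 3 as the start of such a pair.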
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 0, 1 /* high word */ };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 3 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 3 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  static const bool eliminated[] = {
-      false, true, true, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the NEW_INSTANCE defines the s_reg 2, v_reg 2, originally defined by the move 2u.
-  ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
-  EXPECT_EQ(2, mirs_[0].ssa_rep->defs[0]);
-  EXPECT_EQ(2u, mirs_[0].dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename5) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 2u, 1u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 3u, 0u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 4u, 3u),
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 5u, 1000u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 3, 0, 1 /* high word */ };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 5 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 5 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[3]);
-  EXPECT_EQ(value_names_[0], value_names_[4]);
-
-  static const bool eliminated[] = {
-      false, false, false, true, true, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the NEW_INSTANCE defines the s_reg 4, v_reg 3, originally defined by the move 4u.
-  ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
-  EXPECT_EQ(4, mirs_[0].ssa_rep->defs[0]);
-  EXPECT_EQ(3u, mirs_[0].dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename6) {
-  static const MIRDef mirs[] = {
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000u),
-      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 2u, 0u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1 /* high word */, 1, 2 /* high word */ };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 0, 2 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-
-  static const bool eliminated[] = {
-      false, true
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the CONST_WIDE defines the s_reg 2, v_reg 1, originally defined by the move 2u.
-  ASSERT_EQ(2, mirs_[0].ssa_rep->num_defs);
-  EXPECT_EQ(2, mirs_[0].ssa_rep->defs[0]);
-  EXPECT_EQ(3, mirs_[0].ssa_rep->defs[1]);
-  EXPECT_EQ(1u, mirs_[0].dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename7) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000u),
-      DEF_MOVE(3, Instruction::MOVE, 1u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 2u, 0u, 1u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 0 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[2]);
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-
-  static const bool eliminated[] = {
-      false, true, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the CONST defines the s_reg 1, v_reg 1, originally defined by the move 1u.
-  ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
-  EXPECT_EQ(1, mirs_[0].ssa_rep->defs[0]);
-  EXPECT_EQ(1u, mirs_[0].dalvikInsn.vA);
-  // Check that the ADD_INT inputs are both s_reg 1, v_reg 1.
-  ASSERT_EQ(2, mirs_[2].ssa_rep->num_uses);
-  EXPECT_EQ(1, mirs_[2].ssa_rep->uses[0]);
-  EXPECT_EQ(1, mirs_[2].ssa_rep->uses[1]);
-  EXPECT_EQ(1u, mirs_[2].dalvikInsn.vB);
-  EXPECT_EQ(1u, mirs_[2].dalvikInsn.vC);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename8) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000u),
-      DEF_MOVE(3, Instruction::MOVE, 1u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT_2ADDR, 2u, 0u, 1u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 0 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[2]);
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-
-  static const bool eliminated[] = {
-      false, true, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the CONST defines the s_reg 1, v_reg 1, originally defined by the move 1u.
-  ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
-  EXPECT_EQ(1, mirs_[0].ssa_rep->defs[0]);
-  EXPECT_EQ(1u, mirs_[0].dalvikInsn.vA);
-  // Check that the ADD_INT_2ADDR was replaced by ADD_INT and inputs are both s_reg 1, vreg 1.
-  EXPECT_EQ(Instruction::ADD_INT, mirs_[2].dalvikInsn.opcode);
-  ASSERT_EQ(2, mirs_[2].ssa_rep->num_uses);
-  EXPECT_EQ(1, mirs_[2].ssa_rep->uses[0]);
-  EXPECT_EQ(1, mirs_[2].ssa_rep->uses[1]);
-  EXPECT_EQ(1u, mirs_[2].dalvikInsn.vB);
-  EXPECT_EQ(1u, mirs_[2].dalvikInsn.vC);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Rename9) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000u),
-      DEF_BINOP(3, Instruction::ADD_INT_2ADDR, 1u, 0u, 0u),
-      DEF_MOVE(3, Instruction::MOVE, 2u, 1u),
-      DEF_CONST(3, Instruction::CONST, 3u, 3000u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 0, 1, 0 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 3 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[1], value_names_[2]);
-
-  static const bool eliminated[] = {
-      false, false, true, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the ADD_INT_2ADDR was replaced by ADD_INT and output is in s_reg 2, vreg 1.
-  EXPECT_EQ(Instruction::ADD_INT, mirs_[1].dalvikInsn.opcode);
-  ASSERT_EQ(2, mirs_[1].ssa_rep->num_uses);
-  EXPECT_EQ(0, mirs_[1].ssa_rep->uses[0]);
-  EXPECT_EQ(0, mirs_[1].ssa_rep->uses[1]);
-  EXPECT_EQ(0u, mirs_[1].dalvikInsn.vB);
-  EXPECT_EQ(0u, mirs_[1].dalvikInsn.vC);
-  ASSERT_EQ(1, mirs_[1].ssa_rep->num_defs);
-  EXPECT_EQ(2, mirs_[1].ssa_rep->defs[0]);
-  EXPECT_EQ(1u, mirs_[1].dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, NoRename1) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 2u, 1u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 3u, 0u),
-      DEF_CONST(3, Instruction::CONST, 4u, 1000),
-      DEF_IGET(3, Instruction::IGET, 5u, 3u, 1u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 0, 1 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 4, 5 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[3]);
-
-  const size_t no_null_ck_indexes[] = { 1, 5 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, NoRename2) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 2u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 3u, 0u),
-      DEF_CONST(3, Instruction::CONST, 4u, 1000),
-      DEF_IGET(3, Instruction::IGET, 5u, 3u, 1u),
-      DEF_CONST(3, Instruction::CONST, 6u, 2000),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 2, 0, 3, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 4, 5, 6 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[3]);
-
-  const size_t no_null_ck_indexes[] = { 1, 5 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, NoRename3) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
-      DEF_IGET(3, Instruction::IGET, 2u, 0u, 2u),
-      DEF_BINOP(3, Instruction::ADD_INT, 3u, 1u, 2u),
-      DEF_MOVE(3, Instruction::MOVE_OBJECT, 4u, 0u),
-      DEF_IGET(3, Instruction::IGET, 5u, 4u, 1u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 2, 0 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 5 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[4]);
-
-  const size_t no_null_ck_indexes[] = { 1, 2, 5 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, NoRename4) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000u),
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 1u),
-      DEF_CONST(3, Instruction::CONST, 2u, 100u),
-      DEF_CONST(3, Instruction::CONST, 3u, 200u),
-      DEF_BINOP(3, Instruction::OR_INT_2ADDR, 4u, 2u, 3u),   // 3. Find definition of the move src.
-      DEF_MOVE(3, Instruction::MOVE, 5u, 0u),                // 4. Uses move dest vreg.
-      DEF_MOVE(3, Instruction::MOVE, 6u, 4u),                // 2. Find overwritten move src.
-      DEF_CONST(3, Instruction::CONST, 7u, 2000u),           // 1. Overwrites 4u, look for moves.
-  };
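-
-  // Working backwards from the CONST (1.), DCE finds the move whose source it
-  // overwrote (2.) and the OR_INT_2ADDR that defined that source (3.); renaming
-  // the OR result into the move's dest v0 is rejected because the MOVE 5u still
-  // reads the old value of v0 in between (4.), so nothing is eliminated.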
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 2, 4, 0, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 7 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[5]);
-  EXPECT_EQ(value_names_[4], value_names_[6]);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Simple1) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },
-      { 1u, 1u, 1u, false, kDexMemAccessObject },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 1u, 0u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 2u, 1u, 1u),
-      DEF_IGET(3, Instruction::IGET, 3u, 2u, 2u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 4u, 0u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 5u, 4u, 1u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 1, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_NE(value_names_[0], value_names_[1]);
-  EXPECT_NE(value_names_[0], value_names_[2]);
-  EXPECT_NE(value_names_[0], value_names_[3]);
-  EXPECT_NE(value_names_[1], value_names_[2]);
-  EXPECT_NE(value_names_[1], value_names_[3]);
-  EXPECT_NE(value_names_[2], value_names_[3]);
-  EXPECT_EQ(value_names_[1], value_names_[4]);
-  EXPECT_EQ(value_names_[2], value_names_[5]);
-
-  EXPECT_EQ(MIR_IGNORE_NULL_CHECK, mirs_[4].optimization_flags & MIR_IGNORE_NULL_CHECK);
-  EXPECT_EQ(MIR_IGNORE_NULL_CHECK, mirs_[5].optimization_flags & MIR_IGNORE_NULL_CHECK);
-
-  static const bool eliminated[] = {
-      false, false, false, false, true, true
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the sregs have been renamed correctly.
-  ASSERT_EQ(1, mirs_[1].ssa_rep->num_defs);
-  EXPECT_EQ(4, mirs_[1].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[1].ssa_rep->num_uses);
-  EXPECT_EQ(0, mirs_[1].ssa_rep->uses[0]);
-  ASSERT_EQ(1, mirs_[2].ssa_rep->num_defs);
-  EXPECT_EQ(5, mirs_[2].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[2].ssa_rep->num_uses);
-  EXPECT_EQ(4, mirs_[2].ssa_rep->uses[0]);
-  ASSERT_EQ(1, mirs_[3].ssa_rep->num_defs);
-  EXPECT_EQ(3, mirs_[3].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[3].ssa_rep->num_uses);
-  EXPECT_EQ(5, mirs_[3].ssa_rep->uses[0]);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Simple2) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(3, Instruction::CONST, 1u, 1000),
-      DEF_IGET(3, Instruction::IGET, 2u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT_2ADDR, 3u, 2u, 1u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 4u, 3u),
-      DEF_IGET(3, Instruction::IGET, 5u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT_2ADDR, 6u, 5u, 1u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 2, 3, 2, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[2], value_names_[5]);
-  EXPECT_EQ(value_names_[3], value_names_[6]);
-
-  const size_t no_null_ck_indexes[] = { 2, 5 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, true, true
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the sregs have been renamed correctly.
-  ASSERT_EQ(1, mirs_[3].ssa_rep->num_defs);
-  EXPECT_EQ(6, mirs_[3].ssa_rep->defs[0]);
-  ASSERT_EQ(2, mirs_[3].ssa_rep->num_uses);
-  EXPECT_EQ(2, mirs_[3].ssa_rep->uses[0]);
-  EXPECT_EQ(1, mirs_[3].ssa_rep->uses[1]);
-  ASSERT_EQ(1, mirs_[4].ssa_rep->num_defs);
-  EXPECT_EQ(4, mirs_[4].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[4].ssa_rep->num_uses);
-  EXPECT_EQ(6, mirs_[4].ssa_rep->uses[0]);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Simple3) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(3, Instruction::CONST, 1u, 1000),
-      DEF_CONST(3, Instruction::CONST, 2u, 2000),
-      DEF_CONST(3, Instruction::CONST, 3u, 3000),
-      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
-      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),  // Simple elimination of ADD+MUL
-      DEF_BINOP(3, Instruction::SUB_INT, 12u, 11u, 3u),  // allows simple elimination of IGET+SUB.
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 5, 4, 6, 4, 5, 5, 4 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[4], value_names_[9]);
-  EXPECT_EQ(value_names_[5], value_names_[10]);
-  EXPECT_EQ(value_names_[6], value_names_[11]);
-  EXPECT_EQ(value_names_[7], value_names_[12]);
-
-  const size_t no_null_ck_indexes[] = { 4, 9 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false, false, false, true, true, true, true
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the sregs have been renamed correctly.
-  ASSERT_EQ(1, mirs_[6].ssa_rep->num_defs);
-  EXPECT_EQ(11, mirs_[6].ssa_rep->defs[0]);  // 6 -> 11
-  ASSERT_EQ(2, mirs_[6].ssa_rep->num_uses);
-  EXPECT_EQ(5, mirs_[6].ssa_rep->uses[0]);
-  EXPECT_EQ(2, mirs_[6].ssa_rep->uses[1]);
-  ASSERT_EQ(1, mirs_[7].ssa_rep->num_defs);
-  EXPECT_EQ(12, mirs_[7].ssa_rep->defs[0]);  // 7 -> 12
-  ASSERT_EQ(2, mirs_[7].ssa_rep->num_uses);
-  EXPECT_EQ(11, mirs_[7].ssa_rep->uses[0]);  // 6 -> 11
-  EXPECT_EQ(3, mirs_[7].ssa_rep->uses[1]);
-  ASSERT_EQ(1, mirs_[8].ssa_rep->num_defs);
-  EXPECT_EQ(8, mirs_[8].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[8].ssa_rep->num_uses);
-  EXPECT_EQ(12, mirs_[8].ssa_rep->uses[0]);  // 7 -> 12
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Simple4) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 1u, INT64_C(1)),
-      DEF_BINOP(3, Instruction::LONG_TO_FLOAT, 3u, 1u, 2u),
-      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 5u, 4u),
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 6u, INT64_C(1)),
-      DEF_BINOP(3, Instruction::LONG_TO_FLOAT, 8u, 6u, 7u),
-      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
-  };
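-
-  // LONG_TO_FLOAT takes a wide (two s_reg) input, so DEF_BINOP is used above
-  // merely to list both halves of the pair as uses; the operation is still unary.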
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 2, 3, 1, 2, 1, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 1, 6 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[1], value_names_[5]);
-  EXPECT_EQ(value_names_[2], value_names_[6]);
-  EXPECT_EQ(value_names_[3], value_names_[7]);
-
-  const size_t no_null_ck_indexes[] = { 3, 7 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      // Simple elimination of CONST_WIDE+LONG_TO_FLOAT allows simple elimination of IGET.
-      false, false, false, false, false, true, true, true
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the sregs have been renamed correctly.
-  ASSERT_EQ(1, mirs_[2].ssa_rep->num_defs);
-  EXPECT_EQ(8, mirs_[2].ssa_rep->defs[0]);   // 3 -> 8
-  ASSERT_EQ(2, mirs_[2].ssa_rep->num_uses);
-  EXPECT_EQ(1, mirs_[2].ssa_rep->uses[0]);
-  EXPECT_EQ(2, mirs_[2].ssa_rep->uses[1]);
-  ASSERT_EQ(1, mirs_[3].ssa_rep->num_defs);
-  EXPECT_EQ(9, mirs_[3].ssa_rep->defs[0]);   // 4 -> 9
-  ASSERT_EQ(1, mirs_[3].ssa_rep->num_uses);
-  EXPECT_EQ(0, mirs_[3].ssa_rep->uses[0]);
-  ASSERT_EQ(1, mirs_[4].ssa_rep->num_defs);
-  EXPECT_EQ(5, mirs_[4].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[4].ssa_rep->num_uses);
-  EXPECT_EQ(9, mirs_[4].ssa_rep->uses[0]);   // 4 -> 9
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, KillChain1) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(3, Instruction::CONST, 1u, 1000),
-      DEF_CONST(3, Instruction::CONST, 2u, 2000),
-      DEF_CONST(3, Instruction::CONST, 3u, 3000),
-      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
-      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 12u, 11u, 3u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 4, 5, 6, 4, 5, 4, 5 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[4], value_names_[9]);
-  EXPECT_EQ(value_names_[5], value_names_[10]);
-  EXPECT_EQ(value_names_[6], value_names_[11]);
-  EXPECT_EQ(value_names_[7], value_names_[12]);
-
-  const size_t no_null_ck_indexes[] = { 4, 9 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false, false, false, true, true, true, true
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the sregs have been renamed correctly.
-  ASSERT_EQ(1, mirs_[6].ssa_rep->num_defs);
-  EXPECT_EQ(11, mirs_[6].ssa_rep->defs[0]);  // 6 -> 11
-  ASSERT_EQ(2, mirs_[6].ssa_rep->num_uses);
-  EXPECT_EQ(5, mirs_[6].ssa_rep->uses[0]);
-  EXPECT_EQ(2, mirs_[6].ssa_rep->uses[1]);
-  ASSERT_EQ(1, mirs_[7].ssa_rep->num_defs);
-  EXPECT_EQ(12, mirs_[7].ssa_rep->defs[0]);  // 7 -> 12
-  ASSERT_EQ(2, mirs_[7].ssa_rep->num_uses);
-  EXPECT_EQ(11, mirs_[7].ssa_rep->uses[0]);  // 6 -> 11
-  EXPECT_EQ(3, mirs_[7].ssa_rep->uses[1]);
-  ASSERT_EQ(1, mirs_[8].ssa_rep->num_defs);
-  EXPECT_EQ(8, mirs_[8].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[8].ssa_rep->num_uses);
-  EXPECT_EQ(12, mirs_[8].ssa_rep->uses[0]);   // 7 -> 12
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, KillChain2) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(3, Instruction::CONST, 1u, 1000),
-      DEF_CONST(3, Instruction::CONST, 2u, 2000),
-      DEF_CONST(3, Instruction::CONST, 3u, 3000),
-      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
-      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 12u, 11u, 3u),
-      DEF_CONST(3, Instruction::CONST, 13u, 4000),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 5, 4, 6, 4, 7, 7, 4, 7 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 13 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[4], value_names_[9]);
-  EXPECT_EQ(value_names_[5], value_names_[10]);
-  EXPECT_EQ(value_names_[6], value_names_[11]);
-  EXPECT_EQ(value_names_[7], value_names_[12]);
-
-  const size_t no_null_ck_indexes[] = { 4, 9 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false, false, false, true, true, true, true, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the sregs have been renamed correctly.
-  ASSERT_EQ(1, mirs_[7].ssa_rep->num_defs);
-  EXPECT_EQ(12, mirs_[7].ssa_rep->defs[0]);  // 7 -> 12
-  ASSERT_EQ(2, mirs_[7].ssa_rep->num_uses);
-  EXPECT_EQ(6, mirs_[7].ssa_rep->uses[0]);
-  EXPECT_EQ(3, mirs_[7].ssa_rep->uses[1]);
-  ASSERT_EQ(1, mirs_[8].ssa_rep->num_defs);
-  EXPECT_EQ(8, mirs_[8].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[8].ssa_rep->num_uses);
-  EXPECT_EQ(12, mirs_[8].ssa_rep->uses[0]);   // 7 -> 12
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, KillChain3) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(3, Instruction::CONST, 1u, 1000),
-      DEF_CONST(3, Instruction::CONST, 2u, 2000),
-      DEF_CONST(3, Instruction::CONST, 3u, 3000),
-      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
-      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),
-      DEF_CONST(3, Instruction::CONST, 12u, 4000),
-      DEF_BINOP(3, Instruction::SUB_INT, 13u, 11u, 3u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 5, 4, 6, 4, 7, 4, 7, 4 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 12 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[4], value_names_[9]);
-  EXPECT_EQ(value_names_[5], value_names_[10]);
-  EXPECT_EQ(value_names_[6], value_names_[11]);
-  EXPECT_EQ(value_names_[7], value_names_[13]);
-
-  const size_t no_null_ck_indexes[] = { 4, 9 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false, false, false, true, true, true, false, true
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the sregs have been renamed correctly.
-  ASSERT_EQ(1, mirs_[7].ssa_rep->num_defs);
-  EXPECT_EQ(13, mirs_[7].ssa_rep->defs[0]);  // 7 -> 13
-  ASSERT_EQ(2, mirs_[7].ssa_rep->num_uses);
-  EXPECT_EQ(6, mirs_[7].ssa_rep->uses[0]);
-  EXPECT_EQ(3, mirs_[7].ssa_rep->uses[1]);
-  ASSERT_EQ(1, mirs_[8].ssa_rep->num_defs);
-  EXPECT_EQ(8, mirs_[8].ssa_rep->defs[0]);
-  ASSERT_EQ(1, mirs_[8].ssa_rep->num_uses);
-  EXPECT_EQ(13, mirs_[8].ssa_rep->uses[0]);   // 7 -> 13
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, KeepChain1) {
-  // KillChain2 without the final CONST.
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(3, Instruction::CONST, 1u, 1000),
-      DEF_CONST(3, Instruction::CONST, 2u, 2000),
-      DEF_CONST(3, Instruction::CONST, 3u, 3000),
-      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
-      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 12u, 11u, 3u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 5, 4, 6, 4, 7, 7, 4 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[4], value_names_[9]);
-  EXPECT_EQ(value_names_[5], value_names_[10]);
-  EXPECT_EQ(value_names_[6], value_names_[11]);
-  EXPECT_EQ(value_names_[7], value_names_[12]);
-
-  const size_t no_null_ck_indexes[] = { 4, 9 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false, false, false, false, false, false, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, KeepChain2) {
-  // KillChain1 with MIRs in the middle of the chain.
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(3, Instruction::CONST, 1u, 1000),
-      DEF_CONST(3, Instruction::CONST, 2u, 2000),
-      DEF_CONST(3, Instruction::CONST, 3u, 3000),
-      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
-      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
-      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
-      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
-      DEF_CONST(3, Instruction::CONST, 11u, 4000),
-      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 12u, 11u),
-      DEF_BINOP(3, Instruction::MUL_INT, 13u, 10u, 2u),
-      DEF_BINOP(3, Instruction::SUB_INT, 14u, 13u, 3u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 4, 5, 6, 4, 5, 4, 7, 4, 5 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[4], value_names_[9]);
-  EXPECT_EQ(value_names_[5], value_names_[10]);
-  EXPECT_EQ(value_names_[6], value_names_[13]);
-  EXPECT_EQ(value_names_[7], value_names_[14]);
-
-  const size_t no_null_ck_indexes[] = { 4, 9 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false, false, false,
-      false, false, false, false, false, false
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestDiamond, CreatePhi1) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000),
-      DEF_CONST(4, Instruction::CONST, 1u, 1000),
-  };
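-
-  // The CONST in bb4 duplicates the dominating CONST in bb3, but its s_reg may
-  // still be referenced at the bb6 merge, so DCE should replace it with a
-  // single-input Phi (checked below) rather than drop it outright.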
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 0 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-
-  static const bool eliminated[] = {
-      false, true,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that we've created a single-input Phi to replace the CONST 1u.
-  BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
-  MIR* phi = bb4->first_mir_insn;
-  ASSERT_TRUE(phi != nullptr);
-  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
-  ASSERT_EQ(1, phi->ssa_rep->num_uses);
-  EXPECT_EQ(0, phi->ssa_rep->uses[0]);
-  ASSERT_EQ(1, phi->ssa_rep->num_defs);
-  EXPECT_EQ(1, phi->ssa_rep->defs[0]);
-  EXPECT_EQ(0u, phi->dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestDiamond, CreatePhi2) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000),
-      DEF_MOVE(4, Instruction::MOVE, 1u, 0u),
-      DEF_CONST(4, Instruction::CONST, 2u, 1000),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 0 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  static const bool eliminated[] = {
-      false, false, true,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that we've created a single-input Phi to replace the CONST 2u.
-  BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
-  MIR* phi = bb4->first_mir_insn;
-  ASSERT_TRUE(phi != nullptr);
-  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
-  ASSERT_EQ(1, phi->ssa_rep->num_uses);
-  EXPECT_EQ(0, phi->ssa_rep->uses[0]);
-  ASSERT_EQ(1, phi->ssa_rep->num_defs);
-  EXPECT_EQ(2, phi->ssa_rep->defs[0]);
-  EXPECT_EQ(0u, phi->dalvikInsn.vA);
-  MIR* move = phi->next;
-  ASSERT_TRUE(move != nullptr);
-  ASSERT_EQ(Instruction::MOVE, move->dalvikInsn.opcode);
-  ASSERT_EQ(1, move->ssa_rep->num_uses);
-  EXPECT_EQ(2, move->ssa_rep->uses[0]);
-  ASSERT_EQ(1, move->ssa_rep->num_defs);
-  EXPECT_EQ(1, move->ssa_rep->defs[0]);
-  EXPECT_EQ(1u, move->dalvikInsn.vA);
-  EXPECT_EQ(0u, move->dalvikInsn.vB);
-}
-
-TEST_F(GvnDeadCodeEliminationTestDiamond, CreatePhi3) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(4, Instruction::CONST, 1u, 1000),
-      DEF_IPUT(4, Instruction::IPUT, 1u, 0u, 0u),
-      DEF_CONST(5, Instruction::CONST, 3u, 2000),
-      DEF_IPUT(5, Instruction::IPUT, 3u, 0u, 0u),
-      DEF_IGET(6, Instruction::IGET, 5u, 0u, 0u),
-  };
-
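-  // The /* dummy */ entries pad s_regs that no MIR actually defines (the IPUTs
-  // have no def); every s_reg still needs a slot in the map.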
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2 /* dummy */, 1, 2 /* dummy */, 1 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 3, 5 };
-  ExpectValueNamesNE(diff_indexes);
-
-  const size_t no_null_ck_indexes[] = { 2, 4, 5 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, true,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that we've created a two-input Phi to replace the IGET 5u.
-  BasicBlock* bb6 = cu_.mir_graph->GetBasicBlock(6);
-  MIR* phi = bb6->first_mir_insn;
-  ASSERT_TRUE(phi != nullptr);
-  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
-  ASSERT_EQ(2, phi->ssa_rep->num_uses);
-  EXPECT_EQ(1, phi->ssa_rep->uses[0]);
-  EXPECT_EQ(3, phi->ssa_rep->uses[1]);
-  ASSERT_EQ(1, phi->ssa_rep->num_defs);
-  EXPECT_EQ(5, phi->ssa_rep->defs[0]);
-  EXPECT_EQ(1u, phi->dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestDiamond, KillChainInAnotherBlock1) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },  // linked list
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 1u, 0u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 2u, 1u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 3u, 2u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 4u, 3u, 0u),
-      DEF_IFZ(3, Instruction::IF_NEZ, 4u),
-      DEF_IGET(4, Instruction::IGET_OBJECT, 6u, 0u, 0u),
-      DEF_IGET(4, Instruction::IGET_OBJECT, 7u, 6u, 0u),
-      DEF_IGET(4, Instruction::IGET_OBJECT, 8u, 7u, 0u),
-      DEF_IGET(4, Instruction::IGET_OBJECT, 9u, 8u, 0u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 2, 3 /* dummy */, 1, 2, 1, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[1], value_names_[6]);
-  EXPECT_EQ(value_names_[2], value_names_[7]);
-  EXPECT_EQ(value_names_[3], value_names_[8]);
-  EXPECT_EQ(value_names_[4], value_names_[9]);
-
-  const size_t no_null_ck_indexes[] = { 1, 6, 7, 8, 9 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, true, true, true, true,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that we've created two single-input Phis to replace the IGET 8u and IGET 9u;
-  // the IGET 6u and IGET 7u were killed without a replacement.
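-  // The earlier IGETs are fully overwritten within bb4, while the last writes to
-  // v_regs 1 and 2 may still be observed at the merge, so each needs a Phi def.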
-  BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
-  MIR* phi1 = bb4->first_mir_insn;
-  ASSERT_TRUE(phi1 != nullptr);
-  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi1->dalvikInsn.opcode));
-  MIR* phi2 = phi1->next;
-  ASSERT_TRUE(phi2 != nullptr);
-  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi2->dalvikInsn.opcode));
-  ASSERT_TRUE(phi2->next == &mirs_[6]);
-  if (phi1->dalvikInsn.vA == 2u) {
-    std::swap(phi1, phi2);
-  }
-  ASSERT_EQ(1, phi1->ssa_rep->num_uses);
-  EXPECT_EQ(3, phi1->ssa_rep->uses[0]);
-  ASSERT_EQ(1, phi1->ssa_rep->num_defs);
-  EXPECT_EQ(8, phi1->ssa_rep->defs[0]);
-  EXPECT_EQ(1u, phi1->dalvikInsn.vA);
-  ASSERT_EQ(1, phi2->ssa_rep->num_uses);
-  EXPECT_EQ(4, phi2->ssa_rep->uses[0]);
-  ASSERT_EQ(1, phi2->ssa_rep->num_defs);
-  EXPECT_EQ(9, phi2->ssa_rep->defs[0]);
-  EXPECT_EQ(2u, phi2->dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestDiamond, KillChainInAnotherBlock2) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },  // linked list
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 1u, 0u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 2u, 1u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 3u, 2u, 0u),
-      DEF_IGET(3, Instruction::IGET_OBJECT, 4u, 3u, 0u),
-      DEF_IFZ(3, Instruction::IF_NEZ, 4u),
-      DEF_IGET(4, Instruction::IGET_OBJECT, 6u, 0u, 0u),
-      DEF_IGET(4, Instruction::IGET_OBJECT, 7u, 6u, 0u),
-      DEF_IGET(4, Instruction::IGET_OBJECT, 8u, 7u, 0u),
-      DEF_CONST(4, Instruction::CONST, 9u, 1000),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 2, 3 /* dummy */, 1, 2, 1, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 9 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[1], value_names_[6]);
-  EXPECT_EQ(value_names_[2], value_names_[7]);
-  EXPECT_EQ(value_names_[3], value_names_[8]);
-
-  const size_t no_null_ck_indexes[] = { 1, 6, 7, 8 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, true, true, true, false,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that we've created a single-input Phi to replace the IGET 8u;
-  // the IGET 6u and IGET 7u were killed without a replacement.
-  BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
-  MIR* phi = bb4->first_mir_insn;
-  ASSERT_TRUE(phi != nullptr);
-  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
-  ASSERT_TRUE(phi->next == &mirs_[6]);
-  ASSERT_EQ(1, phi->ssa_rep->num_uses);
-  EXPECT_EQ(3, phi->ssa_rep->uses[0]);
-  ASSERT_EQ(1, phi->ssa_rep->num_defs);
-  EXPECT_EQ(8, phi->ssa_rep->defs[0]);
-  EXPECT_EQ(1u, phi->dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestLoop, IFieldLoopVariable) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
-      DEF_CONST(3, Instruction::CONST, 1u, 1),
-      DEF_CONST(3, Instruction::CONST, 2u, 0),
-      DEF_IPUT(3, Instruction::IPUT, 2u, 0u, 0u),
-      DEF_IGET(4, Instruction::IGET, 4u, 0u, 0u),
-      DEF_BINOP(4, Instruction::ADD_INT, 5u, 4u, 1u),
-      DEF_IPUT(4, Instruction::IPUT, 5u, 0u, 0u),
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3 /* dummy */, 2, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 4, 5 };
-  ExpectValueNamesNE(diff_indexes);
-
-  const size_t no_null_ck_indexes[] = { 3, 4, 6 };
-  ExpectNoNullCheck(no_null_ck_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false, false, true, false, false,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that we've created a two-input Phi to replace the IGET 4u.
-  BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
-  MIR* phi = bb4->first_mir_insn;
-  ASSERT_TRUE(phi != nullptr);
-  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
-  ASSERT_TRUE(phi->next == &mirs_[4]);
-  ASSERT_EQ(2, phi->ssa_rep->num_uses);
-  EXPECT_EQ(2, phi->ssa_rep->uses[0]);
-  EXPECT_EQ(5, phi->ssa_rep->uses[1]);
-  ASSERT_EQ(1, phi->ssa_rep->num_defs);
-  EXPECT_EQ(4, phi->ssa_rep->defs[0]);
-  EXPECT_EQ(2u, phi->dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestDiamond, LongOverlaps1) {
-  static const MIRDef mirs[] = {
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000u),
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 2u, 1000u),
-      DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 4u, 0u),
-      DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 6u, 2u),
-      DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 8u, 4u),
-      DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 10u, 6u),
-  };
-
-  // The last insn should overlap the first and second.
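-  // Concretely: the final copies land in v_reg pairs {0, 1} and {2, 3}, which
-  // overlap the constants' pairs {1, 2} and {3, 4}, so no move can be removed by
-  // simply renaming a CONST_WIDE def.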
-  static const int32_t sreg_to_vreg_map[] = { 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 0, 2, 4, 6, 8, 10 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-  EXPECT_EQ(value_names_[0], value_names_[3]);
-  EXPECT_EQ(value_names_[0], value_names_[4]);
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, LongOverlaps2) {
-  static const MIRDef mirs[] = {
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000u),
-      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 2u, 0u),
-      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 4u, 2u),
-  };
-
-  // The last insn should overlap the first and second.
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 1, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 0, 2, 4 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  static const bool eliminated[] = {
-      false, true, true,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the CONST_WIDE registers have been correctly renamed.
-  MIR* const_wide = &mirs_[0];
-  ASSERT_EQ(2u, const_wide->ssa_rep->num_defs);
-  EXPECT_EQ(4, const_wide->ssa_rep->defs[0]);
-  EXPECT_EQ(5, const_wide->ssa_rep->defs[1]);
-  EXPECT_EQ(1u, const_wide->dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, LongOverlaps3) {
-  static const MIRDef mirs[] = {
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000u),
-      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 2u, 0u),
-      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 4u, 2u),
-  };
-
-  // The last insn should overlap the first and second.
-  static const int32_t sreg_to_vreg_map[] = { 2, 3, 0, 1, 1, 2 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 0, 2, 4 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[2]);
-
-  static const bool eliminated[] = {
-      false, true, true,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check that the CONST_WIDE registers have been correctly renamed.
-  MIR* const_wide = &mirs_[0];
-  ASSERT_EQ(2u, const_wide->ssa_rep->num_defs);
-  EXPECT_EQ(4, const_wide->ssa_rep->defs[0]);
-  EXPECT_EQ(5, const_wide->ssa_rep->defs[1]);
-  EXPECT_EQ(1u, const_wide->dalvikInsn.vA);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, MixedOverlaps1) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000u),
-      DEF_MOVE(3, Instruction::MOVE, 1u, 0u),
-      DEF_CONST(3, Instruction::CONST, 2u, 2000u),
-      { 3, Instruction::INT_TO_LONG, 0, 0u, 1, { 2u }, 2, { 3u, 4u } },
-      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 5u, 3u),
-      DEF_CONST(3, Instruction::CONST, 7u, 3000u),
-      DEF_CONST(3, Instruction::CONST, 8u, 4000u),
-  };
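-
-  // The long-hand MIRDef above is an INT_TO_LONG: a single 32-bit use (2u)
-  // widening into the def pair {3u, 4u}, which the DEF_UNOP helper cannot express.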
-
-  static const int32_t sreg_to_vreg_map[] = { 1, 2, 0, 0, 1, 3, 4, 0, 1 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 3, 5 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 2, 3, 5, 6 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[3], value_names_[4]);
-
-  static const bool eliminated[] = {
-      false, true, false, false, true, false, false,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-  // Check renamed registers in CONST.
-  MIR* cst = &mirs_[0];
-  ASSERT_EQ(Instruction::CONST, cst->dalvikInsn.opcode);
-  ASSERT_EQ(0, cst->ssa_rep->num_uses);
-  ASSERT_EQ(1, cst->ssa_rep->num_defs);
-  EXPECT_EQ(1, cst->ssa_rep->defs[0]);
-  EXPECT_EQ(2u, cst->dalvikInsn.vA);
-  // Check renamed registers in INT_TO_LONG.
-  MIR* int_to_long = &mirs_[3];
-  ASSERT_EQ(Instruction::INT_TO_LONG, int_to_long->dalvikInsn.opcode);
-  ASSERT_EQ(1, int_to_long->ssa_rep->num_uses);
-  EXPECT_EQ(2, int_to_long->ssa_rep->uses[0]);
-  ASSERT_EQ(2, int_to_long->ssa_rep->num_defs);
-  EXPECT_EQ(5, int_to_long->ssa_rep->defs[0]);
-  EXPECT_EQ(6, int_to_long->ssa_rep->defs[1]);
-  EXPECT_EQ(3u, int_to_long->dalvikInsn.vA);
-  EXPECT_EQ(0u, int_to_long->dalvikInsn.vB);
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, UnusedRegs1) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000u),
-      DEF_CONST(3, Instruction::CONST, 1u, 2000u),
-      DEF_BINOP(3, Instruction::ADD_INT, 2u, 1u, 0u),
-      DEF_CONST(3, Instruction::CONST, 3u, 1000u),            // NOT killed (b/21702651).
-      DEF_BINOP(3, Instruction::ADD_INT, 4u, 1u, 3u),         // Killed (RecordPass)
-      DEF_CONST(3, Instruction::CONST, 5u, 2000u),            // Killed with 9u (BackwardPass)
-      DEF_BINOP(3, Instruction::ADD_INT, 6u, 5u, 0u),         // Killed (RecordPass)
-      DEF_CONST(3, Instruction::CONST, 7u, 4000u),
-      DEF_MOVE(3, Instruction::MOVE, 8u, 0u),                 // Killed with 6u (BackwardPass)
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 1, 2, 3, 0, 3, 0, 3, 4, 0 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 7 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[3]);
-  EXPECT_EQ(value_names_[2], value_names_[4]);
-  EXPECT_EQ(value_names_[1], value_names_[5]);
-  EXPECT_EQ(value_names_[2], value_names_[6]);
-  EXPECT_EQ(value_names_[0], value_names_[8]);
-
-  static const bool eliminated[] = {
-      false, false, false, false, true, true, true, false, true,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, UnusedRegs2) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 1000u),
-      DEF_CONST(3, Instruction::CONST, 1u, 2000u),
-      DEF_BINOP(3, Instruction::ADD_INT, 2u, 1u, 0u),
-      DEF_CONST(3, Instruction::CONST, 3u, 1000u),            // Killed (BackwardPass; b/21702651)
-      DEF_BINOP(3, Instruction::ADD_INT, 4u, 1u, 3u),         // Killed (RecordPass)
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 5u, 4000u),
-      { 3, Instruction::LONG_TO_INT, 0, 0u, 2, { 5u, 6u }, 1, { 7u } },
-      DEF_BINOP(3, Instruction::ADD_INT, 8u, 7u, 0u),
-      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 9u, 4000u),  // Killed with 12u (BackwardPass)
-      DEF_CONST(3, Instruction::CONST, 11u, 6000u),
-      { 3, Instruction::LONG_TO_INT, 0, 0u, 2, { 9u, 10u }, 1, { 12u } },  // Killed with 9u (BP)
-  };
-
-  static const int32_t sreg_to_vreg_map[] = {
-      2, 3, 4, 1, 4, 5, 6 /* high word */, 0, 7, 0, 1 /* high word */, 8, 0
-  };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 5, 9 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2, 5, 6, 7, 9 };
-  ExpectValueNamesNE(diff_indexes);
-  EXPECT_EQ(value_names_[0], value_names_[3]);
-  EXPECT_EQ(value_names_[2], value_names_[4]);
-  EXPECT_EQ(value_names_[5], value_names_[8]);
-  EXPECT_EQ(value_names_[6], value_names_[10]);
-
-  static const bool eliminated[] = {
-      false, false, false, true, true, false, false, false, true, false, true,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, ArrayLengthThrows) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3, Instruction::CONST, 0u, 0),              // null
-      DEF_UNOP(3, Instruction::ARRAY_LENGTH, 1u, 0u),       // null.length
-      DEF_CONST(3, Instruction::CONST, 2u, 1000u),          // Overwrite the array-length dest.
-  };
-
-  static const int32_t sreg_to_vreg_map[] = { 0, 1, 1 };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  PerformGVN_DCE();
-
-  ASSERT_EQ(arraysize(mirs), value_names_.size());
-  static const size_t diff_indexes[] = { 0, 1, 2 };
-  ExpectValueNamesNE(diff_indexes);
-
-  static const bool eliminated[] = {
-      false, false, false,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-TEST_F(GvnDeadCodeEliminationTestSimple, Dependency) {
-  static const MIRDef mirs[] = {
-      DEF_MOVE(3, Instruction::MOVE, 5u, 1u),                 // move v5,v1
-      DEF_MOVE(3, Instruction::MOVE, 6u, 1u),                 // move v12,v1
-      DEF_MOVE(3, Instruction::MOVE, 7u, 0u),                 // move v13,v0
-      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 8u, 2u),       // move v0_1,v2_3
-      DEF_MOVE(3, Instruction::MOVE, 10u, 6u),                // move v3,v12
-      DEF_MOVE(3, Instruction::MOVE, 11u, 4u),                // move v2,v4
-      DEF_MOVE(3, Instruction::MOVE, 12u, 7u),                // move v4,v13
-      DEF_MOVE(3, Instruction::MOVE, 13u, 11u),               // move v12,v2
-      DEF_MOVE(3, Instruction::MOVE, 14u, 10u),               // move v2,v3
-      DEF_MOVE(3, Instruction::MOVE, 15u, 5u),                // move v3,v5
-      DEF_MOVE(3, Instruction::MOVE, 16u, 12u),               // move v5,v4
-  };
-
-  static const int32_t sreg_to_vreg_map[] = {
-      0, 1, 2, 3, 4, 5, 12, 13, 0, 1, 3, 2, 4, 12, 2, 3, 5
-  };
-  PrepareSRegToVRegMap(sreg_to_vreg_map);
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 2, 8 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformGVN_DCE();
-
-  static const bool eliminated[] = {
-      false, false, false, false, false, false, false, true, true, false, false,
-  };
-  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(eliminated); ++i) {
-    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
-    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
deleted file mode 100644
index 38f7d1e..0000000
--- a/compiler/dex/local_value_numbering.cc
+++ /dev/null
@@ -1,2038 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "local_value_numbering.h"
-
-#include "base/bit_utils.h"
-#include "global_value_numbering.h"
-#include "mir_field_info.h"
-#include "mir_graph.h"
-#include "utils.h"
-
-namespace art {
-
-namespace {  // anonymous namespace
-
-// Operations used for value map keys instead of actual opcodes.
-static constexpr uint16_t kInvokeMemoryVersionBumpOp = Instruction::INVOKE_VIRTUAL;
-static constexpr uint16_t kUnresolvedSFieldOp = Instruction::SGET;
-static constexpr uint16_t kResolvedSFieldOp = Instruction::SGET_WIDE;
-static constexpr uint16_t kUnresolvedIFieldOp = Instruction::IGET;
-static constexpr uint16_t kNonAliasingIFieldLocOp = Instruction::IGET_WIDE;
-static constexpr uint16_t kNonAliasingIFieldInitialOp = Instruction::IGET_OBJECT;
-static constexpr uint16_t kAliasingIFieldOp = Instruction::IGET_BOOLEAN;
-static constexpr uint16_t kAliasingIFieldStartVersionOp = Instruction::IGET_BYTE;
-static constexpr uint16_t kAliasingIFieldBumpVersionOp = Instruction::IGET_CHAR;
-static constexpr uint16_t kNonAliasingArrayOp = Instruction::AGET;
-static constexpr uint16_t kNonAliasingArrayStartVersionOp = Instruction::AGET_WIDE;
-static constexpr uint16_t kNonAliasingArrayBumpVersionOp = Instruction::AGET_OBJECT;
-static constexpr uint16_t kAliasingArrayOp = Instruction::AGET_BOOLEAN;
-static constexpr uint16_t kAliasingArrayStartVersionOp = Instruction::AGET_BYTE;
-static constexpr uint16_t kAliasingArrayBumpVersionOp = Instruction::AGET_CHAR;
-static constexpr uint16_t kMergeBlockMemoryVersionBumpOp = Instruction::INVOKE_VIRTUAL_RANGE;
-static constexpr uint16_t kMergeBlockAliasingIFieldVersionBumpOp = Instruction::IPUT;
-static constexpr uint16_t kMergeBlockAliasingIFieldMergeLocationOp = Instruction::IPUT_WIDE;
-static constexpr uint16_t kMergeBlockNonAliasingArrayVersionBumpOp = Instruction::APUT;
-static constexpr uint16_t kMergeBlockNonAliasingArrayMergeLocationOp = Instruction::APUT_WIDE;
-static constexpr uint16_t kMergeBlockAliasingArrayVersionBumpOp = Instruction::APUT_OBJECT;
-static constexpr uint16_t kMergeBlockAliasingArrayMergeLocationOp = Instruction::APUT_BOOLEAN;
-static constexpr uint16_t kMergeBlockNonAliasingIFieldVersionBumpOp = Instruction::APUT_BYTE;
-static constexpr uint16_t kMergeBlockSFieldVersionBumpOp = Instruction::APUT_CHAR;
-
-}  // anonymous namespace
-
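These tag constants only make sense against the shape of the global value map. Below is a minimal sketch, under the assumption (suggested by the LookupValue call sites throughout this file) that every value is named by an (op, x, y, z) tuple; reusing otherwise-unused real opcodes as the op tag keeps these synthetic entries from colliding with entries for actual instructions. Names here are illustrative, not ART's:

    #include <cstdint>
    #include <map>
    #include <tuple>

    using ValueKey = std::tuple<uint16_t, uint16_t, uint16_t, uint16_t>;

    // Assigns a stable, fresh 16-bit name to each distinct (op, x, y, z) tuple.
    uint16_t LookupValueSketch(std::map<ValueKey, uint16_t>* value_map,
                               uint16_t op, uint16_t x, uint16_t y, uint16_t z) {
      ValueKey key(op, x, y, z);
      auto it = value_map->find(key);
      if (it == value_map->end()) {
        uint16_t fresh = static_cast<uint16_t>(value_map->size() + 1u);
        it = value_map->emplace(key, fresh).first;
      }
      return it->second;
    }
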
-class LocalValueNumbering::AliasingIFieldVersions {
- public:
-  static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
-                                     uint16_t field_id) {
-    uint16_t type = gvn->GetIFieldType(field_id);
-    return gvn->LookupValue(kAliasingIFieldStartVersionOp, field_id,
-                            lvn->global_memory_version_, lvn->unresolved_ifield_version_[type]);
-  }
-
-  static uint16_t BumpMemoryVersion(GlobalValueNumbering* gvn, uint16_t old_version,
-                                    uint16_t store_ref_set_id, uint16_t stored_value) {
-    return gvn->LookupValue(kAliasingIFieldBumpVersionOp, old_version,
-                            store_ref_set_id, stored_value);
-  }
-
-  static uint16_t LookupGlobalValue(GlobalValueNumbering* gvn,
-                                    uint16_t field_id, uint16_t base, uint16_t memory_version) {
-    return gvn->LookupValue(kAliasingIFieldOp, field_id, base, memory_version);
-  }
-
-  static uint16_t LookupMergeValue(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
-                                   uint16_t field_id, uint16_t base) {
-    // If the base/field_id is non-aliasing in lvn, use the non-aliasing value.
-    uint16_t type = gvn->GetIFieldType(field_id);
-    if (lvn->IsNonAliasingIField(base, field_id, type)) {
-      uint16_t loc = gvn->LookupValue(kNonAliasingIFieldLocOp, base, field_id, type);
-      auto lb = lvn->non_aliasing_ifield_value_map_.find(loc);
-      return (lb != lvn->non_aliasing_ifield_value_map_.end())
-          ? lb->second
-          : gvn->LookupValue(kNonAliasingIFieldInitialOp, loc, kNoValue, kNoValue);
-    }
-    return AliasingValuesMergeGet<AliasingIFieldVersions>(
-        gvn, lvn, &lvn->aliasing_ifield_value_map_, field_id, base);
-  }
-
-  static bool HasNewBaseVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
-                                uint16_t field_id) {
-    uint16_t type = gvn->GetIFieldType(field_id);
-    return lvn->unresolved_ifield_version_[type] == lvn->merge_new_memory_version_ ||
-        lvn->global_memory_version_ == lvn->merge_new_memory_version_;
-  }
-
-  static uint16_t LookupMergeBlockValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
-                                        uint16_t field_id) {
-    return gvn->LookupValue(kMergeBlockAliasingIFieldVersionBumpOp, field_id, kNoValue, lvn_id);
-  }
-
-  static uint16_t LookupMergeLocationValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
-                                           uint16_t field_id, uint16_t base) {
-    return gvn->LookupValue(kMergeBlockAliasingIFieldMergeLocationOp, field_id, base, lvn_id);
-  }
-};
-
-class LocalValueNumbering::NonAliasingArrayVersions {
- public:
-  static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn,
-                                     const LocalValueNumbering* lvn ATTRIBUTE_UNUSED,
-                                     uint16_t array) {
-    return gvn->LookupValue(kNonAliasingArrayStartVersionOp, array, kNoValue, kNoValue);
-  }
-
-  static uint16_t BumpMemoryVersion(GlobalValueNumbering* gvn, uint16_t old_version,
-                                    uint16_t store_ref_set_id, uint16_t stored_value) {
-    return gvn->LookupValue(kNonAliasingArrayBumpVersionOp, old_version,
-                            store_ref_set_id, stored_value);
-  }
-
-  static uint16_t LookupGlobalValue(GlobalValueNumbering* gvn,
-                                    uint16_t array, uint16_t index, uint16_t memory_version) {
-    return gvn->LookupValue(kNonAliasingArrayOp, array, index, memory_version);
-  }
-
-  static uint16_t LookupMergeValue(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
-                                   uint16_t array, uint16_t index) {
-    return AliasingValuesMergeGet<NonAliasingArrayVersions>(
-        gvn, lvn, &lvn->non_aliasing_array_value_map_, array, index);
-  }
-
-  static bool HasNewBaseVersion(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
-                                const LocalValueNumbering* lvn ATTRIBUTE_UNUSED,
-                                uint16_t array ATTRIBUTE_UNUSED) {
-    return false;  // Not affected by global_memory_version_.
-  }
-
-  static uint16_t LookupMergeBlockValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
-                                        uint16_t array) {
-    return gvn->LookupValue(kMergeBlockNonAliasingArrayVersionBumpOp, array, kNoValue, lvn_id);
-  }
-
-  static uint16_t LookupMergeLocationValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
-                                           uint16_t array, uint16_t index) {
-    return gvn->LookupValue(kMergeBlockNonAliasingArrayMergeLocationOp, array, index, lvn_id);
-  }
-};
-
-class LocalValueNumbering::AliasingArrayVersions {
- public:
-  static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
-                                     uint16_t type) {
-    return gvn->LookupValue(kAliasingArrayStartVersionOp, type, lvn->global_memory_version_,
-                            kNoValue);
-  }
-
-  static uint16_t BumpMemoryVersion(GlobalValueNumbering* gvn, uint16_t old_version,
-                                    uint16_t store_ref_set_id, uint16_t stored_value) {
-    return gvn->LookupValue(kAliasingArrayBumpVersionOp, old_version,
-                            store_ref_set_id, stored_value);
-  }
-
-  static uint16_t LookupGlobalValue(GlobalValueNumbering* gvn,
-                                    uint16_t type, uint16_t location, uint16_t memory_version) {
-    return gvn->LookupValue(kAliasingArrayOp, type, location, memory_version);
-  }
-
-  static uint16_t LookupMergeValue(GlobalValueNumbering* gvn,
-                                   const LocalValueNumbering* lvn,
-                                   uint16_t type, uint16_t location) {
-    // If the location is non-aliasing in lvn, use the non-aliasing value.
-    uint16_t array = gvn->GetArrayLocationBase(location);
-    if (lvn->IsNonAliasingArray(array, type)) {
-      uint16_t index = gvn->GetArrayLocationIndex(location);
-      return NonAliasingArrayVersions::LookupMergeValue(gvn, lvn, array, index);
-    }
-    return AliasingValuesMergeGet<AliasingArrayVersions>(
-        gvn, lvn, &lvn->aliasing_array_value_map_, type, location);
-  }
-
-  static bool HasNewBaseVersion(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
-                                const LocalValueNumbering* lvn,
-                                uint16_t type ATTRIBUTE_UNUSED) {
-    return lvn->global_memory_version_ == lvn->merge_new_memory_version_;
-  }
-
-  static uint16_t LookupMergeBlockValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
-                                        uint16_t type) {
-    return gvn->LookupValue(kMergeBlockAliasingArrayVersionBumpOp, type, kNoValue, lvn_id);
-  }
-
-  static uint16_t LookupMergeLocationValue(GlobalValueNumbering* gvn, uint16_t lvn_id,
-                                           uint16_t type, uint16_t location) {
-    return gvn->LookupValue(kMergeBlockAliasingArrayMergeLocationOp, type, location, lvn_id);
-  }
-};
-
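All three *Versions classes are consumed as template parameters (see AliasingValuesMergeGet and MergeAliasingValues below): a static-policy pattern where the shared algorithm is written once against a set of static hooks and each memory kind supplies its own key interpretation. A toy sketch of the mechanism, with made-up hooks:

    #include <cstdint>

    struct ToyVersions {
      static uint16_t StartMemoryVersion(uint16_t key) { return key ^ 0x1000u; }
      static uint16_t LookupGlobalValue(uint16_t key, uint16_t loc, uint16_t ver) {
        return static_cast<uint16_t>(key * 31u + loc * 7u + ver);
      }
    };

    // Written once; each policy plugs in statically, so the calls inline with
    // no virtual dispatch.
    template <typename Versions>
    uint16_t FreshLoadValue(uint16_t key, uint16_t location) {
      uint16_t version = Versions::StartMemoryVersion(key);
      return Versions::LookupGlobalValue(key, location, version);
    }

    // Usage: FreshLoadValue<ToyVersions>(5u, 2u);
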
-template <typename Map>
-LocalValueNumbering::AliasingValues* LocalValueNumbering::GetAliasingValues(
-    Map* map, const typename Map::key_type& key) {
-  auto lb = map->lower_bound(key);
-  if (lb == map->end() || map->key_comp()(key, lb->first)) {
-    lb = map->PutBefore(lb, key, AliasingValues(this));
-  }
-  return &lb->second;
-}
-
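The lower_bound/key_comp/PutBefore sequence above is the classic find-or-insert idiom: a single tree walk serves both the hit and the miss. The same shape on a plain std::map, where emplace_hint plays PutBefore's role:

    #include <map>
    #include <string>

    std::string* GetOrCreate(std::map<int, std::string>* map, int key) {
      auto lb = map->lower_bound(key);
      if (lb == map->end() || map->key_comp()(key, lb->first)) {
        lb = map->emplace_hint(lb, key, std::string());  // Miss: insert before lb.
      }
      return &lb->second;  // Hit or freshly inserted.
    }
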
-template <typename Versions, typename KeyType>
-void LocalValueNumbering::UpdateAliasingValuesLoadVersion(const KeyType& key,
-                                                          AliasingValues* values) {
-  if (values->last_load_memory_version == kNoValue) {
-    // Get the start version that accounts for aliasing with unresolved fields of the same
-    // type and make it unique for the field by including the field_id.
-    uint16_t memory_version = values->memory_version_before_stores;
-    if (memory_version == kNoValue) {
-      memory_version = Versions::StartMemoryVersion(gvn_, this, key);
-    }
-    if (!values->store_loc_set.empty()) {
-      uint16_t ref_set_id = gvn_->GetRefSetId(values->store_loc_set);
-      memory_version = Versions::BumpMemoryVersion(gvn_, memory_version, ref_set_id,
-                                                   values->last_stored_value);
-    }
-    values->last_load_memory_version = memory_version;
-  }
-}
-
-template <typename Versions, typename Map>
-uint16_t LocalValueNumbering::AliasingValuesMergeGet(GlobalValueNumbering* gvn,
-                                                     const LocalValueNumbering* lvn,
-                                                     Map* map, const typename Map::key_type& key,
-                                                     uint16_t location) {
-  // Retrieve the value name that we would get from
-  //   const_cast<LocalValueNumbering*>(lvn)->HandleAliasingValuesGet(map, key, location)
-  // but don't modify the map.
-  uint16_t value_name;
-  auto it = map->find(key);
-  if (it == map->end()) {
-    uint16_t start_version = Versions::StartMemoryVersion(gvn, lvn, key);
-    value_name = Versions::LookupGlobalValue(gvn, key, location, start_version);
-  } else if (it->second.store_loc_set.count(location) != 0u) {
-    value_name = it->second.last_stored_value;
-  } else {
-    auto load_it = it->second.load_value_map.find(location);
-    if (load_it != it->second.load_value_map.end()) {
-      value_name = load_it->second;
-    } else {
-      value_name = Versions::LookupGlobalValue(gvn, key, location,
-                                               it->second.last_load_memory_version);
-    }
-  }
-  return value_name;
-}
-
-template <typename Versions, typename Map>
-uint16_t LocalValueNumbering::HandleAliasingValuesGet(Map* map, const typename Map::key_type& key,
-                                                      uint16_t location) {
-  // Retrieve the value name for IGET/SGET/AGET and update the map with a new value if needed.
-  uint16_t res;
-  AliasingValues* values = GetAliasingValues(map, key);
-  if (values->store_loc_set.count(location) != 0u) {
-    res = values->last_stored_value;
-  } else {
-    UpdateAliasingValuesLoadVersion<Versions>(key, values);
-    auto lb = values->load_value_map.lower_bound(location);
-    if (lb != values->load_value_map.end() && lb->first == location) {
-      res = lb->second;
-    } else {
-      res = Versions::LookupGlobalValue(gvn_, key, location, values->last_load_memory_version);
-      values->load_value_map.PutBefore(lb, location, res);
-    }
-  }
-  return res;
-}
-
-template <typename Versions, typename Map>
-bool LocalValueNumbering::HandleAliasingValuesPut(Map* map, const typename Map::key_type& key,
-                                                  uint16_t location, uint16_t value) {
-  AliasingValues* values = GetAliasingValues(map, key);
-  auto load_values_it = values->load_value_map.find(location);
-  if (load_values_it != values->load_value_map.end() && load_values_it->second == value) {
-    // This insn can be eliminated; it stores the same value that's already in the field.
-    return false;
-  }
-  if (value == values->last_stored_value) {
-    auto store_loc_lb = values->store_loc_set.lower_bound(location);
-    if (store_loc_lb != values->store_loc_set.end() && *store_loc_lb == location) {
-      // This insn can be eliminated; it stores the same value that's already in the field.
-      return false;
-    }
-    values->store_loc_set.emplace_hint(store_loc_lb, location);
-  } else {
-    UpdateAliasingValuesLoadVersion<Versions>(key, values);
-    values->memory_version_before_stores = values->last_load_memory_version;
-    values->last_stored_value = value;
-    values->store_loc_set.clear();
-    values->store_loc_set.insert(location);
-  }
-  // Clear the last load memory version and remove all potentially overwritten values.
-  values->last_load_memory_version = kNoValue;
-  auto it = values->load_value_map.begin(), end = values->load_value_map.end();
-  while (it != end) {
-    if (it->second == value) {
-      ++it;
-    } else {
-      it = values->load_value_map.erase(it);
-    }
-  }
-  return true;
-}
-
-template <typename K>
-void LocalValueNumbering::CopyAliasingValuesMap(ScopedArenaSafeMap<K, AliasingValues>* dest,
-                                                const ScopedArenaSafeMap<K, AliasingValues>& src) {
-  // We need each new AliasingValues (or rather its map members) to be constructed
-  // with our allocator, rather than the allocator of the source.
-  for (const auto& entry : src) {
-    auto it = dest->PutBefore(dest->end(), entry.first, AliasingValues(this));
-    it->second = entry.second;  // Map assignments preserve current allocator.
-  }
-}
-
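For illustration, a std::pmr analogue of the same concern (ART's arena containers are not pmr; this is only an analogy): the destination map re-constructs inserted values with its own memory resource, and copy assignment then fills in the elements without adopting the source's allocator, so the copy never points into the source's arena:

    #include <map>
    #include <memory_resource>
    #include <vector>

    void CopyIntoOwnArena(std::pmr::map<int, std::pmr::vector<int>>* dest,
                          const std::pmr::map<int, std::pmr::vector<int>>& src) {
      for (const auto& entry : src) {
        // The empty vector is re-bound to dest's memory resource on insertion.
        auto it = dest->emplace_hint(dest->end(), entry.first,
                                     std::pmr::vector<int>{});
        it->second = entry.second;  // Copies elements; keeps dest's allocator.
      }
    }
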
-LocalValueNumbering::LocalValueNumbering(GlobalValueNumbering* gvn, uint16_t id,
-                                         ScopedArenaAllocator* allocator)
-    : gvn_(gvn),
-      id_(id),
-      sreg_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-      sreg_wide_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-      sfield_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-      non_aliasing_ifield_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-      aliasing_ifield_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-      non_aliasing_array_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-      aliasing_array_value_map_(std::less<uint16_t>(), allocator->Adapter()),
-      global_memory_version_(0u),
-      non_aliasing_refs_(std::less<uint16_t>(), allocator->Adapter()),
-      escaped_refs_(std::less<uint16_t>(), allocator->Adapter()),
-      escaped_ifield_clobber_set_(EscapedIFieldClobberKeyComparator(), allocator->Adapter()),
-      escaped_array_clobber_set_(EscapedArrayClobberKeyComparator(), allocator->Adapter()),
-      range_checked_(RangeCheckKeyComparator(), allocator->Adapter()),
-      null_checked_(std::less<uint16_t>(), allocator->Adapter()),
-      div_zero_checked_(std::less<uint16_t>(), allocator->Adapter()),
-      merge_names_(allocator->Adapter()),
-      merge_map_(std::less<ScopedArenaVector<BasicBlockId>>(), allocator->Adapter()),
-      merge_new_memory_version_(kNoValue) {
-  std::fill_n(unresolved_sfield_version_, arraysize(unresolved_sfield_version_), 0u);
-  std::fill_n(unresolved_ifield_version_, arraysize(unresolved_ifield_version_), 0u);
-}
-
-bool LocalValueNumbering::Equals(const LocalValueNumbering& other) const {
-  DCHECK(gvn_ == other.gvn_);
-  // Compare the maps/sets and memory versions.
-  return sreg_value_map_ == other.sreg_value_map_ &&
-      sreg_wide_value_map_ == other.sreg_wide_value_map_ &&
-      sfield_value_map_ == other.sfield_value_map_ &&
-      non_aliasing_ifield_value_map_ == other.non_aliasing_ifield_value_map_ &&
-      aliasing_ifield_value_map_ == other.aliasing_ifield_value_map_ &&
-      non_aliasing_array_value_map_ == other.non_aliasing_array_value_map_ &&
-      aliasing_array_value_map_ == other.aliasing_array_value_map_ &&
-      SameMemoryVersion(other) &&
-      non_aliasing_refs_ == other.non_aliasing_refs_ &&
-      escaped_refs_ == other.escaped_refs_ &&
-      escaped_ifield_clobber_set_ == other.escaped_ifield_clobber_set_ &&
-      escaped_array_clobber_set_ == other.escaped_array_clobber_set_ &&
-      range_checked_ == other.range_checked_ &&
-      null_checked_ == other.null_checked_ &&
-      div_zero_checked_ == other.div_zero_checked_;
-}
-
-void LocalValueNumbering::MergeOne(const LocalValueNumbering& other, MergeType merge_type) {
-  CopyLiveSregValues(&sreg_value_map_, other.sreg_value_map_);
-  CopyLiveSregValues(&sreg_wide_value_map_, other.sreg_wide_value_map_);
-
-  if (merge_type == kReturnMerge) {
-    // RETURN or PHI+RETURN. We need only sreg value maps.
-    return;
-  }
-
-  non_aliasing_ifield_value_map_ = other.non_aliasing_ifield_value_map_;
-  CopyAliasingValuesMap(&non_aliasing_array_value_map_, other.non_aliasing_array_value_map_);
-  non_aliasing_refs_ = other.non_aliasing_refs_;
-  range_checked_ = other.range_checked_;
-  null_checked_ = other.null_checked_;
-  div_zero_checked_ = other.div_zero_checked_;
-
-  const BasicBlock* pred_bb = gvn_->GetBasicBlock(other.Id());
-  if (GlobalValueNumbering::HasNullCheckLastInsn(pred_bb, Id())) {
-    int s_reg = pred_bb->last_mir_insn->ssa_rep->uses[0];
-    null_checked_.insert(other.GetOperandValue(s_reg));
-  }
-
-  if (merge_type == kCatchMerge) {
-    // Memory is clobbered. Use new memory version and don't merge aliasing locations.
-    global_memory_version_ = NewMemoryVersion(&merge_new_memory_version_);
-    std::fill_n(unresolved_sfield_version_, arraysize(unresolved_sfield_version_),
-                global_memory_version_);
-    std::fill_n(unresolved_ifield_version_, arraysize(unresolved_ifield_version_),
-                global_memory_version_);
-    PruneNonAliasingRefsForCatch();
-    return;
-  }
-
-  DCHECK(merge_type == kNormalMerge);
-  global_memory_version_ = other.global_memory_version_;
-  std::copy_n(other.unresolved_ifield_version_, arraysize(unresolved_ifield_version_),
-              unresolved_ifield_version_);
-  std::copy_n(other.unresolved_sfield_version_, arraysize(unresolved_sfield_version_),
-              unresolved_sfield_version_);
-  sfield_value_map_ = other.sfield_value_map_;
-  CopyAliasingValuesMap(&aliasing_ifield_value_map_, other.aliasing_ifield_value_map_);
-  CopyAliasingValuesMap(&aliasing_array_value_map_, other.aliasing_array_value_map_);
-  escaped_refs_ = other.escaped_refs_;
-  escaped_ifield_clobber_set_ = other.escaped_ifield_clobber_set_;
-  escaped_array_clobber_set_ = other.escaped_array_clobber_set_;
-}
-
-bool LocalValueNumbering::SameMemoryVersion(const LocalValueNumbering& other) const {
-  return
-      global_memory_version_ == other.global_memory_version_ &&
-      std::equal(unresolved_ifield_version_,
-                 unresolved_ifield_version_ + arraysize(unresolved_ifield_version_),
-                 other.unresolved_ifield_version_) &&
-      std::equal(unresolved_sfield_version_,
-                 unresolved_sfield_version_ + arraysize(unresolved_sfield_version_),
-                 other.unresolved_sfield_version_);
-}
-
-uint16_t LocalValueNumbering::NewMemoryVersion(uint16_t* new_version) {
-  if (*new_version == kNoValue) {
-    *new_version = gvn_->LookupValue(kMergeBlockMemoryVersionBumpOp, 0u, 0u, id_);
-  }
-  return *new_version;
-}
-
-void LocalValueNumbering::MergeMemoryVersions(bool clobbered_catch) {
-  DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
-  const LocalValueNumbering* cmp = gvn_->merge_lvns_[0];
-  // Check if the global version has changed.
-  bool new_global_version = clobbered_catch;
-  if (!new_global_version) {
-    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-      if (lvn->global_memory_version_ != cmp->global_memory_version_) {
-        // Use a new version for everything.
-        new_global_version = true;
-        break;
-      }
-    }
-  }
-  if (new_global_version) {
-    global_memory_version_ = NewMemoryVersion(&merge_new_memory_version_);
-    std::fill_n(unresolved_sfield_version_, arraysize(unresolved_sfield_version_),
-                merge_new_memory_version_);
-    std::fill_n(unresolved_ifield_version_, arraysize(unresolved_ifield_version_),
-                merge_new_memory_version_);
-  } else {
-    // Initialize with a copy of memory versions from the comparison LVN.
-    global_memory_version_ = cmp->global_memory_version_;
-    std::copy_n(cmp->unresolved_ifield_version_, arraysize(unresolved_ifield_version_),
-                unresolved_ifield_version_);
-    std::copy_n(cmp->unresolved_sfield_version_, arraysize(unresolved_sfield_version_),
-                unresolved_sfield_version_);
-    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-      if (lvn == cmp) {
-        continue;
-      }
-      for (size_t i = 0; i != kDexMemAccessTypeCount; ++i) {
-        if (lvn->unresolved_ifield_version_[i] != cmp->unresolved_ifield_version_[i]) {
-          unresolved_ifield_version_[i] = NewMemoryVersion(&merge_new_memory_version_);
-        }
-        if (lvn->unresolved_sfield_version_[i] != cmp->unresolved_sfield_version_[i]) {
-          unresolved_sfield_version_[i] = NewMemoryVersion(&merge_new_memory_version_);
-        }
-      }
-    }
-  }
-}
-
-void LocalValueNumbering::PruneNonAliasingRefsForCatch() {
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    const BasicBlock* bb = gvn_->GetBasicBlock(lvn->Id());
-    if (UNLIKELY(bb->taken == id_) || UNLIKELY(bb->fall_through == id_)) {
-      // A non-exceptional path to a catch handler means that the catch block was actually
-      // empty and all exceptional paths lead to the shared path after that empty block.
-      continue;
-    }
-    DCHECK_EQ(bb->taken, kNullBlock);
-    DCHECK_NE(bb->fall_through, kNullBlock);
-    const BasicBlock* fall_through_bb = gvn_->GetBasicBlock(bb->fall_through);
-    const MIR* mir = fall_through_bb->first_mir_insn;
-    DCHECK(mir != nullptr);
-    // Only INVOKEs can leak and clobber non-aliasing references if they throw.
-    if ((mir->dalvikInsn.FlagsOf() & Instruction::kInvoke) != 0) {
-      HandleInvokeArgs(mir, lvn);
-    }
-  }
-}
-
-template <typename Set, Set LocalValueNumbering::* set_ptr>
-void LocalValueNumbering::IntersectSets() {
-  DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
-
-  // Find the LVN with the least entries in the set.
-  const LocalValueNumbering* least_entries_lvn = gvn_->merge_lvns_[0];
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    if ((lvn->*set_ptr).size() < (least_entries_lvn->*set_ptr).size()) {
-      least_entries_lvn = lvn;
-    }
-  }
-
-  // For each key check if it's in all the LVNs.
-  for (const auto& key : least_entries_lvn->*set_ptr) {
-    bool checked = true;
-    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-      if (lvn != least_entries_lvn && (lvn->*set_ptr).count(key) == 0u) {
-        checked = false;
-        break;
-      }
-    }
-    if (checked) {
-      (this->*set_ptr).emplace_hint((this->*set_ptr).end(), key);
-    }
-  }
-}
-
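The strategy above is the standard one for intersecting several sorted sets: scan the smallest set and probe the others, so the work is bounded by the smallest input, and insert with an end() hint since keys arrive in sorted order. A standalone sketch over std::set (assumes at least one input set):

    #include <set>
    #include <vector>

    template <typename T>
    std::set<T> IntersectAll(const std::vector<const std::set<T>*>& sets) {
      const std::set<T>* smallest = sets.front();
      for (const std::set<T>* s : sets) {
        if (s->size() < smallest->size()) {
          smallest = s;
        }
      }
      std::set<T> result;
      for (const T& key : *smallest) {
        bool in_all = true;
        for (const std::set<T>* s : sets) {
          if (s != smallest && s->count(key) == 0u) {
            in_all = false;
            break;
          }
        }
        if (in_all) {
          result.emplace_hint(result.end(), key);  // Sorted arrival: O(1) amortized.
        }
      }
      return result;
    }
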
-void LocalValueNumbering::CopyLiveSregValues(SregValueMap* dest, const SregValueMap& src) {
-  auto dest_end = dest->end();
-  ArenaBitVector* live_in_v = gvn_->GetMirGraph()->GetBasicBlock(id_)->data_flow_info->live_in_v;
-  DCHECK(live_in_v != nullptr);
-  for (const auto& entry : src) {
-    bool live = live_in_v->IsBitSet(gvn_->GetMirGraph()->SRegToVReg(entry.first));
-    if (live) {
-      dest->PutBefore(dest_end, entry.first, entry.second);
-    }
-  }
-}
-
-template <LocalValueNumbering::SregValueMap LocalValueNumbering::* map_ptr>
-void LocalValueNumbering::IntersectSregValueMaps() {
-  DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
-
-  // Find the LVN with the least entries in the set.
-  const LocalValueNumbering* least_entries_lvn = gvn_->merge_lvns_[0];
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    if ((lvn->*map_ptr).size() < (least_entries_lvn->*map_ptr).size()) {
-      least_entries_lvn = lvn;
-    }
-  }
-
-  // For each key check if it's in all the LVNs.
-  ArenaBitVector* live_in_v = gvn_->GetMirGraph()->GetBasicBlock(id_)->data_flow_info->live_in_v;
-  DCHECK(live_in_v != nullptr);
-  for (const auto& entry : least_entries_lvn->*map_ptr) {
-    bool live_and_same = live_in_v->IsBitSet(gvn_->GetMirGraph()->SRegToVReg(entry.first));
-    if (live_and_same) {
-      for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-        if (lvn != least_entries_lvn) {
-          auto it = (lvn->*map_ptr).find(entry.first);
-          if (it == (lvn->*map_ptr).end() || !(it->second == entry.second)) {
-            live_and_same = false;
-            break;
-          }
-        }
-      }
-    }
-    if (live_and_same) {
-      (this->*map_ptr).PutBefore((this->*map_ptr).end(), entry.first, entry.second);
-    }
-  }
-}
-
-// Intersect maps as sets. The value type must be equality-comparable.
-template <typename Map>
-void LocalValueNumbering::InPlaceIntersectMaps(Map* work_map, const Map& other_map) {
-  auto work_it = work_map->begin(), work_end = work_map->end();
-  auto cmp = work_map->value_comp();
-  for (const auto& entry : other_map) {
-    while (work_it != work_end &&
-        (cmp(*work_it, entry) ||
-         (!cmp(entry, *work_it) && !(work_it->second == entry.second)))) {
-      work_it = work_map->erase(work_it);
-    }
-    if (work_it == work_end) {
-      return;
-    }
-    ++work_it;
-  }
-}
-
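For comparison, a strict in-place intersection over ordered maps as a standalone sketch; the member above instead walks the working map with a single resumable iterator, trading a per-key lookup for one linear pass:

    #include <map>

    // Keep a key only if both maps carry it with an equal value.
    template <typename K, typename V>
    void StrictIntersect(std::map<K, V>* work, const std::map<K, V>& other) {
      for (auto it = work->begin(); it != work->end(); ) {
        auto found = other.find(it->first);
        if (found == other.end() || !(found->second == it->second)) {
          it = work->erase(it);  // Absent or carries a different value: drop it.
        } else {
          ++it;
        }
      }
    }
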
-template <typename Set, Set LocalValueNumbering::*set_ptr, void (LocalValueNumbering::*MergeFn)(
-    const typename Set::value_type& entry, typename Set::iterator hint)>
-void LocalValueNumbering::MergeSets() {
-  auto cmp = (this->*set_ptr).value_comp();
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    auto my_it = (this->*set_ptr).begin(), my_end = (this->*set_ptr).end();
-    for (const auto& entry : lvn->*set_ptr) {
-      while (my_it != my_end && cmp(*my_it, entry)) {
-        ++my_it;
-      }
-      if (my_it != my_end && !cmp(entry, *my_it)) {
-        // Already handled.
-        ++my_it;
-      } else {
-        // Merge values for this field_id.
-        (this->*MergeFn)(entry, my_it);  // my_it remains valid across inserts to std::set/SafeMap.
-      }
-    }
-  }
-}
-
-void LocalValueNumbering::IntersectAliasingValueLocations(AliasingValues* work_values,
-                                                          const AliasingValues* values) {
-  auto cmp = work_values->load_value_map.key_comp();
-  auto work_it = work_values->load_value_map.begin(), work_end = work_values->load_value_map.end();
-  auto store_it = values->store_loc_set.begin(), store_end = values->store_loc_set.end();
-  auto load_it = values->load_value_map.begin(), load_end = values->load_value_map.end();
-  while (store_it != store_end || load_it != load_end) {
-    uint16_t loc;
-    if (store_it != store_end && (load_it == load_end || *store_it < load_it->first)) {
-      loc = *store_it;
-      ++store_it;
-    } else {
-      loc = load_it->first;
-      ++load_it;
-      DCHECK(store_it == store_end || cmp(loc, *store_it));
-    }
-    while (work_it != work_end && cmp(work_it->first, loc)) {
-      work_it = work_values->load_value_map.erase(work_it);
-    }
-    if (work_it != work_end && !cmp(loc, work_it->first)) {
-      // The location matches, keep it.
-      ++work_it;
-    }
-  }
-  while (work_it != work_end) {
-    work_it = work_values->load_value_map.erase(work_it);
-  }
-}
-
-void LocalValueNumbering::MergeEscapedRefs(const ValueNameSet::value_type& entry,
-                                           ValueNameSet::iterator hint) {
-  // See if the ref is either escaped or non-aliasing in each predecessor.
-  bool is_escaped = true;
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    if (lvn->non_aliasing_refs_.count(entry) == 0u &&
-        lvn->escaped_refs_.count(entry) == 0u) {
-      is_escaped = false;
-      break;
-    }
-  }
-  if (is_escaped) {
-    escaped_refs_.emplace_hint(hint, entry);
-  }
-}
-
-void LocalValueNumbering::MergeEscapedIFieldTypeClobberSets(
-    const EscapedIFieldClobberSet::value_type& entry, EscapedIFieldClobberSet::iterator hint) {
-  // Insert only type-clobber entries (field_id == kNoValue) of escaped refs.
-  if (entry.field_id == kNoValue && escaped_refs_.count(entry.base) != 0u) {
-    escaped_ifield_clobber_set_.emplace_hint(hint, entry);
-  }
-}
-
-void LocalValueNumbering::MergeEscapedIFieldClobberSets(
-    const EscapedIFieldClobberSet::value_type& entry, EscapedIFieldClobberSet::iterator hint) {
-  // Insert only those entries of escaped refs that are not overridden by a type clobber.
-  if (!(hint != escaped_ifield_clobber_set_.end() &&
-        hint->base == entry.base && hint->type == entry.type) &&
-      escaped_refs_.count(entry.base) != 0u) {
-    escaped_ifield_clobber_set_.emplace_hint(hint, entry);
-  }
-}
-
-void LocalValueNumbering::MergeEscapedArrayClobberSets(
-    const EscapedArrayClobberSet::value_type& entry, EscapedArrayClobberSet::iterator hint) {
-  if (escaped_refs_.count(entry.base) != 0u) {
-    escaped_array_clobber_set_.emplace_hint(hint, entry);
-  }
-}
-
-void LocalValueNumbering::MergeNullChecked() {
-  DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
-
-  // Find the LVN with the least entries in the set.
-  const LocalValueNumbering* least_entries_lvn = gvn_->merge_lvns_[0];
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    if (lvn->null_checked_.size() < least_entries_lvn->null_checked_.size()) {
-      least_entries_lvn = lvn;
-    }
-  }
-
-  // For each null-checked value name check if it's null-checked in all the LVNs.
-  for (const auto& value_name : least_entries_lvn->null_checked_) {
-    // Merge null_checked_ for this ref.
-    merge_names_.clear();
-    merge_names_.resize(gvn_->merge_lvns_.size(), value_name);
-    if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
-      null_checked_.insert(null_checked_.end(), value_name);
-    }
-  }
-
-  // Now check if the least_entries_lvn has a null-check as the last insn.
-  const BasicBlock* least_entries_bb = gvn_->GetBasicBlock(least_entries_lvn->Id());
-  if (gvn_->HasNullCheckLastInsn(least_entries_bb, id_)) {
-    int s_reg = least_entries_bb->last_mir_insn->ssa_rep->uses[0];
-    uint32_t value_name = least_entries_lvn->GetOperandValue(s_reg);
-    merge_names_.clear();
-    merge_names_.resize(gvn_->merge_lvns_.size(), value_name);
-    if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
-      null_checked_.insert(value_name);
-    }
-  }
-}
-
-void LocalValueNumbering::MergeDivZeroChecked() {
-  DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
-
-  // Find the LVN with the least entries in the set.
-  const LocalValueNumbering* least_entries_lvn = gvn_->merge_lvns_[0];
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    if (lvn->div_zero_checked_.size() < least_entries_lvn->div_zero_checked_.size()) {
-      least_entries_lvn = lvn;
-    }
-  }
-
-  // For each div-zero value name check if it's div-zero checked in all the LVNs.
-  for (const auto& value_name : least_entries_lvn->div_zero_checked_) {
-    // Merge div_zero_checked_ for this value name.
-    merge_names_.clear();
-    merge_names_.resize(gvn_->merge_lvns_.size(), value_name);
-    if (gvn_->DivZeroCheckedInAllPredecessors(merge_names_)) {
-      div_zero_checked_.insert(div_zero_checked_.end(), value_name);
-    }
-  }
-}
-
-void LocalValueNumbering::MergeSFieldValues(const SFieldToValueMap::value_type& entry,
-                                            SFieldToValueMap::iterator hint) {
-  uint16_t field_id = entry.first;
-  merge_names_.clear();
-  uint16_t value_name = kNoValue;
-  bool same_values = true;
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    // Get the value name as in HandleSGet() but don't modify *lvn.
-    auto it = lvn->sfield_value_map_.find(field_id);
-    if (it != lvn->sfield_value_map_.end()) {
-      value_name = it->second;
-    } else {
-      uint16_t type = gvn_->GetSFieldType(field_id);
-      value_name = gvn_->LookupValue(kResolvedSFieldOp, field_id,
-                                     lvn->unresolved_sfield_version_[type],
-                                     lvn->global_memory_version_);
-    }
-
-    same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
-    merge_names_.push_back(value_name);
-  }
-  if (same_values) {
-    // value_name already contains the result.
-  } else {
-    auto lb = merge_map_.lower_bound(merge_names_);
-    if (lb != merge_map_.end() && !merge_map_.key_comp()(merge_names_, lb->first)) {
-      value_name = lb->second;
-    } else {
-      value_name = gvn_->LookupValue(kMergeBlockSFieldVersionBumpOp, field_id, id_, kNoValue);
-      merge_map_.PutBefore(lb, merge_names_, value_name);
-      if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
-        null_checked_.insert(value_name);
-      }
-    }
-  }
-  sfield_value_map_.PutBefore(hint, field_id, value_name);
-}
-
-void LocalValueNumbering::MergeNonAliasingIFieldValues(const IFieldLocToValueMap::value_type& entry,
-                                                       IFieldLocToValueMap::iterator hint) {
-  uint16_t field_loc = entry.first;
-  merge_names_.clear();
-  uint16_t value_name = kNoValue;
-  bool same_values = true;
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    // Get the value name as in HandleIGet() but don't modify *lvn.
-    auto it = lvn->non_aliasing_ifield_value_map_.find(field_loc);
-    if (it != lvn->non_aliasing_ifield_value_map_.end()) {
-      value_name = it->second;
-    } else {
-      value_name = gvn_->LookupValue(kNonAliasingIFieldInitialOp, field_loc, kNoValue, kNoValue);
-    }
-
-    same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
-    merge_names_.push_back(value_name);
-  }
-  if (same_values) {
-    // value_name already contains the result.
-  } else {
-    auto lb = merge_map_.lower_bound(merge_names_);
-    if (lb != merge_map_.end() && !merge_map_.key_comp()(merge_names_, lb->first)) {
-      value_name = lb->second;
-    } else {
-      value_name = gvn_->LookupValue(kMergeBlockNonAliasingIFieldVersionBumpOp, field_loc,
-                                     id_, kNoValue);
-      merge_map_.PutBefore(lb, merge_names_, value_name);
-      if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
-        null_checked_.insert(value_name);
-      }
-    }
-  }
-  non_aliasing_ifield_value_map_.PutBefore(hint, field_loc, value_name);
-}
-
-template <typename Map, Map LocalValueNumbering::*map_ptr, typename Versions>
-void LocalValueNumbering::MergeAliasingValues(const typename Map::value_type& entry,
-                                              typename Map::iterator hint) {
-  const typename Map::key_type& key = entry.first;
-
-  auto it = (this->*map_ptr).PutBefore(hint, key, AliasingValues(this));
-  AliasingValues* my_values = &it->second;
-
-  const AliasingValues* cmp_values = nullptr;
-  bool same_version = !Versions::HasNewBaseVersion(gvn_, this, key);
-  uint16_t load_memory_version_for_same_version = kNoValue;
-  if (same_version) {
-    // Find the first LVN that has values for this key.
-    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-      auto value = (lvn->*map_ptr).find(key);
-      if (value != (lvn->*map_ptr).end()) {
-        cmp_values = &value->second;
-        break;
-      }
-    }
-    DCHECK(cmp_values != nullptr);  // At least one LVN must have values for this key.
-
-    // Check if we have identical memory versions, i.e. the global memory version, unresolved
-    // field version and the values' memory_version_before_stores, last_stored_value
-    // and store_loc_set are identical.
-    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-      auto value = (lvn->*map_ptr).find(key);
-      if (value == (lvn->*map_ptr).end()) {
-        if (cmp_values->memory_version_before_stores != kNoValue) {
-          same_version = false;
-          break;
-        }
-      } else if (cmp_values->last_stored_value != value->second.last_stored_value ||
-          cmp_values->memory_version_before_stores != value->second.memory_version_before_stores ||
-          cmp_values->store_loc_set != value->second.store_loc_set) {
-        same_version = false;
-        break;
-      } else if (value->second.last_load_memory_version != kNoValue) {
-        DCHECK(load_memory_version_for_same_version == kNoValue ||
-               load_memory_version_for_same_version == value->second.last_load_memory_version);
-        load_memory_version_for_same_version = value->second.last_load_memory_version;
-      }
-    }
-  }
-
-  if (same_version) {
-    // Copy the identical values.
-    my_values->memory_version_before_stores = cmp_values->memory_version_before_stores;
-    my_values->last_stored_value = cmp_values->last_stored_value;
-    my_values->store_loc_set = cmp_values->store_loc_set;
-    my_values->last_load_memory_version = load_memory_version_for_same_version;
-    // Merge load values seen in all incoming arcs (i.e. an intersection).
-    if (!cmp_values->load_value_map.empty()) {
-      my_values->load_value_map = cmp_values->load_value_map;
-      for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-        auto value = (lvn->*map_ptr).find(key);
-        if (value == (lvn->*map_ptr).end() || value->second.load_value_map.empty()) {
-          my_values->load_value_map.clear();
-          break;
-        }
-        InPlaceIntersectMaps(&my_values->load_value_map, value->second.load_value_map);
-        if (my_values->load_value_map.empty()) {
-          break;
-        }
-      }
-    }
-  } else {
-    // Bump version number for the merge.
-    my_values->memory_version_before_stores = my_values->last_load_memory_version =
-        Versions::LookupMergeBlockValue(gvn_, id_, key);
-
-    // Calculate the locations that have been either read from or written to in each incoming LVN.
-    bool first_lvn = true;
-    for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-      auto value = (lvn->*map_ptr).find(key);
-      if (value == (lvn->*map_ptr).end()) {
-        my_values->load_value_map.clear();
-        break;
-      }
-      if (first_lvn) {
-        first_lvn = false;
-        // Copy the first LVN's locations. Values will be overwritten later.
-        my_values->load_value_map = value->second.load_value_map;
-        for (uint16_t location : value->second.store_loc_set) {
-          my_values->load_value_map.Put(location, 0u);
-        }
-      } else {
-        IntersectAliasingValueLocations(my_values, &value->second);
-      }
-    }
-    // Calculate merged values for the intersection.
-    for (auto& load_value_entry : my_values->load_value_map) {
-      uint16_t location = load_value_entry.first;
-      merge_names_.clear();
-      uint16_t value_name = kNoValue;
-      bool same_values = true;
-      for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-        value_name = Versions::LookupMergeValue(gvn_, lvn, key, location);
-        same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
-        merge_names_.push_back(value_name);
-      }
-      if (same_values) {
-        // value_name already contains the result.
-      } else {
-        auto lb = merge_map_.lower_bound(merge_names_);
-        if (lb != merge_map_.end() && !merge_map_.key_comp()(merge_names_, lb->first)) {
-          value_name = lb->second;
-        } else {
-          // NOTE: In addition to the key and id_ which don't change on an LVN recalculation
-          // during GVN, we also add location which can actually change on recalculation, so the
-          // value_name below may change. This could lead to an infinite loop if the location
-          // value name always changed when the referenced value name changes. However, given that
-          // we assign unique value names for other merges, such as Phis, such a dependency is
-          // not possible in a well-formed SSA graph.
-          value_name = Versions::LookupMergeLocationValue(gvn_, id_, key, location);
-          merge_map_.PutBefore(lb, merge_names_, value_name);
-          if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
-            null_checked_.insert(value_name);
-          }
-        }
-      }
-      load_value_entry.second = value_name;
-    }
-  }
-}
-
-void LocalValueNumbering::Merge(MergeType merge_type) {
-  DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
-
-  // Always reserve space in merge_names_. Even if we don't use it in Merge(), we may need it
-  // in GetStartingVregValueNumberImpl() when merge_names_'s allocator is not the top one.
-  merge_names_.reserve(gvn_->merge_lvns_.size());
-
-  IntersectSregValueMaps<&LocalValueNumbering::sreg_value_map_>();
-  IntersectSregValueMaps<&LocalValueNumbering::sreg_wide_value_map_>();
-  if (merge_type == kReturnMerge) {
-    // RETURN or PHI+RETURN. We need only sreg value maps.
-    return;
-  }
-
-  MergeMemoryVersions(merge_type == kCatchMerge);
-
-  // Merge non-aliasing maps/sets.
-  IntersectSets<ValueNameSet, &LocalValueNumbering::non_aliasing_refs_>();
-  if (!non_aliasing_refs_.empty() && merge_type == kCatchMerge) {
-    PruneNonAliasingRefsForCatch();
-  }
-  if (!non_aliasing_refs_.empty()) {
-    MergeSets<IFieldLocToValueMap, &LocalValueNumbering::non_aliasing_ifield_value_map_,
-              &LocalValueNumbering::MergeNonAliasingIFieldValues>();
-    MergeSets<NonAliasingArrayValuesMap, &LocalValueNumbering::non_aliasing_array_value_map_,
-              &LocalValueNumbering::MergeAliasingValues<
-                  NonAliasingArrayValuesMap, &LocalValueNumbering::non_aliasing_array_value_map_,
-                  NonAliasingArrayVersions>>();
-  }
-
-  // We won't do anything complicated for range checks; just calculate the intersection.
-  IntersectSets<RangeCheckSet, &LocalValueNumbering::range_checked_>();
-
-  // Merge null_checked_. We may later insert more, such as merged object field values.
-  MergeNullChecked();
-
-  // Now merge the div_zero_checked_.
-  MergeDivZeroChecked();
-
-  if (merge_type == kCatchMerge) {
-    // Memory is clobbered. New memory version already created, don't merge aliasing locations.
-    return;
-  }
-
-  DCHECK(merge_type == kNormalMerge);
-
-  // Merge escaped refs and clobber sets.
-  MergeSets<ValueNameSet, &LocalValueNumbering::escaped_refs_,
-            &LocalValueNumbering::MergeEscapedRefs>();
-  if (!escaped_refs_.empty()) {
-    MergeSets<EscapedIFieldClobberSet, &LocalValueNumbering::escaped_ifield_clobber_set_,
-              &LocalValueNumbering::MergeEscapedIFieldTypeClobberSets>();
-    MergeSets<EscapedIFieldClobberSet, &LocalValueNumbering::escaped_ifield_clobber_set_,
-              &LocalValueNumbering::MergeEscapedIFieldClobberSets>();
-    MergeSets<EscapedArrayClobberSet, &LocalValueNumbering::escaped_array_clobber_set_,
-              &LocalValueNumbering::MergeEscapedArrayClobberSets>();
-  }
-
-  MergeSets<SFieldToValueMap, &LocalValueNumbering::sfield_value_map_,
-            &LocalValueNumbering::MergeSFieldValues>();
-  MergeSets<AliasingIFieldValuesMap, &LocalValueNumbering::aliasing_ifield_value_map_,
-            &LocalValueNumbering::MergeAliasingValues<
-                AliasingIFieldValuesMap, &LocalValueNumbering::aliasing_ifield_value_map_,
-                AliasingIFieldVersions>>();
-  MergeSets<AliasingArrayValuesMap, &LocalValueNumbering::aliasing_array_value_map_,
-            &LocalValueNumbering::MergeAliasingValues<
-                AliasingArrayValuesMap, &LocalValueNumbering::aliasing_array_value_map_,
-                AliasingArrayVersions>>();
-}
-
-void LocalValueNumbering::PrepareEntryBlock() {
-  uint32_t vreg = gvn_->GetMirGraph()->GetFirstInVR();
-  CompilationUnit* cu = gvn_->GetCompilationUnit();
-  const char* shorty = cu->shorty;
-  ++shorty;  // Skip return value.
-  if ((cu->access_flags & kAccStatic) == 0) {
-    // If this is a non-static method, mark "this" as non-null.
-    uint16_t value_name = GetOperandValue(vreg);
-    ++vreg;
-    null_checked_.insert(value_name);
-  }
-  for ( ; *shorty != 0; ++shorty, ++vreg) {
-    if (*shorty == 'J' || *shorty == 'D') {
-      uint16_t value_name = GetOperandValueWide(vreg);
-      SetOperandValueWide(vreg, value_name);
-      ++vreg;
-    }
-  }
-}
-
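A worked reading of the shorty walk above, with a made-up method signature for illustration:

    // Instance method `double f(int, long)` => shorty "DIJ".
    //   'D'  return type, skipped by ++shorty
    //   this -> v0 (first in-vreg), marked null-checked
    //   'I'  -> v1
    //   'J'  -> v2/v3 (wide pair; the loop body's extra ++vreg skips the high word)
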
-uint16_t LocalValueNumbering::MarkNonAliasingNonNull(MIR* mir) {
-  uint16_t res = GetOperandValue(mir->ssa_rep->defs[0]);
-  DCHECK(null_checked_.find(res) == null_checked_.end());
-  null_checked_.insert(res);
-  non_aliasing_refs_.insert(res);
-  return res;
-}
-
-bool LocalValueNumbering::IsNonAliasing(uint16_t reg) const {
-  return non_aliasing_refs_.find(reg) != non_aliasing_refs_.end();
-}
-
-bool LocalValueNumbering::IsNonAliasingIField(uint16_t reg, uint16_t field_id,
-                                              uint16_t type) const {
-  if (IsNonAliasing(reg)) {
-    return true;
-  }
-  if (escaped_refs_.find(reg) == escaped_refs_.end()) {
-    return false;
-  }
-  // Check for IPUTs to unresolved fields.
-  EscapedIFieldClobberKey key1 = { reg, type, kNoValue };
-  if (escaped_ifield_clobber_set_.find(key1) != escaped_ifield_clobber_set_.end()) {
-    return false;
-  }
-  // Check for aliased IPUTs to the same field.
-  EscapedIFieldClobberKey key2 = { reg, type, field_id };
-  return escaped_ifield_clobber_set_.find(key2) == escaped_ifield_clobber_set_.end();
-}
-
-bool LocalValueNumbering::IsNonAliasingArray(uint16_t reg, uint16_t type) const {
-  if (IsNonAliasing(reg)) {
-    return true;
-  }
-  if (escaped_refs_.count(reg) == 0u) {
-    return false;
-  }
-  // Check for aliased APUTs.
-  EscapedArrayClobberKey key = { reg, type };
-  return escaped_array_clobber_set_.find(key) == escaped_array_clobber_set_.end();
-}
-
-void LocalValueNumbering::HandleNullCheck(MIR* mir, uint16_t reg) {
-  auto lb = null_checked_.lower_bound(reg);
-  if (lb != null_checked_.end() && *lb == reg) {
-    if (LIKELY(gvn_->CanModify())) {
-      if (gvn_->GetCompilationUnit()->verbose) {
-        LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
-      }
-      mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
-    }
-  } else {
-    null_checked_.insert(lb, reg);
-  }
-}
-
-void LocalValueNumbering::HandleRangeCheck(MIR* mir, uint16_t array, uint16_t index) {
-  RangeCheckKey key = { array, index };
-  auto lb = range_checked_.lower_bound(key);
-  if (lb != range_checked_.end() && !RangeCheckKeyComparator()(key, *lb)) {
-    if (LIKELY(gvn_->CanModify())) {
-      if (gvn_->GetCompilationUnit()->verbose) {
-        LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
-      }
-      mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
-    }
-  } else {
-    // Mark range check completed.
-    range_checked_.insert(lb, key);
-  }
-}
-
-void LocalValueNumbering::HandleDivZeroCheck(MIR* mir, uint16_t reg) {
-  auto lb = div_zero_checked_.lower_bound(reg);
-  if (lb != div_zero_checked_.end() && *lb == reg) {
-    if (LIKELY(gvn_->CanModify())) {
-      if (gvn_->GetCompilationUnit()->verbose) {
-        LOG(INFO) << "Removing div zero check for 0x" << std::hex << mir->offset;
-      }
-      mir->optimization_flags |= MIR_IGNORE_DIV_ZERO_CHECK;
-    }
-  } else {
-    div_zero_checked_.insert(lb, reg);
-  }
-}
-
-void LocalValueNumbering::HandlePutObject(MIR* mir) {
-  // If we're storing a non-aliasing reference, stop tracking it as non-aliasing now.
-  uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
-  HandleEscapingRef(base);
-  if (gvn_->CanModify() && null_checked_.count(base) != 0u) {
-    if (gvn_->GetCompilationUnit()->verbose) {
-      LOG(INFO) << "Removing GC card mark value null check for 0x" << std::hex << mir->offset;
-    }
-    mir->optimization_flags |= MIR_STORE_NON_NULL_VALUE;
-  }
-}
-
-void LocalValueNumbering::HandleEscapingRef(uint16_t base) {
-  auto it = non_aliasing_refs_.find(base);
-  if (it != non_aliasing_refs_.end()) {
-    non_aliasing_refs_.erase(it);
-    escaped_refs_.insert(base);
-  }
-}
-
-void LocalValueNumbering::HandleInvokeArgs(const MIR* mir, const LocalValueNumbering* mir_lvn) {
-  const int32_t* uses = mir->ssa_rep->uses;
-  const int32_t* uses_end = uses + mir->ssa_rep->num_uses;
-  while (uses != uses_end) {
-    uint16_t sreg = *uses;
-    ++uses;
-    // Avoid LookupValue() so that we don't store new values in the global value map.
-    auto local_it = mir_lvn->sreg_value_map_.find(sreg);
-    if (local_it != mir_lvn->sreg_value_map_.end()) {
-      non_aliasing_refs_.erase(local_it->second);
-    } else {
-      uint16_t value_name = gvn_->FindValue(kNoValue, sreg, kNoValue, kNoValue);
-      if (value_name != kNoValue) {
-        non_aliasing_refs_.erase(value_name);
-      }
-    }
-  }
-}
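-// Note: a non-aliasing ref passed to an invoke is erased outright rather
-// than moved to escaped_refs_; the callee may store it anywhere, so it
-// becomes fully aliasing.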
-
-uint16_t LocalValueNumbering::HandlePhi(MIR* mir) {
-  if (gvn_->merge_lvns_.empty()) {
-    // Running LVN without a full GVN?
-    return kNoValue;
-  }
-  // Determine if this Phi is merging wide regs.
-  RegLocation raw_dest = gvn_->GetMirGraph()->GetRawDest(mir);
-  if (raw_dest.high_word) {
-    // This is the high part of a wide reg. Ignore the Phi.
-    return kNoValue;
-  }
-  bool wide = raw_dest.wide;
-  // Iterate over *merge_lvns_ and skip incoming sregs for BBs without associated LVN.
-  merge_names_.clear();
-  uint16_t value_name = kNoValue;
-  bool same_values = true;
-  BasicBlockId* incoming = mir->meta.phi_incoming;
-  int32_t* uses = mir->ssa_rep->uses;
-  int16_t pos = 0;
-  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-    DCHECK_LT(pos, mir->ssa_rep->num_uses);
-    while (incoming[pos] != lvn->Id()) {
-      ++pos;
-      DCHECK_LT(pos, mir->ssa_rep->num_uses);
-    }
-    int s_reg = uses[pos];
-    ++pos;
-    value_name = wide ? lvn->GetOperandValueWide(s_reg) : lvn->GetOperandValue(s_reg);
-
-    same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
-    merge_names_.push_back(value_name);
-  }
-  if (same_values) {
-    // value_name already contains the result.
-  } else {
-    auto lb = merge_map_.lower_bound(merge_names_);
-    if (lb != merge_map_.end() && !merge_map_.key_comp()(merge_names_, lb->first)) {
-      value_name = lb->second;
-    } else {
-      value_name = gvn_->LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
-      merge_map_.PutBefore(lb, merge_names_, value_name);
-      if (!wide && gvn_->NullCheckedInAllPredecessors(merge_names_)) {
-        null_checked_.insert(value_name);
-      }
-      if (gvn_->DivZeroCheckedInAllPredecessors(merge_names_)) {
-        div_zero_checked_.insert(value_name);
-      }
-    }
-  }
-  if (wide) {
-    SetOperandValueWide(mir->ssa_rep->defs[0], value_name);
-  } else {
-    SetOperandValue(mir->ssa_rep->defs[0], value_name);
-  }
-  return value_name;
-}
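-// Example for HandlePhi(): a Phi merging value names { v1, v2 } gets a fresh
-// name the first time; another Phi merging the same { v1, v2 } (e.g. for a
-// different v-reg holding the same values) reuses that name via merge_map_.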
-
-uint16_t LocalValueNumbering::HandleConst(MIR* mir, uint32_t value) {
-  RegLocation raw_dest = gvn_->GetMirGraph()->GetRawDest(mir);
-  uint16_t res;
-  if (value == 0u && raw_dest.ref) {
-    res = GlobalValueNumbering::kNullValue;
-  } else {
-    Instruction::Code op = raw_dest.fp ? Instruction::CONST_HIGH16 : Instruction::CONST;
-    res = gvn_->LookupValue(op, Low16Bits(value), High16Bits(value), 0);
-  }
-  SetOperandValue(mir->ssa_rep->defs[0], res);
-  return res;
-}
-
-uint16_t LocalValueNumbering::HandleConstWide(MIR* mir, uint64_t value) {
-  RegLocation raw_dest = gvn_->GetMirGraph()->GetRawDest(mir);
-  Instruction::Code op = raw_dest.fp ? Instruction::CONST_HIGH16 : Instruction::CONST;
-  uint32_t low_word = Low32Bits(value);
-  uint32_t high_word = High32Bits(value);
-  uint16_t low_res = gvn_->LookupValue(op, Low16Bits(low_word), High16Bits(low_word), 1);
-  uint16_t high_res = gvn_->LookupValue(op, Low16Bits(high_word), High16Bits(high_word), 2);
-  uint16_t res = gvn_->LookupValue(op, low_res, high_res, 3);
-  SetOperandValueWide(mir->ssa_rep->defs[0], res);
-  return res;
-}
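-// Note: wide constants are hashed as two 32-bit halves (modifiers 1 and 2)
-// combined into a single name (modifier 3), so wide constants sharing a
-// 32-bit half share the intermediate name but still get distinct results.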
-
-uint16_t LocalValueNumbering::HandleAGet(MIR* mir, uint16_t opcode) {
-  uint16_t array = GetOperandValue(mir->ssa_rep->uses[0]);
-  HandleNullCheck(mir, array);
-  uint16_t index = GetOperandValue(mir->ssa_rep->uses[1]);
-  HandleRangeCheck(mir, array, index);
-  uint16_t type = AGetMemAccessType(static_cast<Instruction::Code>(opcode));
-  // Establish value number for loaded register.
-  uint16_t res;
-  if (IsNonAliasingArray(array, type)) {
-    res = HandleAliasingValuesGet<NonAliasingArrayVersions>(&non_aliasing_array_value_map_,
-                                                            array, index);
-  } else {
-    uint16_t location = gvn_->GetArrayLocation(array, index);
-    res = HandleAliasingValuesGet<AliasingArrayVersions>(&aliasing_array_value_map_,
-                                                         type, location);
-  }
-  if (opcode == Instruction::AGET_WIDE) {
-    SetOperandValueWide(mir->ssa_rep->defs[0], res);
-  } else {
-    SetOperandValue(mir->ssa_rep->defs[0], res);
-  }
-  return res;
-}
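-// Note: loads through a non-aliasing array ref are keyed by index alone,
-// while aliasing loads go through GVN-assigned (array, index) location ids
-// shared across all arrays of the same type.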
-
-void LocalValueNumbering::HandleAPut(MIR* mir, uint16_t opcode) {
-  int array_idx = (opcode == Instruction::APUT_WIDE) ? 2 : 1;
-  int index_idx = array_idx + 1;
-  uint16_t array = GetOperandValue(mir->ssa_rep->uses[array_idx]);
-  HandleNullCheck(mir, array);
-  uint16_t index = GetOperandValue(mir->ssa_rep->uses[index_idx]);
-  HandleRangeCheck(mir, array, index);
-
-  uint16_t type = APutMemAccessType(static_cast<Instruction::Code>(opcode));
-  uint16_t value = (opcode == Instruction::APUT_WIDE)
-                   ? GetOperandValueWide(mir->ssa_rep->uses[0])
-                   : GetOperandValue(mir->ssa_rep->uses[0]);
-  if (IsNonAliasing(array)) {
-    bool put_is_live = HandleAliasingValuesPut<NonAliasingArrayVersions>(
-        &non_aliasing_array_value_map_, array, index, value);
-    if (!put_is_live) {
-      // This APUT can be eliminated; it stores the same value that's already in the element.
-      // TODO: Eliminate the APUT.
-      return;
-    }
-  } else {
-    uint16_t location = gvn_->GetArrayLocation(array, index);
-    bool put_is_live = HandleAliasingValuesPut<AliasingArrayVersions>(
-        &aliasing_array_value_map_, type, location, value);
-    if (!put_is_live) {
-      // This APUT can be eliminated; it stores the same value that's already in the element.
-      // TODO: Eliminate the APUT.
-      return;
-    }
-
-    // Clobber all escaped array refs for this type.
-    for (uint16_t escaped_array : escaped_refs_) {
-      EscapedArrayClobberKey clobber_key = { escaped_array, type };
-      escaped_array_clobber_set_.insert(clobber_key);
-    }
-  }
-}
-
-uint16_t LocalValueNumbering::HandleIGet(MIR* mir, uint16_t opcode) {
-  uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
-  HandleNullCheck(mir, base);
-  const MirFieldInfo& field_info = gvn_->GetMirGraph()->GetIFieldLoweringInfo(mir);
-  uint16_t res;
-  if (!field_info.IsResolved() || field_info.IsVolatile()) {
-    // Unresolved fields may be volatile, so handle them as such to be safe.
-    HandleInvokeOrClInitOrAcquireOp(mir);  // Volatile GETs have acquire semantics.
-    // Volatile fields always get a new memory version; field id is irrelevant.
-    // Use result s_reg - will be unique.
-    res = gvn_->LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
-  } else {
-    uint16_t type = IGetMemAccessType(static_cast<Instruction::Code>(opcode));
-    uint16_t field_id = gvn_->GetIFieldId(mir);
-    if (IsNonAliasingIField(base, field_id, type)) {
-      uint16_t loc = gvn_->LookupValue(kNonAliasingIFieldLocOp, base, field_id, type);
-      auto lb = non_aliasing_ifield_value_map_.lower_bound(loc);
-      if (lb != non_aliasing_ifield_value_map_.end() && lb->first == loc) {
-        res = lb->second;
-      } else {
-        res = gvn_->LookupValue(kNonAliasingIFieldInitialOp, loc, kNoValue, kNoValue);
-        non_aliasing_ifield_value_map_.PutBefore(lb, loc, res);
-      }
-    } else {
-      res = HandleAliasingValuesGet<AliasingIFieldVersions>(&aliasing_ifield_value_map_,
-                                                            field_id, base);
-    }
-  }
-  if (opcode == Instruction::IGET_WIDE) {
-    SetOperandValueWide(mir->ssa_rep->defs[0], res);
-  } else {
-    SetOperandValue(mir->ssa_rep->defs[0], res);
-  }
-  return res;
-}
-
-void LocalValueNumbering::HandleIPut(MIR* mir, uint16_t opcode) {
-  int base_reg = (opcode == Instruction::IPUT_WIDE) ? 2 : 1;
-  uint16_t base = GetOperandValue(mir->ssa_rep->uses[base_reg]);
-  HandleNullCheck(mir, base);
-  uint16_t type = IPutMemAccessType(static_cast<Instruction::Code>(opcode));
-  const MirFieldInfo& field_info = gvn_->GetMirGraph()->GetIFieldLoweringInfo(mir);
-  if (!field_info.IsResolved()) {
-    // Unresolved fields always alias with everything of the same type.
-    // Use mir->offset as modifier; without elaborate inlining, it will be unique.
-    unresolved_ifield_version_[type] =
-        gvn_->LookupValue(kUnresolvedIFieldOp, kNoValue, kNoValue, mir->offset);
-
-    // For simplicity, treat base as escaped now.
-    HandleEscapingRef(base);
-
-    // Clobber all fields of escaped references of the same type.
-    for (uint16_t escaped_ref : escaped_refs_) {
-      EscapedIFieldClobberKey clobber_key = { escaped_ref, type, kNoValue };
-      escaped_ifield_clobber_set_.insert(clobber_key);
-    }
-
-    // Aliasing fields of the same type may have been overwritten.
-    auto it = aliasing_ifield_value_map_.begin(), end = aliasing_ifield_value_map_.end();
-    while (it != end) {
-      if (gvn_->GetIFieldType(it->first) != type) {
-        ++it;
-      } else {
-        it = aliasing_ifield_value_map_.erase(it);
-      }
-    }
-  } else if (field_info.IsVolatile()) {
-    // Nothing to do, resolved volatile fields always get a new memory version anyway and
-    // can't alias with resolved non-volatile fields.
-  } else {
-    uint16_t field_id = gvn_->GetIFieldId(mir);
-    uint16_t value = (opcode == Instruction::IPUT_WIDE)
-                     ? GetOperandValueWide(mir->ssa_rep->uses[0])
-                     : GetOperandValue(mir->ssa_rep->uses[0]);
-    if (IsNonAliasing(base)) {
-      uint16_t loc = gvn_->LookupValue(kNonAliasingIFieldLocOp, base, field_id, type);
-      auto lb = non_aliasing_ifield_value_map_.lower_bound(loc);
-      if (lb != non_aliasing_ifield_value_map_.end() && lb->first == loc) {
-        if (lb->second == value) {
-          // This IPUT can be eliminated; it stores the same value that's already in the field.
-          // TODO: Eliminate the IPUT.
-          return;
-        }
-        lb->second = value;  // Overwrite.
-      } else {
-        non_aliasing_ifield_value_map_.PutBefore(lb, loc, value);
-      }
-    } else {
-      bool put_is_live = HandleAliasingValuesPut<AliasingIFieldVersions>(
-          &aliasing_ifield_value_map_, field_id, base, value);
-      if (!put_is_live) {
-        // This IPUT can be eliminated; it stores the same value that's already in the field.
-        // TODO: Eliminate the IPUT.
-        return;
-      }
-
-      // Clobber all fields of escaped references for this field.
-      for (uint16_t escaped_ref : escaped_refs_) {
-        EscapedIFieldClobberKey clobber_key = { escaped_ref, type, field_id };
-        escaped_ifield_clobber_set_.insert(clobber_key);
-      }
-    }
-  }
-}
-
-uint16_t LocalValueNumbering::HandleSGet(MIR* mir, uint16_t opcode) {
-  const MirSFieldLoweringInfo& field_info = gvn_->GetMirGraph()->GetSFieldLoweringInfo(mir);
-  if (!field_info.IsResolved() || field_info.IsVolatile() ||
-      (!field_info.IsClassInitialized() &&
-       (mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0)) {
-    // Volatile SGETs (and unresolved fields, which are potentially volatile) have acquire
-    // semantics, and class initialization can call arbitrary functions, so we need to wipe
-    // aliasing values.
-    HandleInvokeOrClInitOrAcquireOp(mir);
-  }
-  uint16_t res;
-  if (!field_info.IsResolved() || field_info.IsVolatile()) {
-    // Unresolved fields may be volatile, so handle them as such to be safe.
-    // Volatile fields always get a new memory version; field id is irrelevant.
-    // Use result s_reg - will be unique.
-    res = gvn_->LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
-  } else {
-    uint16_t type = SGetMemAccessType(static_cast<Instruction::Code>(opcode));
-    uint16_t field_id = gvn_->GetSFieldId(mir);
-    auto lb = sfield_value_map_.lower_bound(field_id);
-    if (lb != sfield_value_map_.end() && lb->first == field_id) {
-      res = lb->second;
-    } else {
-      // Resolved non-volatile static fields can alias with non-resolved fields of the same type,
-      // so we need to use unresolved_sfield_version_[type] in addition to global_memory_version_
-      // to determine the version of the field.
-      res = gvn_->LookupValue(kResolvedSFieldOp, field_id,
-                              unresolved_sfield_version_[type], global_memory_version_);
-      sfield_value_map_.PutBefore(lb, field_id, res);
-    }
-  }
-  if (opcode == Instruction::SGET_WIDE) {
-    SetOperandValueWide(mir->ssa_rep->defs[0], res);
-  } else {
-    SetOperandValue(mir->ssa_rep->defs[0], res);
-  }
-  return res;
-}
-
-void LocalValueNumbering::HandleSPut(MIR* mir, uint16_t opcode) {
-  const MirSFieldLoweringInfo& field_info = gvn_->GetMirGraph()->GetSFieldLoweringInfo(mir);
-  if (!field_info.IsClassInitialized() &&
-      (mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0) {
-    // Class initialization can call arbitrary functions, we need to wipe aliasing values.
-    HandleInvokeOrClInitOrAcquireOp(mir);
-  }
-  uint16_t type = SPutMemAccessType(static_cast<Instruction::Code>(opcode));
-  if (!field_info.IsResolved()) {
-    // Unresolved fields always alias with everything of the same type.
-    // Use mir->offset as modifier; without elaborate inlining, it will be unique.
-    unresolved_sfield_version_[type] =
-        gvn_->LookupValue(kUnresolvedSFieldOp, kNoValue, kNoValue, mir->offset);
-    RemoveSFieldsForType(type);
-  } else if (field_info.IsVolatile()) {
-    // Nothing to do, resolved volatile fields always get a new memory version anyway and
-    // can't alias with resolved non-volatile fields.
-  } else {
-    uint16_t field_id = gvn_->GetSFieldId(mir);
-    uint16_t value = (opcode == Instruction::SPUT_WIDE)
-                     ? GetOperandValueWide(mir->ssa_rep->uses[0])
-                     : GetOperandValue(mir->ssa_rep->uses[0]);
-    // Resolved non-volatile static fields can alias with non-resolved fields of the same type,
-    // so we need to use unresolved_sfield_version_[type] in addition to global_memory_version_
-    // to determine the version of the field.
-    auto lb = sfield_value_map_.lower_bound(field_id);
-    if (lb != sfield_value_map_.end() && lb->first == field_id) {
-      if (lb->second == value) {
-        // This SPUT can be eliminated; it stores the same value that's already in the field.
-        // TODO: Eliminate the SPUT.
-        return;
-      }
-      lb->second = value;  // Overwrite.
-    } else {
-      sfield_value_map_.PutBefore(lb, field_id, value);
-    }
-  }
-}
-
-void LocalValueNumbering::RemoveSFieldsForType(uint16_t type) {
-  // Erase all static fields of this type from the sfield_value_map_.
-  for (auto it = sfield_value_map_.begin(), end = sfield_value_map_.end(); it != end; ) {
-    if (gvn_->GetSFieldType(it->first) == type) {
-      it = sfield_value_map_.erase(it);
-    } else {
-      ++it;
-    }
-  }
-}
-
-void LocalValueNumbering::HandleInvokeOrClInitOrAcquireOp(MIR* mir) {
-  // Use mir->offset as modifier; without elaborate inlining, it will be unique.
-  global_memory_version_ =
-      gvn_->LookupValue(kInvokeMemoryVersionBumpOp, 0u, 0u, mir->offset);
-  // All static fields, as well as instance fields and array elements of aliasing references
-  // (including escaped references), may have been modified.
-  sfield_value_map_.clear();
-  aliasing_ifield_value_map_.clear();
-  aliasing_array_value_map_.clear();
-  escaped_refs_.clear();
-  escaped_ifield_clobber_set_.clear();
-  escaped_array_clobber_set_.clear();
-}
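-// Note: null_checked_, range_checked_ and div_zero_checked_ deliberately
-// survive here; a call can modify heap contents but not the null-ness of an
-// existing reference, the length of an existing array, or a value already
-// compared against zero.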
-
-uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
-  uint16_t res = kNoValue;
-  uint16_t opcode = mir->dalvikInsn.opcode;
-  switch (opcode) {
-    case Instruction::NOP:
-    case Instruction::RETURN_VOID:
-    case Instruction::RETURN:
-    case Instruction::RETURN_OBJECT:
-    case Instruction::RETURN_WIDE:
-    case Instruction::GOTO:
-    case Instruction::GOTO_16:
-    case Instruction::GOTO_32:
-    case Instruction::THROW:
-    case Instruction::FILL_ARRAY_DATA:
-    case Instruction::PACKED_SWITCH:
-    case Instruction::SPARSE_SWITCH:
-    case Instruction::IF_EQ:
-    case Instruction::IF_NE:
-    case Instruction::IF_LT:
-    case Instruction::IF_GE:
-    case Instruction::IF_GT:
-    case Instruction::IF_LE:
-    case Instruction::IF_EQZ:
-    case Instruction::IF_NEZ:
-    case Instruction::IF_LTZ:
-    case Instruction::IF_GEZ:
-    case Instruction::IF_GTZ:
-    case Instruction::IF_LEZ:
-    case kMirOpFusedCmplFloat:
-    case kMirOpFusedCmpgFloat:
-    case kMirOpFusedCmplDouble:
-    case kMirOpFusedCmpgDouble:
-    case kMirOpFusedCmpLong:
-      // Nothing defined - take no action.
-      break;
-
-    case Instruction::MONITOR_ENTER:
-      HandleNullCheck(mir, GetOperandValue(mir->ssa_rep->uses[0]));
-      HandleInvokeOrClInitOrAcquireOp(mir);  // Acquire operation.
-      break;
-
-    case Instruction::MONITOR_EXIT:
-      HandleNullCheck(mir, GetOperandValue(mir->ssa_rep->uses[0]));
-      // If we're running GVN and CanModify(), an uneliminated null check indicates a
-      // bytecode error.
-      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 &&
-          gvn_->work_lvn_ != nullptr && gvn_->CanModify()) {
-        LOG(WARNING) << "Bytecode error: MONITOR_EXIT is still null checked at 0x" << std::hex
-            << mir->offset << " in " << PrettyMethod(gvn_->cu_->method_idx, *gvn_->cu_->dex_file);
-      }
-      break;
-
-    case Instruction::FILLED_NEW_ARRAY:
-    case Instruction::FILLED_NEW_ARRAY_RANGE:
-      // Nothing defined but the result will be unique and non-null.
-      if (mir->next != nullptr && mir->next->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
-        uint16_t array = MarkNonAliasingNonNull(mir->next);
-        // Do not SetOperandValue(), we'll do that when we process the MOVE_RESULT_OBJECT.
-        if (kLocalValueNumberingEnableFilledNewArrayTracking && mir->ssa_rep->num_uses != 0u) {
-          AliasingValues* values = GetAliasingValues(&non_aliasing_array_value_map_, array);
-          // Clear the value if we got a merged version in a loop.
-          *values = AliasingValues(this);
-          for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
-            DCHECK_EQ(High16Bits(i), 0u);
-            uint16_t index = gvn_->LookupValue(Instruction::CONST, i, 0u, 0);
-            uint16_t value = GetOperandValue(mir->ssa_rep->uses[i]);
-            values->load_value_map.Put(index, value);
-            RangeCheckKey key = { array, index };
-            range_checked_.insert(key);
-          }
-        }
-        // The MOVE_RESULT_OBJECT will be processed next and we'll return the value name then.
-      }
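-      // Tracking the element values above lets a later AGET with a constant
-      // index reuse the value fed into FILLED_NEW_ARRAY and, through
-      // range_checked_, skip its range check.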
-      // All args escaped (if references).
-      for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
-        uint16_t reg = GetOperandValue(mir->ssa_rep->uses[i]);
-        HandleEscapingRef(reg);
-      }
-      break;
-
-    case kMirOpNullCheck:
-      HandleNullCheck(mir, GetOperandValue(mir->ssa_rep->uses[0]));
-      break;
-
-    case Instruction::INVOKE_DIRECT:
-    case Instruction::INVOKE_DIRECT_RANGE:
-    case Instruction::INVOKE_VIRTUAL:
-    case Instruction::INVOKE_VIRTUAL_RANGE:
-    case Instruction::INVOKE_SUPER:
-    case Instruction::INVOKE_SUPER_RANGE:
-    case Instruction::INVOKE_INTERFACE:
-    case Instruction::INVOKE_INTERFACE_RANGE: {
-        // Nothing defined but handle the null check.
-        uint16_t reg = GetOperandValue(mir->ssa_rep->uses[0]);
-        HandleNullCheck(mir, reg);
-      }
-      FALLTHROUGH_INTENDED;
-    case Instruction::INVOKE_STATIC:
-    case Instruction::INVOKE_STATIC_RANGE:
-      // Make ref args aliasing.
-      HandleInvokeArgs(mir, this);
-      HandleInvokeOrClInitOrAcquireOp(mir);
-      break;
-
-    case Instruction::INSTANCE_OF: {
-        uint16_t operand = GetOperandValue(mir->ssa_rep->uses[0]);
-        uint16_t type = mir->dalvikInsn.vC;
-        res = gvn_->LookupValue(Instruction::INSTANCE_OF, operand, type, kNoValue);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
-      break;
-    case Instruction::CHECK_CAST:
-      if (gvn_->CanModify()) {
-        // Check if there was an instance-of operation on the same value and if we are
-        // in a block where its result is true. If so, we can eliminate the check-cast.
-        uint16_t operand = GetOperandValue(mir->ssa_rep->uses[0]);
-        uint16_t type = mir->dalvikInsn.vB;
-        uint16_t cond = gvn_->FindValue(Instruction::INSTANCE_OF, operand, type, kNoValue);
-        if (cond != kNoValue && gvn_->IsTrueInBlock(cond, Id())) {
-          if (gvn_->GetCompilationUnit()->verbose) {
-            LOG(INFO) << "Removing check-cast at 0x" << std::hex << mir->offset;
-          }
-          // Don't use kMirOpNop. Keep the check-cast as it defines the type of the register.
-          mir->optimization_flags |= MIR_IGNORE_CHECK_CAST;
-        }
-      }
-      break;
-
-    case Instruction::MOVE_RESULT:
-    case Instruction::MOVE_RESULT_OBJECT:
-      // 1 result, treat as unique each time, use result s_reg - will be unique.
-      res = GetOperandValue(mir->ssa_rep->defs[0]);
-      SetOperandValue(mir->ssa_rep->defs[0], res);
-      break;
-    case Instruction::MOVE_EXCEPTION:
-    case Instruction::NEW_INSTANCE:
-    case Instruction::NEW_ARRAY:
-      // 1 result, treat as unique each time, use result s_reg - will be unique.
-      res = MarkNonAliasingNonNull(mir);
-      SetOperandValue(mir->ssa_rep->defs[0], res);
-      break;
-    case Instruction::CONST_CLASS:
-      DCHECK_EQ(Low16Bits(mir->dalvikInsn.vB), mir->dalvikInsn.vB);
-      res = gvn_->LookupValue(Instruction::CONST_CLASS, mir->dalvikInsn.vB, 0, 0);
-      SetOperandValue(mir->ssa_rep->defs[0], res);
-      null_checked_.insert(res);
-      non_aliasing_refs_.insert(res);
-      break;
-    case Instruction::CONST_STRING:
-    case Instruction::CONST_STRING_JUMBO:
-      // These strings are internalized, so assign value based on the string pool index.
-      res = gvn_->LookupValue(Instruction::CONST_STRING, Low16Bits(mir->dalvikInsn.vB),
-                              High16Bits(mir->dalvikInsn.vB), 0);
-      SetOperandValue(mir->ssa_rep->defs[0], res);
-      null_checked_.insert(res);  // May already be there.
-      // NOTE: Hacking the contents of an internalized string via reflection is possible
-      // but the behavior is undefined. Therefore, we consider the string constant and
-      // the reference non-aliasing.
-      // TUNING: We could keep this property even if the reference "escapes".
-      non_aliasing_refs_.insert(res);  // May already be there.
-      break;
-    case Instruction::MOVE_RESULT_WIDE:
-      // 1 wide result, treat as unique each time, use result s_reg - will be unique.
-      res = GetOperandValueWide(mir->ssa_rep->defs[0]);
-      SetOperandValueWide(mir->ssa_rep->defs[0], res);
-      break;
-
-    case kMirOpPhi:
-      res = HandlePhi(mir);
-      break;
-
-    case Instruction::MOVE:
-    case Instruction::MOVE_OBJECT:
-    case Instruction::MOVE_16:
-    case Instruction::MOVE_OBJECT_16:
-    case Instruction::MOVE_FROM16:
-    case Instruction::MOVE_OBJECT_FROM16:
-    case kMirOpCopy:
-      // Just copy value number of source to value number of result.
-      res = GetOperandValue(mir->ssa_rep->uses[0]);
-      SetOperandValue(mir->ssa_rep->defs[0], res);
-      break;
-
-    case Instruction::MOVE_WIDE:
-    case Instruction::MOVE_WIDE_16:
-    case Instruction::MOVE_WIDE_FROM16:
-      // Just copy value number of source to value number of result.
-      res = GetOperandValueWide(mir->ssa_rep->uses[0]);
-      SetOperandValueWide(mir->ssa_rep->defs[0], res);
-      break;
-
-    case Instruction::CONST_HIGH16:
-      res = HandleConst(mir, mir->dalvikInsn.vB << 16);
-      break;
-    case Instruction::CONST:
-    case Instruction::CONST_4:
-    case Instruction::CONST_16:
-      res = HandleConst(mir, mir->dalvikInsn.vB);
-      break;
-
-    case Instruction::CONST_WIDE_16:
-    case Instruction::CONST_WIDE_32:
-      res = HandleConstWide(
-          mir,
-          mir->dalvikInsn.vB +
-              ((mir->dalvikInsn.vB & 0x80000000) != 0 ? UINT64_C(0xffffffff00000000) : 0u));
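-      // (The addition above merely sign-extends the 32-bit vB to 64 bits.)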
-      break;
-
-    case Instruction::CONST_WIDE:
-      res = HandleConstWide(mir, mir->dalvikInsn.vB_wide);
-      break;
-
-    case Instruction::CONST_WIDE_HIGH16:
-      res = HandleConstWide(mir, static_cast<uint64_t>(mir->dalvikInsn.vB) << 48);
-      break;
-
-    case Instruction::ARRAY_LENGTH: {
-        // Handle the null check.
-        uint16_t reg = GetOperandValue(mir->ssa_rep->uses[0]);
-        HandleNullCheck(mir, reg);
-      }
-      FALLTHROUGH_INTENDED;
-    case Instruction::NEG_INT:
-    case Instruction::NOT_INT:
-    case Instruction::NEG_FLOAT:
-    case Instruction::INT_TO_BYTE:
-    case Instruction::INT_TO_SHORT:
-    case Instruction::INT_TO_CHAR:
-    case Instruction::INT_TO_FLOAT:
-    case Instruction::FLOAT_TO_INT: {
-        // res = op + 1 operand
-        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        res = gvn_->LookupValue(opcode, operand1, kNoValue, kNoValue);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::LONG_TO_FLOAT:
-    case Instruction::LONG_TO_INT:
-    case Instruction::DOUBLE_TO_FLOAT:
-    case Instruction::DOUBLE_TO_INT: {
-        // res = op + 1 wide operand
-        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        res = gvn_->LookupValue(opcode, operand1, kNoValue, kNoValue);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::DOUBLE_TO_LONG:
-    case Instruction::LONG_TO_DOUBLE:
-    case Instruction::NEG_LONG:
-    case Instruction::NOT_LONG:
-    case Instruction::NEG_DOUBLE: {
-        // wide res = op + 1 wide operand
-        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        res = gvn_->LookupValue(opcode, operand1, kNoValue, kNoValue);
-        SetOperandValueWide(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::FLOAT_TO_DOUBLE:
-    case Instruction::FLOAT_TO_LONG:
-    case Instruction::INT_TO_DOUBLE:
-    case Instruction::INT_TO_LONG: {
-        // wide res = op + 1 operand
-        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        res = gvn_->LookupValue(opcode, operand1, kNoValue, kNoValue);
-        SetOperandValueWide(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::CMPL_DOUBLE:
-    case Instruction::CMPG_DOUBLE:
-    case Instruction::CMP_LONG: {
-        // res = op + 2 wide operands
-        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
-        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::DIV_INT:
-    case Instruction::DIV_INT_2ADDR:
-    case Instruction::REM_INT:
-    case Instruction::REM_INT_2ADDR:
-      HandleDivZeroCheck(mir, GetOperandValue(mir->ssa_rep->uses[1]));
-      FALLTHROUGH_INTENDED;
-
-    case Instruction::CMPG_FLOAT:
-    case Instruction::CMPL_FLOAT:
-    case Instruction::ADD_INT:
-    case Instruction::ADD_INT_2ADDR:
-    case Instruction::MUL_INT:
-    case Instruction::MUL_INT_2ADDR:
-    case Instruction::AND_INT:
-    case Instruction::AND_INT_2ADDR:
-    case Instruction::OR_INT:
-    case Instruction::OR_INT_2ADDR:
-    case Instruction::XOR_INT:
-    case Instruction::XOR_INT_2ADDR:
-    case Instruction::SUB_INT:
-    case Instruction::SUB_INT_2ADDR:
-    case Instruction::SHL_INT:
-    case Instruction::SHL_INT_2ADDR:
-    case Instruction::SHR_INT:
-    case Instruction::SHR_INT_2ADDR:
-    case Instruction::USHR_INT:
-    case Instruction::USHR_INT_2ADDR: {
-        // res = op + 2 operands
-        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
-        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::DIV_LONG:
-    case Instruction::REM_LONG:
-    case Instruction::DIV_LONG_2ADDR:
-    case Instruction::REM_LONG_2ADDR:
-      HandleDivZeroCheck(mir, GetOperandValueWide(mir->ssa_rep->uses[2]));
-      FALLTHROUGH_INTENDED;
-
-    case Instruction::ADD_LONG:
-    case Instruction::SUB_LONG:
-    case Instruction::MUL_LONG:
-    case Instruction::AND_LONG:
-    case Instruction::OR_LONG:
-    case Instruction::XOR_LONG:
-    case Instruction::ADD_LONG_2ADDR:
-    case Instruction::SUB_LONG_2ADDR:
-    case Instruction::MUL_LONG_2ADDR:
-    case Instruction::AND_LONG_2ADDR:
-    case Instruction::OR_LONG_2ADDR:
-    case Instruction::XOR_LONG_2ADDR:
-    case Instruction::ADD_DOUBLE:
-    case Instruction::SUB_DOUBLE:
-    case Instruction::MUL_DOUBLE:
-    case Instruction::DIV_DOUBLE:
-    case Instruction::REM_DOUBLE:
-    case Instruction::ADD_DOUBLE_2ADDR:
-    case Instruction::SUB_DOUBLE_2ADDR:
-    case Instruction::MUL_DOUBLE_2ADDR:
-    case Instruction::DIV_DOUBLE_2ADDR:
-    case Instruction::REM_DOUBLE_2ADDR: {
-        // wide res = op + 2 wide operands
-        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
-        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
-        SetOperandValueWide(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::SHL_LONG:
-    case Instruction::SHR_LONG:
-    case Instruction::USHR_LONG:
-    case Instruction::SHL_LONG_2ADDR:
-    case Instruction::SHR_LONG_2ADDR:
-    case Instruction::USHR_LONG_2ADDR: {
-        // wide res = op + 1 wide operand + 1 operand
-        uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
-        uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[2]);
-        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
-        SetOperandValueWide(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::ADD_FLOAT:
-    case Instruction::SUB_FLOAT:
-    case Instruction::MUL_FLOAT:
-    case Instruction::DIV_FLOAT:
-    case Instruction::REM_FLOAT:
-    case Instruction::ADD_FLOAT_2ADDR:
-    case Instruction::SUB_FLOAT_2ADDR:
-    case Instruction::MUL_FLOAT_2ADDR:
-    case Instruction::DIV_FLOAT_2ADDR:
-    case Instruction::REM_FLOAT_2ADDR: {
-        // res = op + 2 operands
-        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
-        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::RSUB_INT:
-    case Instruction::ADD_INT_LIT16:
-    case Instruction::MUL_INT_LIT16:
-    case Instruction::DIV_INT_LIT16:
-    case Instruction::REM_INT_LIT16:
-    case Instruction::AND_INT_LIT16:
-    case Instruction::OR_INT_LIT16:
-    case Instruction::XOR_INT_LIT16:
-    case Instruction::ADD_INT_LIT8:
-    case Instruction::RSUB_INT_LIT8:
-    case Instruction::MUL_INT_LIT8:
-    case Instruction::DIV_INT_LIT8:
-    case Instruction::REM_INT_LIT8:
-    case Instruction::AND_INT_LIT8:
-    case Instruction::OR_INT_LIT8:
-    case Instruction::XOR_INT_LIT8:
-    case Instruction::SHL_INT_LIT8:
-    case Instruction::SHR_INT_LIT8:
-    case Instruction::USHR_INT_LIT8: {
-        // Same as res = op + 2 operands, except use vC as operand 2
-        uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
-        uint16_t operand2 = gvn_->LookupValue(Instruction::CONST, mir->dalvikInsn.vC, 0, 0);
-        res = gvn_->LookupValue(opcode, operand1, operand2, kNoValue);
-        SetOperandValue(mir->ssa_rep->defs[0], res);
-      }
-      break;
-
-    case Instruction::AGET_OBJECT:
-    case Instruction::AGET:
-    case Instruction::AGET_WIDE:
-    case Instruction::AGET_BOOLEAN:
-    case Instruction::AGET_BYTE:
-    case Instruction::AGET_CHAR:
-    case Instruction::AGET_SHORT:
-      res = HandleAGet(mir, opcode);
-      break;
-
-    case Instruction::APUT_OBJECT:
-      HandlePutObject(mir);
-      FALLTHROUGH_INTENDED;
-    case Instruction::APUT:
-    case Instruction::APUT_WIDE:
-    case Instruction::APUT_BYTE:
-    case Instruction::APUT_BOOLEAN:
-    case Instruction::APUT_SHORT:
-    case Instruction::APUT_CHAR:
-      HandleAPut(mir, opcode);
-      break;
-
-    case Instruction::IGET_OBJECT:
-    case Instruction::IGET:
-    case Instruction::IGET_WIDE:
-    case Instruction::IGET_BOOLEAN:
-    case Instruction::IGET_BYTE:
-    case Instruction::IGET_CHAR:
-    case Instruction::IGET_SHORT:
-      res = HandleIGet(mir, opcode);
-      break;
-
-    case Instruction::IPUT_OBJECT:
-      HandlePutObject(mir);
-      FALLTHROUGH_INTENDED;
-    case Instruction::IPUT:
-    case Instruction::IPUT_WIDE:
-    case Instruction::IPUT_BOOLEAN:
-    case Instruction::IPUT_BYTE:
-    case Instruction::IPUT_CHAR:
-    case Instruction::IPUT_SHORT:
-      HandleIPut(mir, opcode);
-      break;
-
-    case Instruction::SGET_OBJECT:
-    case Instruction::SGET:
-    case Instruction::SGET_WIDE:
-    case Instruction::SGET_BOOLEAN:
-    case Instruction::SGET_BYTE:
-    case Instruction::SGET_CHAR:
-    case Instruction::SGET_SHORT:
-      res = HandleSGet(mir, opcode);
-      break;
-
-    case Instruction::SPUT_OBJECT:
-      HandlePutObject(mir);
-      FALLTHROUGH_INTENDED;
-    case Instruction::SPUT:
-    case Instruction::SPUT_WIDE:
-    case Instruction::SPUT_BOOLEAN:
-    case Instruction::SPUT_BYTE:
-    case Instruction::SPUT_CHAR:
-    case Instruction::SPUT_SHORT:
-      HandleSPut(mir, opcode);
-      break;
-  }
-  return res;
-}
-
-uint16_t LocalValueNumbering::GetEndingVregValueNumberImpl(int v_reg, bool wide) const {
-  const BasicBlock* bb = gvn_->GetBasicBlock(Id());
-  DCHECK(bb != nullptr);
-  int s_reg = bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
-  if (s_reg == INVALID_SREG) {
-    return kNoValue;
-  }
-  if (gvn_->GetMirGraph()->GetRegLocation(s_reg).wide != wide) {
-    return kNoValue;
-  }
-  if (wide) {
-    int high_s_reg = bb->data_flow_info->vreg_to_ssa_map_exit[v_reg + 1];
-    if (high_s_reg != s_reg + 1) {
-      return kNoValue;  // High word has been overwritten.
-    }
-    return GetSregValueWide(s_reg);
-  } else {
-    return GetSregValue(s_reg);
-  }
-}
-
-uint16_t LocalValueNumbering::GetStartingVregValueNumberImpl(int v_reg, bool wide) const {
-  DCHECK_EQ(gvn_->mode_, GlobalValueNumbering::kModeGvnPostProcessing);
-  DCHECK(gvn_->CanModify());
-  const BasicBlock* bb = gvn_->GetBasicBlock(Id());
-  DCHECK(bb != nullptr);
-  DCHECK_NE(bb->predecessors.size(), 0u);
-  if (bb->predecessors.size() == 1u) {
-    return gvn_->GetLvn(bb->predecessors[0])->GetEndingVregValueNumberImpl(v_reg, wide);
-  }
-  merge_names_.clear();
-  uint16_t value_name = kNoValue;
-  bool same_values = true;
-  for (BasicBlockId pred_id : bb->predecessors) {
-    value_name = gvn_->GetLvn(pred_id)->GetEndingVregValueNumberImpl(v_reg, wide);
-    if (value_name == kNoValue) {
-      return kNoValue;
-    }
-    same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
-    merge_names_.push_back(value_name);
-  }
-  if (same_values) {
-    // value_name already contains the result.
-  } else {
-    auto lb = merge_map_.lower_bound(merge_names_);
-    if (lb != merge_map_.end() && !merge_map_.key_comp()(merge_names_, lb->first)) {
-      value_name = lb->second;
-    } else {
-      value_name = kNoValue;  // We never assigned a value name to this set of merged names.
-    }
-  }
-  return value_name;
-}
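-// Note: unlike HandlePhi(), this lookup never creates a new value name; if
-// the merged names were never assigned one, the v-reg value stays unknown.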
-
-}    // namespace art
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
deleted file mode 100644
index dff5e27..0000000
--- a/compiler/dex/local_value_numbering.h
+++ /dev/null
@@ -1,416 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_LOCAL_VALUE_NUMBERING_H_
-#define ART_COMPILER_DEX_LOCAL_VALUE_NUMBERING_H_
-
-#include <memory>
-
-#include "base/arena_object.h"
-#include "base/logging.h"
-#include "dex_instruction_utils.h"
-#include "global_value_numbering.h"
-
-namespace art {
-
-class DexFile;
-
-// Enable/disable tracking values stored in the FILLED_NEW_ARRAY result.
-static constexpr bool kLocalValueNumberingEnableFilledNewArrayTracking = true;
-
-class LocalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
- private:
-  static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
-
- public:
-  LocalValueNumbering(GlobalValueNumbering* gvn, BasicBlockId id, ScopedArenaAllocator* allocator);
-
-  BasicBlockId Id() const {
-    return id_;
-  }
-
-  bool Equals(const LocalValueNumbering& other) const;
-
-  bool IsValueNullChecked(uint16_t value_name) const {
-    return null_checked_.find(value_name) != null_checked_.end();
-  }
-
-  bool IsValueDivZeroChecked(uint16_t value_name) const {
-    return div_zero_checked_.find(value_name) != div_zero_checked_.end();
-  }
-
-  uint16_t GetSregValue(uint16_t s_reg) const {
-    DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
-    return GetSregValueImpl(s_reg, &sreg_value_map_);
-  }
-
-  uint16_t GetSregValueWide(uint16_t s_reg) const {
-    DCHECK(gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
-    return GetSregValueImpl(s_reg, &sreg_wide_value_map_);
-  }
-
-  // Get the starting value number for a given dalvik register.
-  uint16_t GetStartingVregValueNumber(int v_reg) const {
-    return GetStartingVregValueNumberImpl(v_reg, false);
-  }
-
-  // Get the starting value number for a given wide dalvik register.
-  uint16_t GetStartingVregValueNumberWide(int v_reg) const {
-    return GetStartingVregValueNumberImpl(v_reg, true);
-  }
-
-  enum MergeType {
-    kNormalMerge,
-    kCatchMerge,
-    kReturnMerge,  // RETURN or PHI+RETURN. Merge only sreg maps.
-  };
-
-  void MergeOne(const LocalValueNumbering& other, MergeType merge_type);
-  void Merge(MergeType merge_type);  // Merge gvn_->merge_lvns_.
-  void PrepareEntryBlock();
-
-  uint16_t GetValueNumber(MIR* mir);
-
- private:
-  // A set of value names.
-  typedef GlobalValueNumbering::ValueNameSet ValueNameSet;
-
-  // Key is s_reg, value is value name.
-  typedef ScopedArenaSafeMap<uint16_t, uint16_t> SregValueMap;
-
-  uint16_t GetEndingVregValueNumberImpl(int v_reg, bool wide) const;
-  uint16_t GetStartingVregValueNumberImpl(int v_reg, bool wide) const;
-
-  uint16_t GetSregValueImpl(int s_reg, const SregValueMap* map) const {
-    uint16_t res = kNoValue;
-    auto lb = map->find(s_reg);
-    if (lb != map->end()) {
-      res = lb->second;
-    } else {
-      res = gvn_->FindValue(kNoValue, s_reg, kNoValue, kNoValue);
-    }
-    return res;
-  }
-
-  void SetOperandValueImpl(uint16_t s_reg, uint16_t value, SregValueMap* map) {
-    DCHECK_EQ(map->count(s_reg), 0u);
-    map->Put(s_reg, value);
-  }
-
-  uint16_t GetOperandValueImpl(int s_reg, const SregValueMap* map) const {
-    uint16_t res = kNoValue;
-    auto lb = map->find(s_reg);
-    if (lb != map->end()) {
-      res = lb->second;
-    } else {
-      // Using the original value; s_reg refers to an input reg.
-      res = gvn_->LookupValue(kNoValue, s_reg, kNoValue, kNoValue);
-    }
-    return res;
-  }
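-  // Note: GetSregValueImpl() above uses FindValue() and never creates a new
-  // value name, while operand lookup uses LookupValue() and may assign a
-  // fresh name to a previously unseen input s_reg.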
-
-  void SetOperandValue(uint16_t s_reg, uint16_t value) {
-    DCHECK_EQ(sreg_wide_value_map_.count(s_reg), 0u);
-    DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
-    SetOperandValueImpl(s_reg, value, &sreg_value_map_);
-  }
-
-  uint16_t GetOperandValue(int s_reg) const {
-    DCHECK_EQ(sreg_wide_value_map_.count(s_reg), 0u);
-    DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
-    return GetOperandValueImpl(s_reg, &sreg_value_map_);
-  }
-
-  void SetOperandValueWide(uint16_t s_reg, uint16_t value) {
-    DCHECK_EQ(sreg_value_map_.count(s_reg), 0u);
-    DCHECK(gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
-    DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).high_word);
-    SetOperandValueImpl(s_reg, value, &sreg_wide_value_map_);
-  }
-
-  uint16_t GetOperandValueWide(int s_reg) const {
-    DCHECK_EQ(sreg_value_map_.count(s_reg), 0u);
-    DCHECK(gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
-    DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).high_word);
-    return GetOperandValueImpl(s_reg, &sreg_wide_value_map_);
-  }
-
-  struct RangeCheckKey {
-    uint16_t array;
-    uint16_t index;
-
-    // NOTE: Can't define this at namespace scope for a private struct.
-    bool operator==(const RangeCheckKey& other) const {
-      return array == other.array && index == other.index;
-    }
-  };
-
-  struct RangeCheckKeyComparator {
-    bool operator()(const RangeCheckKey& lhs, const RangeCheckKey& rhs) const {
-      if (lhs.array != rhs.array) {
-        return lhs.array < rhs.array;
-      }
-      return lhs.index < rhs.index;
-    }
-  };
-
-  typedef ScopedArenaSet<RangeCheckKey, RangeCheckKeyComparator> RangeCheckSet;
-
-  // Maps instance field "location" (derived from base, field_id and type) to value name.
-  typedef ScopedArenaSafeMap<uint16_t, uint16_t> IFieldLocToValueMap;
-
-  // Maps static field id to value name.
-  typedef ScopedArenaSafeMap<uint16_t, uint16_t> SFieldToValueMap;
-
-  struct EscapedIFieldClobberKey {
-    uint16_t base;      // Or array.
-    uint16_t type;
-    uint16_t field_id;  // None (kNoValue) for arrays and unresolved instance field stores.
-
-    // NOTE: Can't define this at namespace scope for a private struct.
-    bool operator==(const EscapedIFieldClobberKey& other) const {
-      return base == other.base && type == other.type && field_id == other.field_id;
-    }
-  };
-
-  struct EscapedIFieldClobberKeyComparator {
-    bool operator()(const EscapedIFieldClobberKey& lhs, const EscapedIFieldClobberKey& rhs) const {
-      // Compare base first. This makes sequential iteration respect the order of base.
-      if (lhs.base != rhs.base) {
-        return lhs.base < rhs.base;
-      }
-      // Compare type second. This makes the type-clobber entries (field_id == kNoValue) come
-      // last for a given base and type, which makes it easy to prune unnecessary entries when
-      // merging escaped_ifield_clobber_set_ from multiple LVNs.
-      if (lhs.type != rhs.type) {
-        return lhs.type < rhs.type;
-      }
-      return lhs.field_id < rhs.field_id;
-    }
-  };
-
-  typedef ScopedArenaSet<EscapedIFieldClobberKey, EscapedIFieldClobberKeyComparator>
-      EscapedIFieldClobberSet;
-
-  struct EscapedArrayClobberKey {
-    uint16_t base;
-    uint16_t type;
-
-    // NOTE: Can't define this at namespace scope for a private struct.
-    bool operator==(const EscapedArrayClobberKey& other) const {
-      return base == other.base && type == other.type;
-    }
-  };
-
-  struct EscapedArrayClobberKeyComparator {
-    bool operator()(const EscapedArrayClobberKey& lhs, const EscapedArrayClobberKey& rhs) const {
-      // Compare base first. This makes sequential iteration respect the order of base.
-      if (lhs.base != rhs.base) {
-        return lhs.base < rhs.base;
-      }
-      return lhs.type < rhs.type;
-    }
-  };
-
-  // Clobber set for previously non-aliasing array refs that escaped.
-  typedef ScopedArenaSet<EscapedArrayClobberKey, EscapedArrayClobberKeyComparator>
-      EscapedArrayClobberSet;
-
-  // Known location values for an aliasing set. The set can be tied to one of:
-  //   1. Instance field. The locations are aliasing references used to access the field.
-  //   2. Non-aliasing array reference. The locations are indexes to the array.
-  //   3. Aliasing array type. The locations are (reference, index) pair ids assigned by GVN.
-  // In each case we keep track of the last stored value, if any, and the set of locations
-  // where it was stored. We also keep track of all values known for the current write state
-  // (load_value_map), which can be known either because they have been loaded since the last
-  // store or because they contained the last_stored_value before the store and thus could not
-  // have changed as a result.
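-  // Example (instance field case): after "x.f = v", last_stored_value is v's
-  // value name and store_loc_set is { x }. A later "y.f" load adds y and its
-  // result to load_value_map; a later "z.f = w" with w != v drops the
-  // load_value_map entries that differ from w, since they may now be stale.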
-  struct AliasingValues {
-    explicit AliasingValues(LocalValueNumbering* lvn)
-        : memory_version_before_stores(kNoValue),
-          last_stored_value(kNoValue),
-          store_loc_set(std::less<uint16_t>(), lvn->null_checked_.get_allocator()),
-          last_load_memory_version(kNoValue),
-          load_value_map(std::less<uint16_t>(), lvn->null_checked_.get_allocator()) {
-    }
-
-    uint16_t memory_version_before_stores;  // kNoValue if start version for the field.
-    uint16_t last_stored_value;             // Last stored value name, kNoValue if none.
-    ValueNameSet store_loc_set;             // Where was last_stored_value stored.
-
-    // Maps refs (other than stored_to) to currently known values for this field. On write,
-    // anything that differs from the written value is removed, as it may be overwritten.
-    uint16_t last_load_memory_version;    // kNoValue if not known.
-    ScopedArenaSafeMap<uint16_t, uint16_t> load_value_map;
-
-    // NOTE: Can't define this at namespace scope for a private struct.
-    bool operator==(const AliasingValues& other) const {
-      return memory_version_before_stores == other.memory_version_before_stores &&
-          last_load_memory_version == other.last_load_memory_version &&
-          last_stored_value == other.last_stored_value &&
-          store_loc_set == other.store_loc_set &&
-          load_value_map == other.load_value_map;
-    }
-  };
-
-  // Maps instance field id to AliasingValues, locations are object refs.
-  typedef ScopedArenaSafeMap<uint16_t, AliasingValues> AliasingIFieldValuesMap;
-
-  // Maps non-aliasing array reference to AliasingValues, locations are array indexes.
-  typedef ScopedArenaSafeMap<uint16_t, AliasingValues> NonAliasingArrayValuesMap;
-
-  // Maps aliasing array type to AliasingValues, locations are (array, index) pair ids.
-  typedef ScopedArenaSafeMap<uint16_t, AliasingValues> AliasingArrayValuesMap;
-
-  // Helper classes defining versions for updating and merging the AliasingValues maps above.
-  class AliasingIFieldVersions;
-  class NonAliasingArrayVersions;
-  class AliasingArrayVersions;
-
-  template <typename Map>
-  AliasingValues* GetAliasingValues(Map* map, const typename Map::key_type& key);
-
-  template <typename Versions, typename KeyType>
-  void UpdateAliasingValuesLoadVersion(const KeyType& key, AliasingValues* values);
-
-  template <typename Versions, typename Map>
-  static uint16_t AliasingValuesMergeGet(GlobalValueNumbering* gvn,
-                                         const LocalValueNumbering* lvn,
-                                         Map* map, const typename Map::key_type& key,
-                                         uint16_t location);
-
-  template <typename Versions, typename Map>
-  uint16_t HandleAliasingValuesGet(Map* map, const typename Map::key_type& key,
-                                   uint16_t location);
-
-  template <typename Versions, typename Map>
-  bool HandleAliasingValuesPut(Map* map, const typename Map::key_type& key,
-                               uint16_t location, uint16_t value);
-
-  template <typename K>
-  void CopyAliasingValuesMap(ScopedArenaSafeMap<K, AliasingValues>* dest,
-                             const ScopedArenaSafeMap<K, AliasingValues>& src);
-
-  uint16_t MarkNonAliasingNonNull(MIR* mir);
-  bool IsNonAliasing(uint16_t reg) const;
-  bool IsNonAliasingIField(uint16_t reg, uint16_t field_id, uint16_t type) const;
-  bool IsNonAliasingArray(uint16_t reg, uint16_t type) const;
-  void HandleNullCheck(MIR* mir, uint16_t reg);
-  void HandleRangeCheck(MIR* mir, uint16_t array, uint16_t index);
-  void HandleDivZeroCheck(MIR* mir, uint16_t reg);
-  void HandlePutObject(MIR* mir);
-  void HandleEscapingRef(uint16_t base);
-  void HandleInvokeArgs(const MIR* mir, const LocalValueNumbering* mir_lvn);
-  uint16_t HandlePhi(MIR* mir);
-  uint16_t HandleConst(MIR* mir, uint32_t value);
-  uint16_t HandleConstWide(MIR* mir, uint64_t value);
-  uint16_t HandleAGet(MIR* mir, uint16_t opcode);
-  void HandleAPut(MIR* mir, uint16_t opcode);
-  uint16_t HandleIGet(MIR* mir, uint16_t opcode);
-  void HandleIPut(MIR* mir, uint16_t opcode);
-  uint16_t HandleSGet(MIR* mir, uint16_t opcode);
-  void HandleSPut(MIR* mir, uint16_t opcode);
-  void RemoveSFieldsForType(uint16_t type);
-  void HandleInvokeOrClInitOrAcquireOp(MIR* mir);
-
-  bool SameMemoryVersion(const LocalValueNumbering& other) const;
-
-  uint16_t NewMemoryVersion(uint16_t* new_version);
-  void MergeMemoryVersions(bool clobbered_catch);
-
-  void PruneNonAliasingRefsForCatch();
-
-  template <typename Set, Set LocalValueNumbering::* set_ptr>
-  void IntersectSets();
-
-  void CopyLiveSregValues(SregValueMap* dest, const SregValueMap& src);
-
-  // Intersect SSA reg value maps as sets, ignore dead regs.
-  template <SregValueMap LocalValueNumbering::* map_ptr>
-  void IntersectSregValueMaps();
-
-  // Intersect maps as sets. The value type must be equality-comparable.
-  template <typename Map>
-  static void InPlaceIntersectMaps(Map* work_map, const Map& other_map);
-
-  template <typename Set, Set LocalValueNumbering::*set_ptr, void (LocalValueNumbering::*MergeFn)(
-      const typename Set::value_type& entry, typename Set::iterator hint)>
-  void MergeSets();
-
-  void IntersectAliasingValueLocations(AliasingValues* work_values, const AliasingValues* values);
-
-  void MergeEscapedRefs(const ValueNameSet::value_type& entry, ValueNameSet::iterator hint);
-  void MergeEscapedIFieldTypeClobberSets(const EscapedIFieldClobberSet::value_type& entry,
-                                         EscapedIFieldClobberSet::iterator hint);
-  void MergeEscapedIFieldClobberSets(const EscapedIFieldClobberSet::value_type& entry,
-                                     EscapedIFieldClobberSet::iterator hint);
-  void MergeEscapedArrayClobberSets(const EscapedArrayClobberSet::value_type& entry,
-                                    EscapedArrayClobberSet::iterator hint);
-  void MergeSFieldValues(const SFieldToValueMap::value_type& entry,
-                         SFieldToValueMap::iterator hint);
-  void MergeNonAliasingIFieldValues(const IFieldLocToValueMap::value_type& entry,
-                                    IFieldLocToValueMap::iterator hint);
-  void MergeNullChecked();
-  void MergeDivZeroChecked();
-
-  template <typename Map, Map LocalValueNumbering::*map_ptr, typename Versions>
-  void MergeAliasingValues(const typename Map::value_type& entry, typename Map::iterator hint);
-
-  GlobalValueNumbering* gvn_;
-
-  // We're using the block id as a 16-bit operand value for some lookups.
-  static_assert(sizeof(BasicBlockId) == sizeof(uint16_t), "BasicBlockId must be 16 bit");
-  BasicBlockId id_;
-
-  SregValueMap sreg_value_map_;
-  SregValueMap sreg_wide_value_map_;
-
-  SFieldToValueMap sfield_value_map_;
-  IFieldLocToValueMap non_aliasing_ifield_value_map_;
-  AliasingIFieldValuesMap aliasing_ifield_value_map_;
-  NonAliasingArrayValuesMap non_aliasing_array_value_map_;
-  AliasingArrayValuesMap aliasing_array_value_map_;
-
-  // Data for dealing with memory clobbering and store/load aliasing.
-  uint16_t global_memory_version_;
-  uint16_t unresolved_sfield_version_[kDexMemAccessTypeCount];
-  uint16_t unresolved_ifield_version_[kDexMemAccessTypeCount];
-  // Value names of references to objects that cannot be reached through a different value name.
-  ValueNameSet non_aliasing_refs_;
-  // Previously non-aliasing refs that escaped but can still be used for non-aliasing AGET/IGET.
-  ValueNameSet escaped_refs_;
-  // Blacklists for cases where escaped_refs_ can't be used.
-  EscapedIFieldClobberSet escaped_ifield_clobber_set_;
-  EscapedArrayClobberSet escaped_array_clobber_set_;
-
-  // Range check and null check elimination.
-  RangeCheckSet range_checked_;
-  ValueNameSet null_checked_;
-  ValueNameSet div_zero_checked_;
-
-  // Reuse one vector for all merges to avoid leaking too much memory on the ArenaStack.
-  mutable ScopedArenaVector<uint16_t> merge_names_;
-  // Map to identify when different locations merge the same values.
-  ScopedArenaSafeMap<ScopedArenaVector<uint16_t>, uint16_t> merge_map_;
-  // New memory version for merge, kNoValue if all memory versions matched.
-  uint16_t merge_new_memory_version_;
-
-  DISALLOW_COPY_AND_ASSIGN(LocalValueNumbering);
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_LOCAL_VALUE_NUMBERING_H_
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
deleted file mode 100644
index f98969e..0000000
--- a/compiler/dex/local_value_numbering_test.cc
+++ /dev/null
@@ -1,920 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "dex/mir_field_info.h"
-#include "global_value_numbering.h"
-#include "local_value_numbering.h"
-#include "gtest/gtest.h"
-
-namespace art {
-
-class LocalValueNumberingTest : public testing::Test {
- protected:
-  struct IFieldDef {
-    uint16_t field_idx;
-    uintptr_t declaring_dex_file;
-    uint16_t declaring_field_idx;
-    bool is_volatile;
-    DexMemAccessType type;
-  };
-
-  struct SFieldDef {
-    uint16_t field_idx;
-    uintptr_t declaring_dex_file;
-    uint16_t declaring_field_idx;
-    bool is_volatile;
-    DexMemAccessType type;
-  };
-
-  struct MIRDef {
-    static constexpr size_t kMaxSsaDefs = 2;
-    static constexpr size_t kMaxSsaUses = 4;
-
-    Instruction::Code opcode;
-    int64_t value;
-    uint32_t field_info;
-    size_t num_uses;
-    int32_t uses[kMaxSsaUses];
-    size_t num_defs;
-    int32_t defs[kMaxSsaDefs];
-  };
-
-#define DEF_CONST(opcode, reg, value) \
-    { opcode, value, 0u, 0, { }, 1, { reg } }
-#define DEF_CONST_WIDE(opcode, reg, value) \
-    { opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_CONST_STRING(opcode, reg, index) \
-    { opcode, index, 0u, 0, { }, 1, { reg } }
-#define DEF_IGET(opcode, reg, obj, field_info) \
-    { opcode, 0u, field_info, 1, { obj }, 1, { reg } }
-#define DEF_IGET_WIDE(opcode, reg, obj, field_info) \
-    { opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
-#define DEF_IPUT(opcode, reg, obj, field_info) \
-    { opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
-#define DEF_IPUT_WIDE(opcode, reg, obj, field_info) \
-    { opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
-#define DEF_SGET(opcode, reg, field_info) \
-    { opcode, 0u, field_info, 0, { }, 1, { reg } }
-#define DEF_SGET_WIDE(opcode, reg, field_info) \
-    { opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_SPUT(opcode, reg, field_info) \
-    { opcode, 0u, field_info, 1, { reg }, 0, { } }
-#define DEF_SPUT_WIDE(opcode, reg, field_info) \
-    { opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
-#define DEF_AGET(opcode, reg, obj, idx) \
-    { opcode, 0u, 0u, 2, { obj, idx }, 1, { reg } }
-#define DEF_AGET_WIDE(opcode, reg, obj, idx) \
-    { opcode, 0u, 0u, 2, { obj, idx }, 2, { reg, reg + 1 } }
-#define DEF_APUT(opcode, reg, obj, idx) \
-    { opcode, 0u, 0u, 3, { reg, obj, idx }, 0, { } }
-#define DEF_APUT_WIDE(opcode, reg, obj, idx) \
-    { opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }
-#define DEF_INVOKE1(opcode, reg) \
-    { opcode, 0u, 0u, 1, { reg }, 0, { } }
-#define DEF_UNIQUE_REF(opcode, reg) \
-    { opcode, 0u, 0u, 0, { }, 1, { reg } }  // CONST_CLASS, CONST_STRING, NEW_ARRAY, ...
-#define DEF_DIV_REM(opcode, result, dividend, divisor) \
-    { opcode, 0u, 0u, 2, { dividend, divisor }, 1, { result } }
-#define DEF_DIV_REM_WIDE(opcode, result, dividend, divisor) \
-    { opcode, 0u, 0u, 4, { dividend, dividend + 1, divisor, divisor + 1 }, 2, { result, result + 1 } }
-
-  void DoPrepareIFields(const IFieldDef* defs, size_t count) {
-    cu_.mir_graph->ifield_lowering_infos_.clear();
-    cu_.mir_graph->ifield_lowering_infos_.reserve(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const IFieldDef* def = &defs[i];
-      MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
-      if (def->declaring_dex_file != 0u) {
-        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
-        field_info.declaring_field_idx_ = def->declaring_field_idx;
-        field_info.flags_ &= ~(def->is_volatile ? 0u : MirIFieldLoweringInfo::kFlagIsVolatile);
-      }
-      cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
-    }
-  }
-
-  template <size_t count>
-  void PrepareIFields(const IFieldDef (&defs)[count]) {
-    DoPrepareIFields(defs, count);
-  }
-
-  void DoPrepareSFields(const SFieldDef* defs, size_t count) {
-    cu_.mir_graph->sfield_lowering_infos_.clear();
-    cu_.mir_graph->sfield_lowering_infos_.reserve(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const SFieldDef* def = &defs[i];
-      MirSFieldLoweringInfo field_info(def->field_idx, def->type);
-      // Mark even unresolved fields as initialized.
-      field_info.flags_ |= MirSFieldLoweringInfo::kFlagClassIsInitialized;
-      // NOTE: MirSFieldLoweringInfo::kFlagClassIsInDexCache isn't used by LVN.
-      if (def->declaring_dex_file != 0u) {
-        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
-        field_info.declaring_field_idx_ = def->declaring_field_idx;
-        field_info.flags_ &= ~(def->is_volatile ? 0u : MirSFieldLoweringInfo::kFlagIsVolatile);
-      }
-      cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
-    }
-  }
-
-  template <size_t count>
-  void PrepareSFields(const SFieldDef (&defs)[count]) {
-    DoPrepareSFields(defs, count);
-  }
-
-  void DoPrepareMIRs(const MIRDef* defs, size_t count) {
-    mir_count_ = count;
-    mirs_ = cu_.arena.AllocArray<MIR>(count, kArenaAllocMIR);
-    ssa_reps_.resize(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const MIRDef* def = &defs[i];
-      MIR* mir = &mirs_[i];
-      mir->dalvikInsn.opcode = def->opcode;
-      mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
-      mir->dalvikInsn.vB_wide = def->value;
-      if (IsInstructionIGetOrIPut(def->opcode)) {
-        ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.size());
-        mir->meta.ifield_lowering_info = def->field_info;
-        ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->field_info].MemAccessType(),
-                  IGetOrIPutMemAccessType(def->opcode));
-      } else if (IsInstructionSGetOrSPut(def->opcode)) {
-        ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.size());
-        mir->meta.sfield_lowering_info = def->field_info;
-        ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->field_info].MemAccessType(),
-                  SGetOrSPutMemAccessType(def->opcode));
-      }
-      mir->ssa_rep = &ssa_reps_[i];
-      mir->ssa_rep->num_uses = def->num_uses;
-      mir->ssa_rep->uses = const_cast<int32_t*>(def->uses);  // Not modified by LVN.
-      mir->ssa_rep->num_defs = def->num_defs;
-      mir->ssa_rep->defs = const_cast<int32_t*>(def->defs);  // Not modified by LVN.
-      mir->offset = i;  // LVN uses offset only for debug output.
-      mir->optimization_flags = 0u;
-
-      if (i != 0u) {
-        mirs_[i - 1u].next = mir;
-      }
-    }
-    mirs_[count - 1u].next = nullptr;
-  }
-
-  template <size_t count>
-  void PrepareMIRs(const MIRDef (&defs)[count]) {
-    DoPrepareMIRs(defs, count);
-  }
-
-  void MakeSFieldUninitialized(uint32_t sfield_index) {
-    CHECK_LT(sfield_index, cu_.mir_graph->sfield_lowering_infos_.size());
-    cu_.mir_graph->sfield_lowering_infos_[sfield_index].flags_ &=
-        ~MirSFieldLoweringInfo::kFlagClassIsInitialized;
-  }
-
-  template <size_t count>
-  void MarkAsWideSRegs(const int32_t (&sregs)[count]) {
-    for (int32_t sreg : sregs) {
-      cu_.mir_graph->reg_location_[sreg].wide = true;
-      cu_.mir_graph->reg_location_[sreg + 1].wide = true;
-      cu_.mir_graph->reg_location_[sreg + 1].high_word = true;
-    }
-  }
-
-  void PerformLVN() {
-    cu_.mir_graph->temp_.gvn.ifield_ids = GlobalValueNumbering::PrepareGvnFieldIds(
-        allocator_.get(), cu_.mir_graph->ifield_lowering_infos_);
-    cu_.mir_graph->temp_.gvn.sfield_ids = GlobalValueNumbering::PrepareGvnFieldIds(
-        allocator_.get(), cu_.mir_graph->sfield_lowering_infos_);
-    gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get(),
-                                                           GlobalValueNumbering::kModeLvn));
-    lvn_.reset(new (allocator_.get()) LocalValueNumbering(gvn_.get(), 0u, allocator_.get()));
-    value_names_.resize(mir_count_);
-    for (size_t i = 0; i != mir_count_; ++i) {
-      value_names_[i] = lvn_->GetValueNumber(&mirs_[i]);
-    }
-    EXPECT_TRUE(gvn_->Good());
-  }
-
-  LocalValueNumberingTest()
-      : pool_(),
-        cu_(&pool_, kRuntimeISA, nullptr, nullptr),
-        mir_count_(0u),
-        mirs_(nullptr),
-        ssa_reps_(),
-        allocator_(),
-        gvn_(),
-        lvn_(),
-        value_names_() {
-    cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
-    allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
-    // By default, the zero-initialized reg_location_[.] with ref == false tells LVN that
-    // 0 constants are integral, not references, and the values are all narrow.
-    // Nothing else is used by LVN/GVN. Tests can override the default values as needed.
-    cu_.mir_graph->reg_location_ = static_cast<RegLocation*>(cu_.arena.Alloc(
-        kMaxSsaRegs * sizeof(cu_.mir_graph->reg_location_[0]), kArenaAllocRegAlloc));
-    cu_.mir_graph->num_ssa_regs_ = kMaxSsaRegs;
-  }
-
-  static constexpr size_t kMaxSsaRegs = 16384u;
-
-  ArenaPool pool_;
-  CompilationUnit cu_;
-  size_t mir_count_;
-  MIR* mirs_;
-  std::vector<SSARepresentation> ssa_reps_;
-  std::unique_ptr<ScopedArenaAllocator> allocator_;
-  std::unique_ptr<GlobalValueNumbering> gvn_;
-  std::unique_ptr<LocalValueNumbering> lvn_;
-  std::vector<uint16_t> value_names_;
-};
-
-TEST_F(LocalValueNumberingTest, IGetIGetInvokeIGet) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
-      DEF_IGET(Instruction::IGET, 1u, 10u, 0u),
-      DEF_INVOKE1(Instruction::INVOKE_VIRTUAL, 11u),
-      DEF_IGET(Instruction::IGET, 2u, 10u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 4u);
-  EXPECT_EQ(value_names_[0], value_names_[1]);
-  EXPECT_NE(value_names_[0], value_names_[3]);
-  EXPECT_EQ(mirs_[0].optimization_flags, 0u);
-  EXPECT_EQ(mirs_[1].optimization_flags, MIR_IGNORE_NULL_CHECK);
-  EXPECT_EQ(mirs_[2].optimization_flags, 0u);
-  EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
-}
-
-TEST_F(LocalValueNumberingTest, IGetIPutIGetIGetIGet) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessObject },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_IGET(Instruction::IGET_OBJECT, 0u, 10u, 0u),
-      DEF_IPUT(Instruction::IPUT_OBJECT, 1u, 11u, 0u),  // May alias.
-      DEF_IGET(Instruction::IGET_OBJECT, 2u, 10u, 0u),
-      DEF_IGET(Instruction::IGET, 3u,  0u, 1u),
-      DEF_IGET(Instruction::IGET, 4u,  2u, 1u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 5u);
-  EXPECT_NE(value_names_[0], value_names_[2]);
-  EXPECT_NE(value_names_[3], value_names_[4]);
-  for (size_t i = 0; i != arraysize(mirs); ++i) {
-    EXPECT_EQ((i == 2u) ? MIR_IGNORE_NULL_CHECK : 0,
-              mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, UniquePreserve1) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 10u),
-      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
-      DEF_IPUT(Instruction::IPUT, 1u, 11u, 0u),  // No aliasing since 10u is unique.
-      DEF_IGET(Instruction::IGET, 2u, 10u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 4u);
-  EXPECT_EQ(value_names_[1], value_names_[3]);
-  for (size_t i = 0; i != arraysize(mirs); ++i) {
-    EXPECT_EQ((i == 1u || i == 3u) ? MIR_IGNORE_NULL_CHECK : 0,
-              mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, UniquePreserve2) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 11u),
-      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
-      DEF_IPUT(Instruction::IPUT, 1u, 11u, 0u),  // No aliasing since 11u is unique.
-      DEF_IGET(Instruction::IGET, 2u, 10u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 4u);
-  EXPECT_EQ(value_names_[1], value_names_[3]);
-  for (size_t i = 0; i != arraysize(mirs); ++i) {
-    EXPECT_EQ((i == 2u || i == 3u) ? MIR_IGNORE_NULL_CHECK : 0,
-              mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, UniquePreserveAndEscape) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 10u),
-      DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
-      DEF_INVOKE1(Instruction::INVOKE_VIRTUAL, 11u),  // 10u still unique.
-      DEF_IGET(Instruction::IGET, 2u, 10u, 0u),
-      DEF_INVOKE1(Instruction::INVOKE_VIRTUAL, 10u),  // 10u not unique anymore.
-      DEF_IGET(Instruction::IGET, 3u, 10u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 6u);
-  EXPECT_EQ(value_names_[1], value_names_[3]);
-  EXPECT_NE(value_names_[1], value_names_[5]);
-  for (size_t i = 0; i != arraysize(mirs); ++i) {
-    EXPECT_EQ((i == 1u || i == 3u || i == 4u || i == 5u) ? MIR_IGNORE_NULL_CHECK : 0,
-              mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, Volatile) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, true, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_IGET(Instruction::IGET, 0u, 10u, 1u),  // Volatile.
-      DEF_IGET(Instruction::IGET, 1u,  0u, 0u),  // Non-volatile.
-      DEF_IGET(Instruction::IGET, 2u, 10u, 1u),  // Volatile.
-      DEF_IGET(Instruction::IGET, 3u,  2u, 1u),  // Non-volatile.
-      DEF_IGET(Instruction::IGET, 4u,  0u, 0u),  // Non-volatile.
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 5u);
-  EXPECT_NE(value_names_[0], value_names_[2]);  // A volatile load always gets a new value name.
-  EXPECT_NE(value_names_[1], value_names_[3]);  // Bases differ because the volatile loads differ.
-  EXPECT_NE(value_names_[1], value_names_[4]);  // Not guaranteed to be the same after "acquire".
-
-  for (size_t i = 0; i != arraysize(mirs); ++i) {
-    EXPECT_EQ((i == 2u || i == 4u) ? MIR_IGNORE_NULL_CHECK : 0,
-              mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, UnresolvedIField) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },  // Resolved field #1.
-      { 2u, 1u, 2u, false, kDexMemAccessWide },  // Resolved field #2.
-      { 3u, 0u, 0u, false, kDexMemAccessWord },  // Unresolved field.
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 30u),
-      DEF_IGET(Instruction::IGET, 1u, 30u, 0u),             // Resolved field #1, unique object.
-      DEF_IGET(Instruction::IGET, 2u, 31u, 0u),             // Resolved field #1.
-      DEF_IGET_WIDE(Instruction::IGET_WIDE, 3u, 31u, 1u),   // Resolved field #2.
-      DEF_IGET(Instruction::IGET, 5u, 32u, 2u),             // Unresolved IGET can be "acquire".
-      DEF_IGET(Instruction::IGET, 6u, 30u, 0u),             // Resolved field #1, unique object.
-      DEF_IGET(Instruction::IGET, 7u, 31u, 0u),             // Resolved field #1.
-      DEF_IGET_WIDE(Instruction::IGET_WIDE, 8u, 31u, 1u),   // Resolved field #2.
-      DEF_IPUT(Instruction::IPUT, 10u, 32u, 2u),            // IPUT clobbers field #1 (#2 is wide).
-      DEF_IGET(Instruction::IGET, 11u, 30u, 0u),            // Resolved field #1, unique object.
-      DEF_IGET(Instruction::IGET, 12u, 31u, 0u),            // Resolved field #1, new value name.
-      DEF_IGET_WIDE(Instruction::IGET_WIDE, 13u, 31u, 1u),  // Resolved field #2.
-      DEF_IGET_WIDE(Instruction::IGET_WIDE, 15u, 30u, 1u),  // Resolved field #2, unique object.
-      DEF_IPUT(Instruction::IPUT, 17u, 30u, 2u),            // IPUT clobbers field #1 (#2 is wide).
-      DEF_IGET(Instruction::IGET, 18u, 30u, 0u),            // Resolved field #1, unique object.
-      DEF_IGET_WIDE(Instruction::IGET_WIDE, 19u, 30u, 1u),  // Resolved field #2, unique object.
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 3, 8, 13, 15, 19 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 16u);
-  // Unresolved field is potentially volatile, so we need to adhere to the volatile semantics.
-  EXPECT_EQ(value_names_[1], value_names_[5]);    // Unique object.
-  EXPECT_NE(value_names_[2], value_names_[6]);    // Not guaranteed to be the same after "acquire".
-  EXPECT_NE(value_names_[3], value_names_[7]);    // Not guaranteed to be the same after "acquire".
-  EXPECT_EQ(value_names_[1], value_names_[9]);    // Unique object.
-  EXPECT_NE(value_names_[6], value_names_[10]);   // This aliased with unresolved IPUT.
-  EXPECT_EQ(value_names_[7], value_names_[11]);   // Still the same after "release".
-  EXPECT_EQ(value_names_[12], value_names_[15]);  // Still the same after "release".
-  EXPECT_NE(value_names_[1], value_names_[14]);   // This aliased with unresolved IPUT.
-  EXPECT_EQ(mirs_[0].optimization_flags, 0u);
-  EXPECT_EQ(mirs_[1].optimization_flags, MIR_IGNORE_NULL_CHECK);
-  EXPECT_EQ(mirs_[2].optimization_flags, 0u);
-  EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
-  EXPECT_EQ(mirs_[4].optimization_flags, 0u);
-  for (size_t i = 5u; i != mir_count_; ++i) {
-    EXPECT_EQ((i == 1u || i == 3u || i >= 5u) ? MIR_IGNORE_NULL_CHECK : 0,
-              mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, UnresolvedSField) {
-  static const SFieldDef sfields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },  // Resolved field #1.
-      { 2u, 1u, 2u, false, kDexMemAccessWide },  // Resolved field #2.
-      { 3u, 0u, 0u, false, kDexMemAccessWord },  // Unresolved field.
-  };
-  static const MIRDef mirs[] = {
-      DEF_SGET(Instruction::SGET, 0u, 0u),            // Resolved field #1.
-      DEF_SGET_WIDE(Instruction::SGET_WIDE, 1u, 1u),  // Resolved field #2.
-      DEF_SGET(Instruction::SGET, 3u, 2u),            // Unresolved SGET can be "acquire".
-      DEF_SGET(Instruction::SGET, 4u, 0u),            // Resolved field #1.
-      DEF_SGET_WIDE(Instruction::SGET_WIDE, 5u, 1u),  // Resolved field #2.
-      DEF_SPUT(Instruction::SPUT, 7u, 2u),            // SPUT clobbers field #1 (#2 is wide).
-      DEF_SGET(Instruction::SGET, 8u, 0u),            // Resolved field #1.
-      DEF_SGET_WIDE(Instruction::SGET_WIDE, 9u, 1u),  // Resolved field #2.
-  };
-
-  PrepareSFields(sfields);
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 1, 5, 9 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 8u);
-  // Unresolved field is potentially volatile, so we need to adhere to the volatile semantics.
-  EXPECT_NE(value_names_[0], value_names_[3]);  // Not guaranteed to be the same after "acquire".
-  EXPECT_NE(value_names_[1], value_names_[4]);  // Not guaranteed to be the same after "acquire".
-  EXPECT_NE(value_names_[3], value_names_[6]);  // This aliased with unresolved SPUT.
-  EXPECT_EQ(value_names_[4], value_names_[7]);  // Still the same after "release".
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    EXPECT_EQ(0, mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, UninitializedSField) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },  // Resolved field #1.
-  };
-  static const SFieldDef sfields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },  // Resolved field #1.
-      { 2u, 1u, 2u, false, kDexMemAccessWord },  // Resolved field #2; uninitialized.
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 200u),
-      DEF_IGET(Instruction::IGET, 1u, 100u, 0u),
-      DEF_IGET(Instruction::IGET, 2u, 200u, 0u),
-      DEF_SGET(Instruction::SGET, 3u, 0u),
-      DEF_SGET(Instruction::SGET, 4u, 1u),            // Can call <clinit>().
-      DEF_IGET(Instruction::IGET, 5u, 100u, 0u),      // Differs from 1u.
-      DEF_IGET(Instruction::IGET, 6u, 200u, 0u),      // Same as 2u.
-      DEF_SGET(Instruction::SGET, 7u, 0u),            // Differs from 3u.
-  };
-
-  PrepareIFields(ifields);
-  PrepareSFields(sfields);
-  MakeSFieldUninitialized(1u);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 8u);
-  EXPECT_NE(value_names_[1], value_names_[5]);
-  EXPECT_EQ(value_names_[2], value_names_[6]);
-  EXPECT_NE(value_names_[3], value_names_[7]);
-}
-
-TEST_F(LocalValueNumberingTest, ConstString) {
-  static const MIRDef mirs[] = {
-      DEF_CONST_STRING(Instruction::CONST_STRING, 0u, 0u),
-      DEF_CONST_STRING(Instruction::CONST_STRING, 1u, 0u),
-      DEF_CONST_STRING(Instruction::CONST_STRING, 2u, 2u),
-      DEF_CONST_STRING(Instruction::CONST_STRING, 3u, 0u),
-      DEF_INVOKE1(Instruction::INVOKE_DIRECT, 2u),
-      DEF_CONST_STRING(Instruction::CONST_STRING, 4u, 2u),
-  };
-
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 6u);
-  EXPECT_EQ(value_names_[1], value_names_[0]);
-  EXPECT_NE(value_names_[2], value_names_[0]);
-  EXPECT_EQ(value_names_[3], value_names_[0]);
-  EXPECT_EQ(value_names_[5], value_names_[2]);
-}
-
-TEST_F(LocalValueNumberingTest, SameValueInDifferentMemoryLocations) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-  };
-  static const SFieldDef sfields[] = {
-      { 3u, 1u, 3u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_ARRAY, 201u),
-      DEF_IGET(Instruction::IGET, 0u, 100u, 0u),
-      DEF_IPUT(Instruction::IPUT, 0u, 100u, 1u),
-      DEF_IPUT(Instruction::IPUT, 0u, 101u, 1u),
-      DEF_APUT(Instruction::APUT, 0u, 200u, 300u),
-      DEF_APUT(Instruction::APUT, 0u, 200u, 301u),
-      DEF_APUT(Instruction::APUT, 0u, 201u, 300u),
-      DEF_APUT(Instruction::APUT, 0u, 201u, 301u),
-      DEF_SPUT(Instruction::SPUT, 0u, 0u),
-      DEF_IGET(Instruction::IGET, 9u, 100u, 0u),
-      DEF_IGET(Instruction::IGET, 10u, 100u, 1u),
-      DEF_IGET(Instruction::IGET, 11u, 101u, 1u),
-      DEF_AGET(Instruction::AGET, 12u, 200u, 300u),
-      DEF_AGET(Instruction::AGET, 13u, 200u, 301u),
-      DEF_AGET(Instruction::AGET, 14u, 201u, 300u),
-      DEF_AGET(Instruction::AGET, 15u, 201u, 301u),
-      DEF_SGET(Instruction::SGET, 16u, 0u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareSFields(sfields);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 17u);
-  for (size_t i = 9; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(value_names_[1], value_names_[i]) << i;
-  }
-  for (size_t i = 0; i != arraysize(mirs); ++i) {
-    int expected_flags =
-        ((i == 2u || (i >= 5u && i <= 7u) || (i >= 9u && i <= 15u)) ? MIR_IGNORE_NULL_CHECK : 0) |
-        ((i >= 12u && i <= 15u) ? MIR_IGNORE_RANGE_CHECK : 0);
-    EXPECT_EQ(expected_flags, mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, UniqueArrayAliasing) {
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_ARRAY, 20u),
-      DEF_AGET(Instruction::AGET, 1u, 20u, 40u),
-      DEF_APUT(Instruction::APUT, 2u, 20u, 41u),  // May alias with index for sreg 40u.
-      DEF_AGET(Instruction::AGET, 3u, 20u, 40u),
-  };
-
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 4u);
-  EXPECT_NE(value_names_[1], value_names_[3]);
-  for (size_t i = 0; i != arraysize(mirs); ++i) {
-    int expected_flags =
-        ((i >= 1u) ? MIR_IGNORE_NULL_CHECK : 0) |
-        ((i == 3u) ? MIR_IGNORE_RANGE_CHECK : 0);
-    EXPECT_EQ(expected_flags, mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, EscapingRefs) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },    // Field #1.
-      { 2u, 1u, 2u, false, kDexMemAccessWord },    // Field #2.
-      { 3u, 1u, 3u, false, kDexMemAccessObject },  // For storing escaping refs.
-      { 4u, 1u, 4u, false, kDexMemAccessWide },    // Wide.
-      { 5u, 0u, 0u, false, kDexMemAccessWord },    // Unresolved field, int.
-      { 6u, 0u, 0u, false, kDexMemAccessWide },    // Unresolved field, wide.
-  };
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 20u),
-      DEF_IGET(Instruction::IGET, 1u, 20u, 0u),
-      DEF_IGET(Instruction::IGET, 2u, 20u, 1u),
-      DEF_IPUT(Instruction::IPUT_OBJECT, 20u, 30u, 2u),      // Ref escapes.
-      DEF_IGET(Instruction::IGET, 4u, 20u, 0u),
-      DEF_IGET(Instruction::IGET, 5u, 20u, 1u),
-      DEF_IPUT(Instruction::IPUT, 6u, 31u, 0u),              // May alias with field #1.
-      DEF_IGET(Instruction::IGET, 7u, 20u, 0u),              // New value.
-      DEF_IGET(Instruction::IGET, 8u, 20u, 1u),              // Still the same.
-      DEF_IPUT_WIDE(Instruction::IPUT_WIDE, 9u, 31u, 3u),    // No aliasing, different type.
-      DEF_IGET(Instruction::IGET, 11u, 20u, 0u),
-      DEF_IGET(Instruction::IGET, 12u, 20u, 1u),
-      DEF_IPUT_WIDE(Instruction::IPUT_WIDE, 13u, 31u, 5u),   // No aliasing, different type.
-      DEF_IGET(Instruction::IGET, 15u, 20u, 0u),
-      DEF_IGET(Instruction::IGET, 16u, 20u, 1u),
-      DEF_IPUT(Instruction::IPUT, 17u, 31u, 4u),             // Aliasing, same type.
-      DEF_IGET(Instruction::IGET, 18u, 20u, 0u),
-      DEF_IGET(Instruction::IGET, 19u, 20u, 1u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 9, 13 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 18u);
-  EXPECT_EQ(value_names_[1], value_names_[4]);
-  EXPECT_EQ(value_names_[2], value_names_[5]);
-  EXPECT_NE(value_names_[4], value_names_[7]);  // New value.
-  EXPECT_EQ(value_names_[5], value_names_[8]);
-  EXPECT_EQ(value_names_[7], value_names_[10]);
-  EXPECT_EQ(value_names_[8], value_names_[11]);
-  EXPECT_EQ(value_names_[10], value_names_[13]);
-  EXPECT_EQ(value_names_[11], value_names_[14]);
-  EXPECT_NE(value_names_[13], value_names_[16]);  // New value.
-  EXPECT_NE(value_names_[14], value_names_[17]);  // New value.
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    int expected =
-        ((i != 0u && i != 3u && i != 6u) ? MIR_IGNORE_NULL_CHECK : 0) |
-        ((i == 3u) ? MIR_STORE_NON_NULL_VALUE : 0);
-    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, EscapingArrayRefs) {
-  static const MIRDef mirs[] = {
-      DEF_UNIQUE_REF(Instruction::NEW_ARRAY, 20u),
-      DEF_AGET(Instruction::AGET, 1u, 20u, 40u),
-      DEF_AGET(Instruction::AGET, 2u, 20u, 41u),
-      DEF_APUT(Instruction::APUT_OBJECT, 20u, 30u, 42u),    // Array ref escapes.
-      DEF_AGET(Instruction::AGET, 4u, 20u, 40u),
-      DEF_AGET(Instruction::AGET, 5u, 20u, 41u),
-      DEF_APUT_WIDE(Instruction::APUT_WIDE, 6u, 31u, 43u),  // No aliasing, different type.
-      DEF_AGET(Instruction::AGET, 8u, 20u, 40u),
-      DEF_AGET(Instruction::AGET, 9u, 20u, 41u),
-      DEF_APUT(Instruction::APUT, 10u, 32u, 40u),           // May alias with all elements.
-      DEF_AGET(Instruction::AGET, 11u, 20u, 40u),           // New value (same index name).
-      DEF_AGET(Instruction::AGET, 12u, 20u, 41u),           // New value (different index name).
-  };
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 6 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 12u);
-  EXPECT_EQ(value_names_[1], value_names_[4]);
-  EXPECT_EQ(value_names_[2], value_names_[5]);
-  EXPECT_EQ(value_names_[4], value_names_[7]);
-  EXPECT_EQ(value_names_[5], value_names_[8]);
-  EXPECT_NE(value_names_[7], value_names_[10]);  // New value.
-  EXPECT_NE(value_names_[8], value_names_[11]);  // New value.
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    int expected =
-        ((i != 0u && i != 3u && i != 6u && i != 9u) ? MIR_IGNORE_NULL_CHECK : 0u) |
-        ((i >= 4u && i != 6u && i != 9u) ? MIR_IGNORE_RANGE_CHECK : 0u) |
-        ((i == 3u) ? MIR_STORE_NON_NULL_VALUE : 0u);
-    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, StoringSameValueKeepsMemoryVersion) {
-  static const IFieldDef ifields[] = {
-      { 1u, 1u, 1u, false, kDexMemAccessWord },
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-  };
-  static const SFieldDef sfields[] = {
-      { 2u, 1u, 2u, false, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_IGET(Instruction::IGET, 0u, 30u, 0u),
-      DEF_IGET(Instruction::IGET, 1u, 31u, 0u),
-      DEF_IPUT(Instruction::IPUT, 1u, 31u, 0u),            // Store the same value.
-      DEF_IGET(Instruction::IGET, 3u, 30u, 0u),
-      DEF_AGET(Instruction::AGET, 4u, 32u, 40u),
-      DEF_AGET(Instruction::AGET, 5u, 33u, 40u),
-      DEF_APUT(Instruction::APUT, 5u, 33u, 40u),           // Store the same value.
-      DEF_AGET(Instruction::AGET, 7u, 32u, 40u),
-      DEF_SGET(Instruction::SGET, 8u, 0u),
-      DEF_SPUT(Instruction::SPUT, 8u, 0u),                 // Store the same value.
-      DEF_SGET(Instruction::SGET, 10u, 0u),
-      DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 50u),      // Test with unique references.
-      { Instruction::FILLED_NEW_ARRAY, 0, 0u, 2, { 12u, 13u }, 0, { } },
-      DEF_UNIQUE_REF(Instruction::MOVE_RESULT_OBJECT, 51u),
-      DEF_IGET(Instruction::IGET, 14u, 50u, 0u),
-      DEF_IGET(Instruction::IGET, 15u, 50u, 1u),
-      DEF_IPUT(Instruction::IPUT, 15u, 50u, 1u),           // Store the same value.
-      DEF_IGET(Instruction::IGET, 17u, 50u, 0u),
-      DEF_AGET(Instruction::AGET, 18u, 51u, 40u),
-      DEF_AGET(Instruction::AGET, 19u, 51u, 41u),
-      DEF_APUT(Instruction::APUT, 19u, 51u, 41u),          // Store the same value.
-      DEF_AGET(Instruction::AGET, 21u, 51u, 40u),
-  };
-
-  PrepareIFields(ifields);
-  PrepareSFields(sfields);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 22u);
-  EXPECT_NE(value_names_[0], value_names_[1]);
-  EXPECT_EQ(value_names_[0], value_names_[3]);
-  EXPECT_NE(value_names_[4], value_names_[5]);
-  EXPECT_EQ(value_names_[4], value_names_[7]);
-  EXPECT_EQ(value_names_[8], value_names_[10]);
-  EXPECT_NE(value_names_[14], value_names_[15]);
-  EXPECT_EQ(value_names_[14], value_names_[17]);
-  EXPECT_NE(value_names_[18], value_names_[19]);
-  EXPECT_EQ(value_names_[18], value_names_[21]);
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    int expected =
-        ((i == 2u || i == 3u || i == 6u || i == 7u || (i >= 14u)) ? MIR_IGNORE_NULL_CHECK : 0u) |
-        ((i == 6u || i == 7u || i >= 20u) ? MIR_IGNORE_RANGE_CHECK : 0u);
-    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, FilledNewArrayTracking) {
-  if (!kLocalValueNumberingEnableFilledNewArrayTracking) {
-    // Feature disabled.
-    return;
-  }
-  static const MIRDef mirs[] = {
-      DEF_CONST(Instruction::CONST, 0u, 100),
-      DEF_CONST(Instruction::CONST, 1u, 200),
-      { Instruction::FILLED_NEW_ARRAY, 0, 0u, 2, { 0u, 1u }, 0, { } },
-      DEF_UNIQUE_REF(Instruction::MOVE_RESULT_OBJECT, 10u),
-      DEF_CONST(Instruction::CONST, 20u, 0),
-      DEF_CONST(Instruction::CONST, 21u, 1),
-      DEF_AGET(Instruction::AGET, 6u, 10u, 20u),
-      DEF_AGET(Instruction::AGET, 7u, 10u, 21u),
-  };
-
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 8u);
-  EXPECT_EQ(value_names_[0], value_names_[6]);
-  EXPECT_EQ(value_names_[1], value_names_[7]);
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    int expected = (i == 6u || i == 7u) ? (MIR_IGNORE_NULL_CHECK | MIR_IGNORE_RANGE_CHECK) : 0u;
-    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
-  }
-}
-
-TEST_F(LocalValueNumberingTest, ClInitOnSget) {
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, false, kDexMemAccessObject },
-      { 1u, 2u, 1u, false, kDexMemAccessObject },
-  };
-  static const MIRDef mirs[] = {
-      DEF_SGET(Instruction::SGET_OBJECT, 0u, 0u),
-      DEF_AGET(Instruction::AGET, 1u, 0u, 100u),
-      DEF_SGET(Instruction::SGET_OBJECT, 2u, 1u),
-      DEF_SGET(Instruction::SGET_OBJECT, 3u, 0u),
-      DEF_AGET(Instruction::AGET, 4u, 3u, 100u),
-  };
-
-  PrepareSFields(sfields);
-  MakeSFieldUninitialized(1u);
-  PrepareMIRs(mirs);
-  PerformLVN();
-  ASSERT_EQ(value_names_.size(), 5u);
-  EXPECT_NE(value_names_[0], value_names_[3]);
-}
-
-TEST_F(LocalValueNumberingTest, DivZeroCheck) {
-  static const MIRDef mirs[] = {
-      DEF_DIV_REM(Instruction::DIV_INT, 1u, 10u, 20u),
-      DEF_DIV_REM(Instruction::DIV_INT, 2u, 20u, 20u),
-      DEF_DIV_REM(Instruction::DIV_INT_2ADDR, 3u, 10u, 1u),
-      DEF_DIV_REM(Instruction::REM_INT, 4u, 30u, 20u),
-      DEF_DIV_REM_WIDE(Instruction::REM_LONG, 5u, 12u, 14u),
-      DEF_DIV_REM_WIDE(Instruction::DIV_LONG_2ADDR, 7u, 16u, 14u),
-  };
-
-  static const bool expected_ignore_div_zero_check[] = {
-      false, true, false, true, false, true,
-  };
-
-  PrepareMIRs(mirs);
-  static const int32_t wide_sregs[] = { 5, 7, 12, 14, 16 };
-  MarkAsWideSRegs(wide_sregs);
-  PerformLVN();
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    int expected = expected_ignore_div_zero_check[i] ? MIR_IGNORE_DIV_ZERO_CHECK : 0u;
-    EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
-  }
-}
-
-static constexpr int64_t shift_minus_1(size_t by) {
-  return static_cast<int64_t>(static_cast<uint64_t>(INT64_C(-1)) << by);
-}
-
-TEST_F(LocalValueNumberingTest, ConstWide) {
-  static const MIRDef mirs[] = {
-      // Core reg constants.
-      DEF_CONST(Instruction::CONST_WIDE_16, 0u, 0),
-      DEF_CONST(Instruction::CONST_WIDE_16, 2u, 1),
-      DEF_CONST(Instruction::CONST_WIDE_16, 4u, -1),
-      DEF_CONST(Instruction::CONST_WIDE_32, 6u, 1 << 16),
-      DEF_CONST(Instruction::CONST_WIDE_32, 8u, shift_minus_1(16)),
-      DEF_CONST(Instruction::CONST_WIDE_32, 10u, (1 << 16) + 1),
-      DEF_CONST(Instruction::CONST_WIDE_32, 12u, (1 << 16) - 1),
-      DEF_CONST(Instruction::CONST_WIDE_32, 14u, -(1 << 16) + 1),
-      DEF_CONST(Instruction::CONST_WIDE_32, 16u, -(1 << 16) - 1),
-      DEF_CONST(Instruction::CONST_WIDE, 18u, INT64_C(1) << 32),
-      DEF_CONST(Instruction::CONST_WIDE, 20u, shift_minus_1(32)),
-      DEF_CONST(Instruction::CONST_WIDE, 22u, (INT64_C(1) << 32) + 1),
-      DEF_CONST(Instruction::CONST_WIDE, 24u, (INT64_C(1) << 32) - 1),
-      DEF_CONST(Instruction::CONST_WIDE, 26u, shift_minus_1(32) + 1),
-      DEF_CONST(Instruction::CONST_WIDE, 28u, shift_minus_1(32) - 1),
-      DEF_CONST(Instruction::CONST_WIDE_HIGH16, 30u, 1),       // Effectively 1 << 48.
-      DEF_CONST(Instruction::CONST_WIDE_HIGH16, 32u, 0xffff),  // Effectively -1 << 48.
-      DEF_CONST(Instruction::CONST_WIDE, 34u, (INT64_C(1) << 48) + 1),
-      DEF_CONST(Instruction::CONST_WIDE, 36u, (INT64_C(1) << 48) - 1),
-      DEF_CONST(Instruction::CONST_WIDE, 38u, shift_minus_1(48) + 1),
-      DEF_CONST(Instruction::CONST_WIDE, 40u, shift_minus_1(48) - 1),
-      // FP reg constants.
-      DEF_CONST(Instruction::CONST_WIDE_16, 42u, 0),
-      DEF_CONST(Instruction::CONST_WIDE_16, 44u, 1),
-      DEF_CONST(Instruction::CONST_WIDE_16, 46u, -1),
-      DEF_CONST(Instruction::CONST_WIDE_32, 48u, 1 << 16),
-      DEF_CONST(Instruction::CONST_WIDE_32, 50u, shift_minus_1(16)),
-      DEF_CONST(Instruction::CONST_WIDE_32, 52u, (1 << 16) + 1),
-      DEF_CONST(Instruction::CONST_WIDE_32, 54u, (1 << 16) - 1),
-      DEF_CONST(Instruction::CONST_WIDE_32, 56u, -(1 << 16) + 1),
-      DEF_CONST(Instruction::CONST_WIDE_32, 58u, -(1 << 16) - 1),
-      DEF_CONST(Instruction::CONST_WIDE, 60u, INT64_C(1) << 32),
-      DEF_CONST(Instruction::CONST_WIDE, 62u, shift_minus_1(32)),
-      DEF_CONST(Instruction::CONST_WIDE, 64u, (INT64_C(1) << 32) + 1),
-      DEF_CONST(Instruction::CONST_WIDE, 66u, (INT64_C(1) << 32) - 1),
-      DEF_CONST(Instruction::CONST_WIDE, 68u, shift_minus_1(32) + 1),
-      DEF_CONST(Instruction::CONST_WIDE, 70u, shift_minus_1(32) - 1),
-      DEF_CONST(Instruction::CONST_WIDE_HIGH16, 72u, 1),       // Effectively 1 << 48.
-      DEF_CONST(Instruction::CONST_WIDE_HIGH16, 74u, 0xffff),  // Effectively -1 << 48.
-      DEF_CONST(Instruction::CONST_WIDE, 76u, (INT64_C(1) << 48) + 1),
-      DEF_CONST(Instruction::CONST_WIDE, 78u, (INT64_C(1) << 48) - 1),
-      DEF_CONST(Instruction::CONST_WIDE, 80u, shift_minus_1(48) + 1),
-      DEF_CONST(Instruction::CONST_WIDE, 82u, shift_minus_1(48) - 1),
-  };
-
-  PrepareMIRs(mirs);
-  for (size_t i = 0; i != arraysize(mirs); ++i) {
-    const int32_t wide_sregs[] = { mirs_[i].ssa_rep->defs[0] };
-    MarkAsWideSRegs(wide_sregs);
-  }
-  for (size_t i = arraysize(mirs) / 2u; i != arraysize(mirs); ++i) {
-    cu_.mir_graph->reg_location_[mirs_[i].ssa_rep->defs[0]].fp = true;
-  }
-  PerformLVN();
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    for (size_t j = i + 1u; j != mir_count_; ++j) {
-      EXPECT_NE(value_names_[i], value_names_[j]) << i << " " << j;
-    }
-  }
-}
-
-TEST_F(LocalValueNumberingTest, Const) {
-  static const MIRDef mirs[] = {
-      // Core reg constants.
-      DEF_CONST(Instruction::CONST_4, 0u, 0),
-      DEF_CONST(Instruction::CONST_4, 1u, 1),
-      DEF_CONST(Instruction::CONST_4, 2u, -1),
-      DEF_CONST(Instruction::CONST_16, 3u, 1 << 4),
-      DEF_CONST(Instruction::CONST_16, 4u, shift_minus_1(4)),
-      DEF_CONST(Instruction::CONST_16, 5u, (1 << 4) + 1),
-      DEF_CONST(Instruction::CONST_16, 6u, (1 << 4) - 1),
-      DEF_CONST(Instruction::CONST_16, 7u, -(1 << 4) + 1),
-      DEF_CONST(Instruction::CONST_16, 8u, -(1 << 4) - 1),
-      DEF_CONST(Instruction::CONST_HIGH16, 9u, 1),       // Effectively 1 << 16.
-      DEF_CONST(Instruction::CONST_HIGH16, 10u, 0xffff),  // Effectively -1 << 16.
-      DEF_CONST(Instruction::CONST, 11u, (1 << 16) + 1),
-      DEF_CONST(Instruction::CONST, 12u, (1 << 16) - 1),
-      DEF_CONST(Instruction::CONST, 13u, shift_minus_1(16) + 1),
-      DEF_CONST(Instruction::CONST, 14u, shift_minus_1(16) - 1),
-      // FP reg constants.
-      DEF_CONST(Instruction::CONST_4, 15u, 0),
-      DEF_CONST(Instruction::CONST_4, 16u, 1),
-      DEF_CONST(Instruction::CONST_4, 17u, -1),
-      DEF_CONST(Instruction::CONST_16, 18u, 1 << 4),
-      DEF_CONST(Instruction::CONST_16, 19u, shift_minus_1(4)),
-      DEF_CONST(Instruction::CONST_16, 20u, (1 << 4) + 1),
-      DEF_CONST(Instruction::CONST_16, 21u, (1 << 4) - 1),
-      DEF_CONST(Instruction::CONST_16, 22u, -(1 << 4) + 1),
-      DEF_CONST(Instruction::CONST_16, 23u, -(1 << 4) - 1),
-      DEF_CONST(Instruction::CONST_HIGH16, 24u, 1),       // Effectively 1 << 16.
-      DEF_CONST(Instruction::CONST_HIGH16, 25u, 0xffff),  // Effectively -1 << 16.
-      DEF_CONST(Instruction::CONST, 26u, (1 << 16) + 1),
-      DEF_CONST(Instruction::CONST, 27u, (1 << 16) - 1),
-      DEF_CONST(Instruction::CONST, 28u, shift_minus_1(16) + 1),
-      DEF_CONST(Instruction::CONST, 29u, shift_minus_1(16) - 1),
-      // null reference constant.
-      DEF_CONST(Instruction::CONST_4, 30u, 0),
-  };
-
-  PrepareMIRs(mirs);
-  static_assert((arraysize(mirs) & 1) != 0, "missing null or unmatched fp/core");
-  cu_.mir_graph->reg_location_[arraysize(mirs) - 1].ref = true;
-  for (size_t i = arraysize(mirs) / 2u; i != arraysize(mirs) - 1; ++i) {
-    cu_.mir_graph->reg_location_[mirs_[i].ssa_rep->defs[0]].fp = true;
-  }
-  PerformLVN();
-  for (size_t i = 0u; i != mir_count_; ++i) {
-    for (size_t j = i + 1u; j != mir_count_; ++j) {
-      EXPECT_NE(value_names_[i], value_names_[j]) << i << " " << j;
-    }
-  }
-}
-
-}  // namespace art
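The next file to go, mir_analysis.cc, drove the compile-or-skip filter: a per-opcode attribute table classified each Dex instruction, and ComputeSkipCompilation() turned the resulting counts into ratios checked against thresholds. As a compact illustration of that table-driven shape (the toy opcodes, masks, and thresholds below are invented for the sketch; only the flag-counting structure mirrors the deleted code):

// Sketch of table-driven opcode classification feeding a skip heuristic,
// loosely modeled on the attribute masks in the deleted mir_analysis.cc.
#include <cstdint>
#include <iostream>
#include <vector>

enum AttrMask : uint16_t {
  kNone   = 0,
  kMath   = 1 << 0,
  kFp     = 1 << 1,
  kBranch = 1 << 2,
  kHeavy  = 1 << 3,
};

// One entry per toy opcode: 0 = nop, 1 = add, 2 = fadd, 3 = if, 4 = invoke.
constexpr uint16_t kAttrs[] = {
  kNone, kMath, kMath | kFp, kMath | kBranch, kHeavy,
};

// Returns true if the method looks uninteresting to compile. The thresholds
// are made up; the real heuristic also weighed loops and method size.
bool ShouldSkip(const std::vector<int>& opcodes) {
  int math = 0, fp = 0, branch = 0, heavy = 0;
  for (int op : opcodes) {
    uint16_t flags = kAttrs[op];
    math += (flags & kMath) != 0;
    fp += (flags & kFp) != 0;
    branch += (flags & kBranch) != 0;
    heavy += (flags & kHeavy) != 0;
  }
  float count = static_cast<float>(opcodes.size());
  if (fp / count > 0.05f) return false;     // Significant floating point.
  if (math / count > 0.3f) return false;    // Significant generic math.
  if (branch / count > 0.3f) return false;  // Logic-intensive.
  return heavy / count >= 0.04f;            // Mostly heavyweight ops: skip.
}

int main() {
  std::cout << std::boolalpha
            << ShouldSkip({1, 1, 1, 3, 0}) << "\n"  // Math-heavy: compile.
            << ShouldSkip({4, 4, 4, 0}) << "\n";    // Invoke-heavy: skip.
  return 0;
}

The deleted file's kAnalysisAttributes table below is the full-scale version of kAttrs: one mask per Dex opcode, including the extended MIR pseudo-ops at the end.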
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
deleted file mode 100644
index 18ce563..0000000
--- a/compiler/dex/mir_analysis.cc
+++ /dev/null
@@ -1,1433 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <algorithm>
-#include <memory>
-
-#include "base/logging.h"
-#include "base/scoped_arena_containers.h"
-#include "dataflow_iterator-inl.h"
-#include "compiler_ir.h"
-#include "dex_flags.h"
-#include "dex_instruction-inl.h"
-#include "dex/mir_field_info.h"
-#include "dex/verified_method.h"
-#include "dex/quick/dex_file_method_inliner.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
-#include "driver/dex_compilation_unit.h"
-#include "scoped_thread_state_change.h"
-#include "utils.h"
-
-namespace art {
-
-enum InstructionAnalysisAttributeOps : uint8_t {
-  kUninterestingOp = 0,
-  kArithmeticOp,
-  kFpOp,
-  kSingleOp,
-  kDoubleOp,
-  kIntOp,
-  kLongOp,
-  kBranchOp,
-  kInvokeOp,
-  kArrayOp,
-  kHeavyweightOp,
-  kSimpleConstOp,
-  kMoveOp,
-  kSwitch
-};
-
-enum InstructionAnalysisAttributeMasks : uint16_t {
-  kAnNone = 1 << kUninterestingOp,
-  kAnMath = 1 << kArithmeticOp,
-  kAnFp = 1 << kFpOp,
-  kAnLong = 1 << kLongOp,
-  kAnInt = 1 << kIntOp,
-  kAnSingle = 1 << kSingleOp,
-  kAnDouble = 1 << kDoubleOp,
-  kAnFloatMath = 1 << kFpOp,
-  kAnBranch = 1 << kBranchOp,
-  kAnInvoke = 1 << kInvokeOp,
-  kAnArrayOp = 1 << kArrayOp,
-  kAnHeavyWeight = 1 << kHeavyweightOp,
-  kAnSimpleConst = 1 << kSimpleConstOp,
-  kAnMove = 1 << kMoveOp,
-  kAnSwitch = 1 << kSwitch,
-  kAnComputational = kAnMath | kAnArrayOp | kAnMove | kAnSimpleConst,
-};
-
-// Instruction characteristics used to statically identify computation-intensive methods.
-static const uint16_t kAnalysisAttributes[kMirOpLast] = {
-  // 00 NOP
-  kAnNone,
-
-  // 01 MOVE vA, vB
-  kAnMove,
-
-  // 02 MOVE_FROM16 vAA, vBBBB
-  kAnMove,
-
-  // 03 MOVE_16 vAAAA, vBBBB
-  kAnMove,
-
-  // 04 MOVE_WIDE vA, vB
-  kAnMove,
-
-  // 05 MOVE_WIDE_FROM16 vAA, vBBBB
-  kAnMove,
-
-  // 06 MOVE_WIDE_16 vAAAA, vBBBB
-  kAnMove,
-
-  // 07 MOVE_OBJECT vA, vB
-  kAnMove,
-
-  // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
-  kAnMove,
-
-  // 09 MOVE_OBJECT_16 vAAAA, vBBBB
-  kAnMove,
-
-  // 0A MOVE_RESULT vAA
-  kAnMove,
-
-  // 0B MOVE_RESULT_WIDE vAA
-  kAnMove,
-
-  // 0C MOVE_RESULT_OBJECT vAA
-  kAnMove,
-
-  // 0D MOVE_EXCEPTION vAA
-  kAnMove,
-
-  // 0E RETURN_VOID
-  kAnBranch,
-
-  // 0F RETURN vAA
-  kAnBranch,
-
-  // 10 RETURN_WIDE vAA
-  kAnBranch,
-
-  // 11 RETURN_OBJECT vAA
-  kAnBranch,
-
-  // 12 CONST_4 vA, #+B
-  kAnSimpleConst,
-
-  // 13 CONST_16 vAA, #+BBBB
-  kAnSimpleConst,
-
-  // 14 CONST vAA, #+BBBBBBBB
-  kAnSimpleConst,
-
-  // 15 CONST_HIGH16 vAA, #+BBBB0000
-  kAnSimpleConst,
-
-  // 16 CONST_WIDE_16 vAA, #+BBBB
-  kAnSimpleConst,
-
-  // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
-  kAnSimpleConst,
-
-  // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
-  kAnSimpleConst,
-
-  // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
-  kAnSimpleConst,
-
-  // 1A CONST_STRING vAA, string@BBBB
-  kAnNone,
-
-  // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
-  kAnNone,
-
-  // 1C CONST_CLASS vAA, type@BBBB
-  kAnNone,
-
-  // 1D MONITOR_ENTER vAA
-  kAnNone,
-
-  // 1E MONITOR_EXIT vAA
-  kAnNone,
-
-  // 1F CHECK_CAST vAA, type@BBBB
-  kAnNone,
-
-  // 20 INSTANCE_OF vA, vB, type@CCCC
-  kAnNone,
-
-  // 21 ARRAY_LENGTH vA, vB
-  kAnArrayOp,
-
-  // 22 NEW_INSTANCE vAA, type@BBBB
-  kAnHeavyWeight,
-
-  // 23 NEW_ARRAY vA, vB, type@CCCC
-  kAnHeavyWeight,
-
-  // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}, type@BBBB
-  kAnHeavyWeight,
-
-  // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
-  kAnHeavyWeight,
-
-  // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
-  kAnNone,
-
-  // 27 THROW vAA
-  kAnHeavyWeight | kAnBranch,
-
-  // 28 GOTO
-  kAnBranch,
-
-  // 29 GOTO_16
-  kAnBranch,
-
-  // 2A GOTO_32
-  kAnBranch,
-
-  // 2B PACKED_SWITCH vAA, +BBBBBBBB
-  kAnSwitch,
-
-  // 2C SPARSE_SWITCH vAA, +BBBBBBBB
-  kAnSwitch,
-
-  // 2D CMPL_FLOAT vAA, vBB, vCC
-  kAnMath | kAnFp | kAnSingle,
-
-  // 2E CMPG_FLOAT vAA, vBB, vCC
-  kAnMath | kAnFp | kAnSingle,
-
-  // 2F CMPL_DOUBLE vAA, vBB, vCC
-  kAnMath | kAnFp | kAnDouble,
-
-  // 30 CMPG_DOUBLE vAA, vBB, vCC
-  kAnMath | kAnFp | kAnDouble,
-
-  // 31 CMP_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // 32 IF_EQ vA, vB, +CCCC
-  kAnMath | kAnBranch | kAnInt,
-
-  // 33 IF_NE vA, vB, +CCCC
-  kAnMath | kAnBranch | kAnInt,
-
-  // 34 IF_LT vA, vB, +CCCC
-  kAnMath | kAnBranch | kAnInt,
-
-  // 35 IF_GE vA, vB, +CCCC
-  kAnMath | kAnBranch | kAnInt,
-
-  // 36 IF_GT vA, vB, +CCCC
-  kAnMath | kAnBranch | kAnInt,
-
-  // 37 IF_LE vA, vB, +CCCC
-  kAnMath | kAnBranch | kAnInt,
-
-  // 38 IF_EQZ vAA, +BBBB
-  kAnMath | kAnBranch | kAnInt,
-
-  // 39 IF_NEZ vAA, +BBBB
-  kAnMath | kAnBranch | kAnInt,
-
-  // 3A IF_LTZ vAA, +BBBB
-  kAnMath | kAnBranch | kAnInt,
-
-  // 3B IF_GEZ vAA, +BBBB
-  kAnMath | kAnBranch | kAnInt,
-
-  // 3C IF_GTZ vAA, +BBBB
-  kAnMath | kAnBranch | kAnInt,
-
-  // 3D IF_LEZ vAA, +BBBB
-  kAnMath | kAnBranch | kAnInt,
-
-  // 3E UNUSED_3E
-  kAnNone,
-
-  // 3F UNUSED_3F
-  kAnNone,
-
-  // 40 UNUSED_40
-  kAnNone,
-
-  // 41 UNUSED_41
-  kAnNone,
-
-  // 42 UNUSED_42
-  kAnNone,
-
-  // 43 UNUSED_43
-  kAnNone,
-
-  // 44 AGET vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 45 AGET_WIDE vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 46 AGET_OBJECT vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 47 AGET_BOOLEAN vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 48 AGET_BYTE vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 49 AGET_CHAR vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 4A AGET_SHORT vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 4B APUT vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 4C APUT_WIDE vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 4D APUT_OBJECT vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 4E APUT_BOOLEAN vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 4F APUT_BYTE vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 50 APUT_CHAR vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 51 APUT_SHORT vAA, vBB, vCC
-  kAnArrayOp,
-
-  // 52 IGET vA, vB, field@CCCC
-  kAnNone,
-
-  // 53 IGET_WIDE vA, vB, field@CCCC
-  kAnNone,
-
-  // 54 IGET_OBJECT vA, vB, field@CCCC
-  kAnNone,
-
-  // 55 IGET_BOOLEAN vA, vB, field@CCCC
-  kAnNone,
-
-  // 56 IGET_BYTE vA, vB, field@CCCC
-  kAnNone,
-
-  // 57 IGET_CHAR vA, vB, field@CCCC
-  kAnNone,
-
-  // 58 IGET_SHORT vA, vB, field@CCCC
-  kAnNone,
-
-  // 59 IPUT vA, vB, field@CCCC
-  kAnNone,
-
-  // 5A IPUT_WIDE vA, vB, field@CCCC
-  kAnNone,
-
-  // 5B IPUT_OBJECT vA, vB, field@CCCC
-  kAnNone,
-
-  // 5C IPUT_BOOLEAN vA, vB, field@CCCC
-  kAnNone,
-
-  // 5D IPUT_BYTE vA, vB, field@CCCC
-  kAnNone,
-
-  // 5E IPUT_CHAR vA, vB, field@CCCC
-  kAnNone,
-
-  // 5F IPUT_SHORT vA, vB, field@CCCC
-  kAnNone,
-
-  // 60 SGET vAA, field@BBBB
-  kAnNone,
-
-  // 61 SGET_WIDE vAA, field@BBBB
-  kAnNone,
-
-  // 62 SGET_OBJECT vAA, field@BBBB
-  kAnNone,
-
-  // 63 SGET_BOOLEAN vAA, field@BBBB
-  kAnNone,
-
-  // 64 SGET_BYTE vAA, field@BBBB
-  kAnNone,
-
-  // 65 SGET_CHAR vAA, field@BBBB
-  kAnNone,
-
-  // 66 SGET_SHORT vAA, field@BBBB
-  kAnNone,
-
-  // 67 SPUT vAA, field@BBBB
-  kAnNone,
-
-  // 68 SPUT_WIDE vAA, field@BBBB
-  kAnNone,
-
-  // 69 SPUT_OBJECT vAA, field@BBBB
-  kAnNone,
-
-  // 6A SPUT_BOOLEAN vAA, field@BBBB
-  kAnNone,
-
-  // 6B SPUT_BYTE vAA, field@BBBB
-  kAnNone,
-
-  // 6C SPUT_CHAR vAA, field@BBBB
-  kAnNone,
-
-  // 6D SPUT_SHORT vAA, field@BBBB
-  kAnNone,
-
-  // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 73 RETURN_VOID_NO_BARRIER
-  kAnBranch,
-
-  // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
-  kAnInvoke | kAnHeavyWeight,
-
-  // 79 UNUSED_79
-  kAnNone,
-
-  // 7A UNUSED_7A
-  kAnNone,
-
-  // 7B NEG_INT vA, vB
-  kAnMath | kAnInt,
-
-  // 7C NOT_INT vA, vB
-  kAnMath | kAnInt,
-
-  // 7D NEG_LONG vA, vB
-  kAnMath | kAnLong,
-
-  // 7E NOT_LONG vA, vB
-  kAnMath | kAnLong,
-
-  // 7F NEG_FLOAT vA, vB
-  kAnMath | kAnFp | kAnSingle,
-
-  // 80 NEG_DOUBLE vA, vB
-  kAnMath | kAnFp | kAnDouble,
-
-  // 81 INT_TO_LONG vA, vB
-  kAnMath | kAnInt | kAnLong,
-
-  // 82 INT_TO_FLOAT vA, vB
-  kAnMath | kAnFp | kAnInt | kAnSingle,
-
-  // 83 INT_TO_DOUBLE vA, vB
-  kAnMath | kAnFp | kAnInt | kAnDouble,
-
-  // 84 LONG_TO_INT vA, vB
-  kAnMath | kAnInt | kAnLong,
-
-  // 85 LONG_TO_FLOAT vA, vB
-  kAnMath | kAnFp | kAnLong | kAnSingle,
-
-  // 86 LONG_TO_DOUBLE vA, vB
-  kAnMath | kAnFp | kAnLong | kAnDouble,
-
-  // 87 FLOAT_TO_INT vA, vB
-  kAnMath | kAnFp | kAnInt | kAnSingle,
-
-  // 88 FLOAT_TO_LONG vA, vB
-  kAnMath | kAnFp | kAnLong | kAnSingle,
-
-  // 89 FLOAT_TO_DOUBLE vA, vB
-  kAnMath | kAnFp | kAnSingle | kAnDouble,
-
-  // 8A DOUBLE_TO_INT vA, vB
-  kAnMath | kAnFp | kAnInt | kAnDouble,
-
-  // 8B DOUBLE_TO_LONG vA, vB
-  kAnMath | kAnFp | kAnLong | kAnDouble,
-
-  // 8C DOUBLE_TO_FLOAT vA, vB
-  kAnMath | kAnFp | kAnSingle | kAnDouble,
-
-  // 8D INT_TO_BYTE vA, vB
-  kAnMath | kAnInt,
-
-  // 8E INT_TO_CHAR vA, vB
-  kAnMath | kAnInt,
-
-  // 8F INT_TO_SHORT vA, vB
-  kAnMath | kAnInt,
-
-  // 90 ADD_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 91 SUB_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 92 MUL_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 93 DIV_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 94 REM_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 95 AND_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 96 OR_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 97 XOR_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 98 SHL_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 99 SHR_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 9A USHR_INT vAA, vBB, vCC
-  kAnMath | kAnInt,
-
-  // 9B ADD_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // 9C SUB_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // 9D MUL_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // 9E DIV_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // 9F REM_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // A0 AND_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // A1 OR_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // A2 XOR_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // A3 SHL_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // A4 SHR_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // A5 USHR_LONG vAA, vBB, vCC
-  kAnMath | kAnLong,
-
-  // A6 ADD_FLOAT vAA, vBB, vCC
-  kAnMath | kAnFp | kAnSingle,
-
-  // A7 SUB_FLOAT vAA, vBB, vCC
-  kAnMath | kAnFp | kAnSingle,
-
-  // A8 MUL_FLOAT vAA, vBB, vCC
-  kAnMath | kAnFp | kAnSingle,
-
-  // A9 DIV_FLOAT vAA, vBB, vCC
-  kAnMath | kAnFp | kAnSingle,
-
-  // AA REM_FLOAT vAA, vBB, vCC
-  kAnMath | kAnFp | kAnSingle,
-
-  // AB ADD_DOUBLE vAA, vBB, vCC
-  kAnMath | kAnFp | kAnDouble,
-
-  // AC SUB_DOUBLE vAA, vBB, vCC
-  kAnMath | kAnFp | kAnDouble,
-
-  // AD MUL_DOUBLE vAA, vBB, vCC
-  kAnMath | kAnFp | kAnDouble,
-
-  // AE DIV_DOUBLE vAA, vBB, vCC
-  kAnMath | kAnFp | kAnDouble,
-
-  // AF REM_DOUBLE vAA, vBB, vCC
-  kAnMath | kAnFp | kAnDouble,
-
-  // B0 ADD_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B1 SUB_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B2 MUL_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B3 DIV_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B4 REM_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B5 AND_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B6 OR_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B7 XOR_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B8 SHL_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // B9 SHR_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // BA USHR_INT_2ADDR vA, vB
-  kAnMath | kAnInt,
-
-  // BB ADD_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // BC SUB_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // BD MUL_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // BE DIV_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // BF REM_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // C0 AND_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // C1 OR_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // C2 XOR_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // C3 SHL_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // C4 SHR_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // C5 USHR_LONG_2ADDR vA, vB
-  kAnMath | kAnLong,
-
-  // C6 ADD_FLOAT_2ADDR vA, vB
-  kAnMath | kAnFp | kAnSingle,
-
-  // C7 SUB_FLOAT_2ADDR vA, vB
-  kAnMath | kAnFp | kAnSingle,
-
-  // C8 MUL_FLOAT_2ADDR vA, vB
-  kAnMath | kAnFp | kAnSingle,
-
-  // C9 DIV_FLOAT_2ADDR vA, vB
-  kAnMath | kAnFp | kAnSingle,
-
-  // CA REM_FLOAT_2ADDR vA, vB
-  kAnMath | kAnFp | kAnSingle,
-
-  // CB ADD_DOUBLE_2ADDR vA, vB
-  kAnMath | kAnFp | kAnDouble,
-
-  // CC SUB_DOUBLE_2ADDR vA, vB
-  kAnMath | kAnFp | kAnDouble,
-
-  // CD MUL_DOUBLE_2ADDR vA, vB
-  kAnMath | kAnFp | kAnDouble,
-
-  // CE DIV_DOUBLE_2ADDR vA, vB
-  kAnMath | kAnFp | kAnDouble,
-
-  // CF REM_DOUBLE_2ADDR vA, vB
-  kAnMath | kAnFp | kAnDouble,
-
-  // D0 ADD_INT_LIT16 vA, vB, #+CCCC
-  kAnMath | kAnInt,
-
-  // D1 RSUB_INT vA, vB, #+CCCC
-  kAnMath | kAnInt,
-
-  // D2 MUL_INT_LIT16 vA, vB, #+CCCC
-  kAnMath | kAnInt,
-
-  // D3 DIV_INT_LIT16 vA, vB, #+CCCC
-  kAnMath | kAnInt,
-
-  // D4 REM_INT_LIT16 vA, vB, #+CCCC
-  kAnMath | kAnInt,
-
-  // D5 AND_INT_LIT16 vA, vB, #+CCCC
-  kAnMath | kAnInt,
-
-  // D6 OR_INT_LIT16 vA, vB, #+CCCC
-  kAnMath | kAnInt,
-
-  // D7 XOR_INT_LIT16 vA, vB, #+CCCC
-  kAnMath | kAnInt,
-
-  // D8 ADD_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // DA MUL_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // DB DIV_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // DC REM_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // DD AND_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // DE OR_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // DF XOR_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // E0 SHL_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // E1 SHR_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // E2 USHR_INT_LIT8 vAA, vBB, #+CC
-  kAnMath | kAnInt,
-
-  // E3 IGET_QUICK
-  kAnNone,
-
-  // E4 IGET_WIDE_QUICK
-  kAnNone,
-
-  // E5 IGET_OBJECT_QUICK
-  kAnNone,
-
-  // E6 IPUT_QUICK
-  kAnNone,
-
-  // E7 IPUT_WIDE_QUICK
-  kAnNone,
-
-  // E8 IPUT_OBJECT_QUICK
-  kAnNone,
-
-  // E9 INVOKE_VIRTUAL_QUICK
-  kAnInvoke | kAnHeavyWeight,
-
-  // EA INVOKE_VIRTUAL_RANGE_QUICK
-  kAnInvoke | kAnHeavyWeight,
-
-  // EB IPUT_BOOLEAN_QUICK
-  kAnNone,
-
-  // EC IPUT_BYTE_QUICK
-  kAnNone,
-
-  // ED IPUT_CHAR_QUICK
-  kAnNone,
-
-  // EE IPUT_SHORT_QUICK
-  kAnNone,
-
-  // EF IGET_BOOLEAN_QUICK
-  kAnNone,
-
-  // F0 IGET_BYTE_QUICK
-  kAnNone,
-
-  // F1 IGET_CHAR_QUICK
-  kAnNone,
-
-  // F2 IGET_SHORT_QUICK
-  kAnNone,
-
-  // F3 UNUSED_F3
-  kAnNone,
-
-  // F4 UNUSED_F4
-  kAnNone,
-
-  // F5 UNUSED_F5
-  kAnNone,
-
-  // F6 UNUSED_F6
-  kAnNone,
-
-  // F7 UNUSED_F7
-  kAnNone,
-
-  // F8 UNUSED_F8
-  kAnNone,
-
-  // F9 UNUSED_F9
-  kAnNone,
-
-  // FA UNUSED_FA
-  kAnNone,
-
-  // FB UNUSED_FB
-  kAnNone,
-
-  // FC UNUSED_FC
-  kAnNone,
-
-  // FD UNUSED_FD
-  kAnNone,
-
-  // FE UNUSED_FE
-  kAnNone,
-
-  // FF UNUSED_FF
-  kAnNone,
-
-  // Beginning of extended MIR opcodes
-  // 100 MIR_PHI
-  kAnNone,
-
-  // 101 MIR_COPY
-  kAnNone,
-
-  // 102 MIR_FUSED_CMPL_FLOAT
-  kAnNone,
-
-  // 103 MIR_FUSED_CMPG_FLOAT
-  kAnNone,
-
-  // 104 MIR_FUSED_CMPL_DOUBLE
-  kAnNone,
-
-  // 105 MIR_FUSED_CMPG_DOUBLE
-  kAnNone,
-
-  // 106 MIR_FUSED_CMP_LONG
-  kAnNone,
-
-  // 107 MIR_NOP
-  kAnNone,
-
-  // 108 MIR_NULL_CHECK
-  kAnNone,
-
-  // 109 MIR_RANGE_CHECK
-  kAnNone,
-
-  // 10A MIR_DIV_ZERO_CHECK
-  kAnNone,
-
-  // 10B MIR_CHECK
-  kAnNone,
-
-  // 10C MIR_CHECKPART2
-  kAnNone,
-
-  // 10D MIR_SELECT
-  kAnNone,
-
-  // 10E MirOpConstVector
-  kAnNone,
-
-  // 10F MirOpMoveVector
-  kAnNone,
-
-  // 110 MirOpPackedMultiply
-  kAnNone,
-
-  // 111 MirOpPackedAddition
-  kAnNone,
-
-  // 112 MirOpPackedSubtract
-  kAnNone,
-
-  // 113 MirOpPackedShiftLeft
-  kAnNone,
-
-  // 114 MirOpPackedSignedShiftRight
-  kAnNone,
-
-  // 115 MirOpPackedUnsignedShiftRight
-  kAnNone,
-
-  // 116 MirOpPackedAnd
-  kAnNone,
-
-  // 117 MirOpPackedOr
-  kAnNone,
-
-  // 118 MirOpPackedXor
-  kAnNone,
-
-  // 119 MirOpPackedAddReduce
-  kAnNone,
-
-  // 11A MirOpPackedReduce
-  kAnNone,
-
-  // 11B MirOpPackedSet
-  kAnNone,
-
-  // 11C MirOpReserveVectorRegisters
-  kAnNone,
-
-  // 11D MirOpReturnVectorRegisters
-  kAnNone,
-
-  // 11E MirOpMemBarrier
-  kAnNone,
-
-  // 11F MirOpPackedArrayGet
-  kAnArrayOp,
-
-  // 120 MirOpPackedArrayPut
-  kAnArrayOp,
-};
-
-struct MethodStats {
-  int dex_instructions;
-  int math_ops;
-  int fp_ops;
-  int array_ops;
-  int branch_ops;
-  int heavyweight_ops;
-  bool has_computational_loop;
-  bool has_switch;
-  float math_ratio;
-  float fp_ratio;
-  float array_ratio;
-  float branch_ratio;
-  float heavyweight_ratio;
-};
-
-void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
-  if (bb->visited || (bb->block_type != kDalvikByteCode)) {
-    return;
-  }
-  bool computational_block = true;
-  bool has_math = false;
-  /*
-   * For the purposes of this scan, we want to treat the set of basic blocks broken
-   * by an exception edge as a single basic block.  We'll scan forward along the fallthrough
-   * edges until we reach an explicit branch or return.
-   */
-  BasicBlock* ending_bb = bb;
-  if (ending_bb->last_mir_insn != nullptr) {
-    uint32_t ending_flags = kAnalysisAttributes[ending_bb->last_mir_insn->dalvikInsn.opcode];
-    while ((ending_flags & kAnBranch) == 0) {
-      ending_bb = GetBasicBlock(ending_bb->fall_through);
-      ending_flags = kAnalysisAttributes[ending_bb->last_mir_insn->dalvikInsn.opcode];
-    }
-  }
-  /*
-   * Ideally, we'd weight the operations by loop nesting level, but to do so we'd
-   * first need to do some expensive loop detection - and the point of this is to make
-   * an informed guess before investing in computation.  However, we can cheaply detect
-   * many simple loop forms without having to do full dataflow analysis.
-   */
-  int loop_scale_factor = 1;
-  // Simple for and while loops
-  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->fall_through == NullBasicBlockId)) {
-    if ((GetBasicBlock(ending_bb->taken)->taken == bb->id) ||
-        (GetBasicBlock(ending_bb->taken)->fall_through == bb->id)) {
-      loop_scale_factor = 25;
-    }
-  }
-  // Simple do-while loop
-  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->taken == bb->id)) {
-    loop_scale_factor = 25;
-  }
-
-  BasicBlock* tbb = bb;
-  bool done = false;
-  while (!done) {
-    tbb->visited = true;
-    for (MIR* mir = tbb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
-        // Skip any MIR pseudo-op.
-        continue;
-      }
-      uint16_t flags = kAnalysisAttributes[mir->dalvikInsn.opcode];
-      stats->dex_instructions += loop_scale_factor;
-      if ((flags & kAnBranch) == 0) {
-        computational_block &= ((flags & kAnComputational) != 0);
-      } else {
-        stats->branch_ops += loop_scale_factor;
-      }
-      if ((flags & kAnMath) != 0) {
-        stats->math_ops += loop_scale_factor;
-        has_math = true;
-      }
-      if ((flags & kAnFp) != 0) {
-        stats->fp_ops += loop_scale_factor;
-      }
-      if ((flags & kAnArrayOp) != 0) {
-        stats->array_ops += loop_scale_factor;
-      }
-      if ((flags & kAnHeavyWeight) != 0) {
-        stats->heavyweight_ops += loop_scale_factor;
-      }
-      if ((flags & kAnSwitch) != 0) {
-        stats->has_switch = true;
-      }
-    }
-    if (tbb == ending_bb) {
-      done = true;
-    } else {
-      tbb = GetBasicBlock(tbb->fall_through);
-    }
-  }
-  if (has_math && computational_block && (loop_scale_factor > 1)) {
-    stats->has_computational_loop = true;
-  }
-}
-
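For reference, the back-edge test above reduces to: if the scanned run of blocks ends in a branch that leads straight back to its head (directly, or through one intermediate block), every instruction in the run is counted 25 times instead of once. A minimal standalone sketch of that heuristic, with a simplified Block type that is not the ART BasicBlock:

// Hypothetical, simplified stand-in for BasicBlock; not the ART type.
struct Block {
  const Block* taken;         // branch-taken successor, or nullptr
  const Block* fall_through;  // fall-through successor, or nullptr
};

// Weight applied to instructions in the run head..end: 25 when `end`
// branches back to `head` (simple while/for or do-while loop), else 1.
int LoopScaleFactor(const Block* head, const Block* end) {
  // Simple do-while loop: the taken edge returns directly to the head.
  if (end->taken == head) {
    return 25;
  }
  // Simple for/while loop: the taken edge leads to a block whose
  // successor re-enters the head.
  if (end->taken != nullptr && end->fall_through == nullptr &&
      (end->taken->taken == head || end->taken->fall_through == head)) {
    return 25;
  }
  return 1;
}
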
-bool MIRGraph::ComputeSkipCompilation(MethodStats* stats, bool skip_default,
-                                      std::string* skip_message) {
-  float count = stats->dex_instructions;
-  stats->math_ratio = stats->math_ops / count;
-  stats->fp_ratio = stats->fp_ops / count;
-  stats->branch_ratio = stats->branch_ops / count;
-  stats->array_ratio = stats->array_ops / count;
-  stats->heavyweight_ratio = stats->heavyweight_ops / count;
-
-  if (cu_->enable_debug & (1 << kDebugShowFilterStats)) {
-    LOG(INFO) << "STATS " << stats->dex_instructions << ", math:"
-              << stats->math_ratio << ", fp:"
-              << stats->fp_ratio << ", br:"
-              << stats->branch_ratio << ", hw:"
-              << stats->heavyweight_ratio << ", arr:"
-              << stats->array_ratio << ", hot:"
-              << stats->has_computational_loop << ", "
-              << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-  }
-
-  // Computation intensive?
-  if (stats->has_computational_loop && (stats->heavyweight_ratio < 0.04)) {
-    return false;
-  }
-
-  // Complex, logic-intensive?
-  if (cu_->compiler_driver->GetCompilerOptions().IsSmallMethod(GetNumDalvikInsns()) &&
-      stats->branch_ratio > 0.3) {
-    return false;
-  }
-
-  // Significant floating point?
-  if (stats->fp_ratio > 0.05) {
-    return false;
-  }
-
-  // Significant generic math?
-  if (stats->math_ratio > 0.3) {
-    return false;
-  }
-
-  // If array-intensive, compiling is probably worthwhile.
-  if (stats->array_ratio > 0.1) {
-    return false;
-  }
-
-  // Switch operations benefit greatly from compilation, so go ahead and spend the cycles.
-  if (stats->has_switch) {
-    return false;
-  }
-
-  // If significant in size and high proportion of expensive operations, skip.
-  if (cu_->compiler_driver->GetCompilerOptions().IsSmallMethod(GetNumDalvikInsns()) &&
-      (stats->heavyweight_ratio > 0.3)) {
-    *skip_message = "Is a small method with heavyweight ratio " +
-                    std::to_string(stats->heavyweight_ratio);
-    return true;
-  }
-
-  return skip_default;
-}
-
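To make the cutoffs above concrete: a 1000-instruction method with 60 floating-point ops has fp_ratio 0.06 > 0.05 and is kept; with 40 it would fall through to the later rules. A minimal standalone sketch of the decision core, assuming a plain Stats struct in place of MethodStats and folding the IsSmallMethod() query into a flag:

#include <string>

// Simplified mirror of the ratios computed above; not the ART struct.
struct Stats {
  bool has_computational_loop;
  bool has_switch;
  bool is_small_method;  // stands in for IsSmallMethod(GetNumDalvikInsns())
  float math_ratio, fp_ratio, array_ratio, branch_ratio, heavyweight_ratio;
};

// true = skip compilation, false = compile. Thresholds as in the code above.
bool ShouldSkip(const Stats& s, bool skip_default, std::string* skip_message) {
  if (s.has_computational_loop && s.heavyweight_ratio < 0.04f) return false;
  if (s.is_small_method && s.branch_ratio > 0.3f) return false;
  if (s.fp_ratio > 0.05f) return false;
  if (s.math_ratio > 0.3f) return false;
  if (s.array_ratio > 0.1f) return false;
  if (s.has_switch) return false;
  if (s.is_small_method && s.heavyweight_ratio > 0.3f) {
    *skip_message = "small method, heavyweight ratio " +
                    std::to_string(s.heavyweight_ratio);
    return true;
  }
  return skip_default;
}
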
-/*
- * Will eventually want this to be a bit more sophisticated and happen at verification time.
- */
-bool MIRGraph::SkipCompilation(std::string* skip_message) {
-  const CompilerOptions& compiler_options = cu_->compiler_driver->GetCompilerOptions();
-  CompilerOptions::CompilerFilter compiler_filter = compiler_options.GetCompilerFilter();
-  if (compiler_filter == CompilerOptions::kEverything) {
-    return false;
-  }
-
-  // Contains a pattern we don't want to compile?
-  if (PuntToInterpreter()) {
-    *skip_message = "Punt to interpreter set";
-    return true;
-  }
-
-  DCHECK(compiler_options.IsCompilationEnabled());
-
-  // Set up compilation cutoffs based on current filter mode.
-  size_t small_cutoff;
-  size_t default_cutoff;
-  switch (compiler_filter) {
-    case CompilerOptions::kBalanced:
-      small_cutoff = compiler_options.GetSmallMethodThreshold();
-      default_cutoff = compiler_options.GetLargeMethodThreshold();
-      break;
-    case CompilerOptions::kSpace:
-      small_cutoff = compiler_options.GetTinyMethodThreshold();
-      default_cutoff = compiler_options.GetSmallMethodThreshold();
-      break;
-    case CompilerOptions::kSpeed:
-    case CompilerOptions::kTime:
-      small_cutoff = compiler_options.GetHugeMethodThreshold();
-      default_cutoff = compiler_options.GetHugeMethodThreshold();
-      break;
-    default:
-      LOG(FATAL) << "Unexpected compiler_filter_: " << compiler_filter;
-      UNREACHABLE();
-  }
-
-  // If size < cutoff, assume we'll compile - but allow removal.
-  bool skip_compilation = (GetNumDalvikInsns() >= default_cutoff);
-  if (skip_compilation) {
-    *skip_message = "#Insns >= default_cutoff: " + std::to_string(GetNumDalvikInsns());
-  }
-
-  /*
-   * Filter 1: Huge methods are likely to be machine generated, but some aren't.
-   * If huge, assume we won't compile, but allow further analysis to turn it back on.
-   */
-  if (compiler_options.IsHugeMethod(GetNumDalvikInsns())) {
-    skip_compilation = true;
-    *skip_message = "Huge method: " + std::to_string(GetNumDalvikInsns());
-    // If we've got a huge number of basic blocks, don't bother with further analysis.
-    if (static_cast<size_t>(GetNumBlocks()) > (compiler_options.GetHugeMethodThreshold() / 2)) {
-      return true;
-    }
-  } else if (compiler_options.IsLargeMethod(GetNumDalvikInsns()) &&
-    /* If it's large and contains no branches, it's likely to be machine generated initialization */
-      (GetBranchCount() == 0)) {
-    *skip_message = "Large method with no branches";
-    return true;
-  } else if (compiler_filter == CompilerOptions::kSpeed) {
-    // If not huge, compile.
-    return false;
-  }
-
-  // Filter 2: Skip class initializers.
-  if (((cu_->access_flags & kAccConstructor) != 0) && ((cu_->access_flags & kAccStatic) != 0)) {
-    *skip_message = "Class initializer";
-    return true;
-  }
-
-  // Filter 3: if this method is a special pattern, go ahead and emit the canned pattern.
-  if (cu_->compiler_driver->GetMethodInlinerMap() != nullptr &&
-      cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
-          ->IsSpecial(cu_->method_idx)) {
-    return false;
-  }
-
-  // Filter 4: if small, just compile.
-  if (GetNumDalvikInsns() < small_cutoff) {
-    return false;
-  }
-
-  // Analyze graph for:
-  //  o floating point computation
-  //  o basic blocks contained in loop with heavy arithmetic.
-  //  o proportion of conditional branches.
-
-  MethodStats stats;
-  memset(&stats, 0, sizeof(stats));
-
-  ClearAllVisitedFlags();
-  AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    AnalyzeBlock(bb, &stats);
-  }
-
-  return ComputeSkipCompilation(&stats, skip_compilation, skip_message);
-}
-
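The filter-to-cutoff mapping buried in the switch above is easier to see in isolation: kBalanced uses the small/large thresholds, kSpace shifts both down one step, and kSpeed/kTime neutralize the size check by using the huge threshold for both. A standalone sketch with hypothetical threshold values (the real ones come from CompilerOptions and are configurable):

#include <cstddef>
#include <utility>

enum class Filter { kSpace, kBalanced, kSpeed, kTime };

// Hypothetical thresholds, in Dalvik instructions; the real values are
// configurable via CompilerOptions.
constexpr std::size_t kTiny = 20, kSmall = 60, kLarge = 600, kHuge = 10000;

// Returns {small_cutoff, default_cutoff} as chosen by the switch above.
std::pair<std::size_t, std::size_t> Cutoffs(Filter f) {
  switch (f) {
    case Filter::kBalanced: return {kSmall, kLarge};
    case Filter::kSpace:    return {kTiny, kSmall};
    case Filter::kSpeed:
    case Filter::kTime:     return {kHuge, kHuge};
  }
  return {kHuge, kHuge};  // not reached for valid filters
}
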
-void MIRGraph::DoCacheFieldLoweringInfo() {
-  static constexpr uint32_t kFieldIndexFlagQuickened = 0x80000000;
-  // All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
-  const uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 2u;
-  ScopedArenaAllocator allocator(&cu_->arena_stack);
-  auto* field_idxs = allocator.AllocArray<uint32_t>(max_refs, kArenaAllocMisc);
-  DexMemAccessType* field_types = allocator.AllocArray<DexMemAccessType>(
-      max_refs, kArenaAllocMisc);
-  // Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
-  size_t ifield_pos = 0u;
-  size_t sfield_pos = max_refs;
-  AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    if (bb->block_type != kDalvikByteCode) {
-      continue;
-    }
-    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      // Get field index and try to find it among existing indexes. If found, it's usually among
-      // the last few added, so we'll start the search from ifield_pos/sfield_pos. Though this
-      // is a linear search, it actually performs much better than a map-based approach.
-      const bool is_iget_or_iput = IsInstructionIGetOrIPut(mir->dalvikInsn.opcode);
-      const bool is_iget_or_iput_quick = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode);
-      if (is_iget_or_iput || is_iget_or_iput_quick) {
-        uint32_t field_idx;
-        DexMemAccessType access_type;
-        if (is_iget_or_iput) {
-          field_idx = mir->dalvikInsn.vC;
-          access_type = IGetOrIPutMemAccessType(mir->dalvikInsn.opcode);
-        } else {
-          DCHECK(is_iget_or_iput_quick);
-          // Set kFieldIndexFlagQuickened so that we don't deduplicate against non-quickened field
-          // indexes.
-          field_idx = mir->offset | kFieldIndexFlagQuickened;
-          access_type = IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode);
-        }
-        size_t i = ifield_pos;
-        while (i != 0u && field_idxs[i - 1] != field_idx) {
-          --i;
-        }
-        if (i != 0u) {
-          mir->meta.ifield_lowering_info = i - 1;
-          DCHECK_EQ(field_types[i - 1], access_type);
-        } else {
-          mir->meta.ifield_lowering_info = ifield_pos;
-          field_idxs[ifield_pos] = field_idx;
-          field_types[ifield_pos] = access_type;
-          ++ifield_pos;
-        }
-      } else if (IsInstructionSGetOrSPut(mir->dalvikInsn.opcode)) {
-        auto field_idx = mir->dalvikInsn.vB;
-        size_t i = sfield_pos;
-        while (i != max_refs && field_idxs[i] != field_idx) {
-          ++i;
-        }
-        if (i != max_refs) {
-          mir->meta.sfield_lowering_info = max_refs - i - 1u;
-          DCHECK_EQ(field_types[i], SGetOrSPutMemAccessType(mir->dalvikInsn.opcode));
-        } else {
-          mir->meta.sfield_lowering_info = max_refs - sfield_pos;
-          --sfield_pos;
-          field_idxs[sfield_pos] = field_idx;
-          field_types[sfield_pos] = SGetOrSPutMemAccessType(mir->dalvikInsn.opcode);
-        }
-      }
-      DCHECK_LE(ifield_pos, sfield_pos);
-    }
-  }
-
-  if (ifield_pos != 0u) {
-    // Resolve instance field infos.
-    DCHECK_EQ(ifield_lowering_infos_.size(), 0u);
-    ifield_lowering_infos_.reserve(ifield_pos);
-    for (size_t pos = 0u; pos != ifield_pos; ++pos) {
-      const uint32_t field_idx = field_idxs[pos];
-      const bool is_quickened = (field_idx & kFieldIndexFlagQuickened) != 0;
-      const uint32_t masked_field_idx = field_idx & ~kFieldIndexFlagQuickened;
-      CHECK_LT(masked_field_idx, 1u << 16);
-      ifield_lowering_infos_.push_back(
-          MirIFieldLoweringInfo(masked_field_idx, field_types[pos], is_quickened));
-    }
-    ScopedObjectAccess soa(Thread::Current());
-    MirIFieldLoweringInfo::Resolve(soa,
-                                   cu_->compiler_driver,
-                                   GetCurrentDexCompilationUnit(),
-                                   ifield_lowering_infos_.data(),
-                                   ifield_pos);
-  }
-
-  if (sfield_pos != max_refs) {
-    // Resolve static field infos.
-    DCHECK_EQ(sfield_lowering_infos_.size(), 0u);
-    sfield_lowering_infos_.reserve(max_refs - sfield_pos);
-    for (size_t pos = max_refs; pos != sfield_pos;) {
-      --pos;
-      sfield_lowering_infos_.push_back(MirSFieldLoweringInfo(field_idxs[pos], field_types[pos]));
-    }
-    MirSFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
-                                   sfield_lowering_infos_.data(), max_refs - sfield_pos);
-  }
-}
-
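The storage trick above deserves a note: instance-field keys grow from index 0 upward, static-field keys grow from max_refs downward, each lookup is a linear scan from its hot end, and the DCHECK_LE guards the crossover, so both dedup tables share one allocation. A minimal standalone sketch of the same scheme, using std::vector in place of the arena array:

#include <cstddef>
#include <cstdint>
#include <vector>

// One array, two stacks: instance-field keys occupy [0, ifield_pos) and
// static-field keys occupy [sfield_pos, max); the regions may meet but
// never overlap. Lookups scan linearly from the most recently added entry.
class TwoEndedDedup {
 public:
  explicit TwoEndedDedup(std::size_t max)
      : keys_(max), ifield_pos_(0), sfield_pos_(max) {}

  // Returns the 0-based lowering-info index for an instance-field key.
  std::size_t AddIField(uint32_t key) {
    std::size_t i = ifield_pos_;
    while (i != 0 && keys_[i - 1] != key) --i;  // scan from the hot end
    if (i != 0) return i - 1;                   // deduplicated
    keys_[ifield_pos_] = key;
    return ifield_pos_++;
  }

  // Same for a static-field key, growing downward from the top.
  std::size_t AddSField(uint32_t key) {
    std::size_t i = sfield_pos_;
    while (i != keys_.size() && keys_[i] != key) ++i;
    if (i != keys_.size()) return keys_.size() - i - 1;  // deduplicated
    keys_[--sfield_pos_] = key;
    return keys_.size() - sfield_pos_ - 1;
  }

 private:
  std::vector<uint32_t> keys_;
  std::size_t ifield_pos_;
  std::size_t sfield_pos_;
};
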
-void MIRGraph::DoCacheMethodLoweringInfo() {
-  static constexpr uint16_t invoke_types[] = { kVirtual, kSuper, kDirect, kStatic, kInterface };
-  static constexpr uint32_t kMethodIdxFlagQuickened = 0x80000000;
-
-  // Embed the map value in the entry to avoid extra padding in 64-bit builds.
-  struct MapEntry {
-    // Map key: target_method_idx, invoke_type, devirt_target. Ordered to avoid padding.
-    const MethodReference* devirt_target;
-    uint32_t target_method_idx;
-    uint32_t vtable_idx;
-    uint16_t invoke_type;
-    // Map value.
-    uint32_t lowering_info_index;
-  };
-
-  struct MapEntryComparator {
-    bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
-      if (lhs.target_method_idx != rhs.target_method_idx) {
-        return lhs.target_method_idx < rhs.target_method_idx;
-      }
-      if (lhs.invoke_type != rhs.invoke_type) {
-        return lhs.invoke_type < rhs.invoke_type;
-      }
-      if (lhs.vtable_idx != rhs.vtable_idx) {
-        return lhs.vtable_idx < rhs.vtable_idx;
-      }
-      if (lhs.devirt_target != rhs.devirt_target) {
-        if (lhs.devirt_target == nullptr) {
-          return true;
-        }
-        if (rhs.devirt_target == nullptr) {
-          return false;
-        }
-        return devirt_cmp(*lhs.devirt_target, *rhs.devirt_target);
-      }
-      return false;
-    }
-    MethodReferenceComparator devirt_cmp;
-  };
-
-  ScopedArenaAllocator allocator(&cu_->arena_stack);
-
-  // All INVOKE instructions take 3 code units and there must also be a RETURN.
-  const uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 3u;
-
-  // Map invoke key (see MapEntry) to lowering info index and vice versa.
-  // The invoke_map and sequential entries are essentially equivalent to Boost.MultiIndex's
-  // multi_index_container with one ordered index and one sequential index.
-  ScopedArenaSet<MapEntry, MapEntryComparator> invoke_map(MapEntryComparator(),
-                                                          allocator.Adapter());
-  const MapEntry** sequential_entries =
-      allocator.AllocArray<const MapEntry*>(max_refs, kArenaAllocMisc);
-
-  // Find INVOKE insns and their devirtualization targets.
-  const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
-  AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    if (bb->block_type != kDalvikByteCode) {
-      continue;
-    }
-    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      const bool is_quick_invoke = IsInstructionQuickInvoke(mir->dalvikInsn.opcode);
-      const bool is_invoke = IsInstructionInvoke(mir->dalvikInsn.opcode);
-      if (is_quick_invoke || is_invoke) {
-        uint32_t vtable_index = 0;
-        uint32_t target_method_idx = 0;
-        uint32_t invoke_type_idx = 0;  // Default to virtual (in case of quickened).
-        DCHECK_EQ(invoke_types[invoke_type_idx], kVirtual);
-        if (is_quick_invoke) {
-          // We need to store the vtable index since we can't necessarily recreate it at resolve
-          // phase if the dequickening resolved to an interface method.
-          vtable_index = mir->dalvikInsn.vB;
-          // Fake up the method index by storing the mir offset so that we can read the dequicken
-          // info in resolve.
-          target_method_idx = mir->offset | kMethodIdxFlagQuickened;
-        } else {
-          DCHECK(is_invoke);
-          // Decode target method index and invoke type.
-          invoke_type_idx = InvokeInstructionType(mir->dalvikInsn.opcode);
-          target_method_idx = mir->dalvikInsn.vB;
-        }
-        // Find devirtualization target.
-        // TODO: The devirt map is ordered by the dex pc here. Is there a way to get INVOKEs
-        // ordered by dex pc as well? That would allow us to keep an iterator to devirt targets
-        // and increment it as needed instead of making O(log n) lookups.
-        const MethodReference* devirt_target = verified_method->GetDevirtTarget(mir->offset);
-        // Try to insert a new entry. If the insertion fails, we will have found an old one.
-        MapEntry entry = {
-            devirt_target,
-            target_method_idx,
-            vtable_index,
-            invoke_types[invoke_type_idx],
-            static_cast<uint32_t>(invoke_map.size())
-        };
-        auto it = invoke_map.insert(entry).first;  // Iterator to either the old or the new entry.
-        mir->meta.method_lowering_info = it->lowering_info_index;
-        // If we didn't actually insert, this will just overwrite an existing value with the same one.
-        sequential_entries[it->lowering_info_index] = &*it;
-      }
-    }
-  }
-  if (invoke_map.empty()) {
-    return;
-  }
-  // Prepare unique method infos, set method info indexes for their MIRs.
-  const size_t count = invoke_map.size();
-  method_lowering_infos_.reserve(count);
-  for (size_t pos = 0u; pos != count; ++pos) {
-    const MapEntry* entry = sequential_entries[pos];
-    const bool is_quick = (entry->target_method_idx & kMethodIdxFlagQuickened) != 0;
-    const uint32_t masked_method_idx = entry->target_method_idx & ~kMethodIdxFlagQuickened;
-    MirMethodLoweringInfo method_info(masked_method_idx,
-                                      static_cast<InvokeType>(entry->invoke_type), is_quick);
-    if (entry->devirt_target != nullptr) {
-      method_info.SetDevirtualizationTarget(*entry->devirt_target);
-    }
-    if (is_quick) {
-      method_info.SetVTableIndex(entry->vtable_idx);
-    }
-    method_lowering_infos_.push_back(method_info);
-  }
-  MirMethodLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
-                                 method_lowering_infos_.data(), count);
-}
-
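The invoke_map/sequential_entries pair above is a hand-rolled two-index container: a std::set deduplicates by key while each entry carries its insertion rank, and an array of pointers recovers first-insertion order afterwards. A minimal standalone sketch of that idiom:

#include <cstdint>
#include <set>
#include <vector>

struct Entry {
  uint32_t key;             // stands in for the full invoke key above
  uint32_t sequence_index;  // insertion rank; deliberately not compared
  bool operator<(const Entry& other) const { return key < other.key; }
};

// Deduplicating insert that also records first-insertion order, like the
// invoke_map plus sequential_entries combination above.
class OrderedDedup {
 public:
  // Returns the sequence index of `key`, existing or newly assigned.
  uint32_t Add(uint32_t key) {
    Entry entry{key, static_cast<uint32_t>(set_.size())};
    auto it = set_.insert(entry).first;  // the old entry wins on collision
    if (it->sequence_index == sequential_.size()) {
      sequential_.push_back(&*it);       // first time this key was seen
    }
    return it->sequence_index;
  }
  const std::vector<const Entry*>& InOrder() const { return sequential_; }

 private:
  std::set<Entry> set_;                   // node-based: stable addresses
  std::vector<const Entry*> sequential_;
};
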
-}  // namespace art
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
deleted file mode 100644
index f1cc5fc..0000000
--- a/compiler/dex/mir_dataflow.cc
+++ /dev/null
@@ -1,1453 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "local_value_numbering.h"
-#include "dataflow_iterator-inl.h"
-
-namespace art {
-
-/*
- * Main table containing data flow attributes for each bytecode. The
- * first kNumPackedOpcodes entries are for Dalvik bytecode
- * instructions, where extended opcode at the MIR level are appended
- * afterwards.
- *
- * TODO - many optimization flags are incomplete - they will only limit the
- * scope of optimizations but will not cause mis-optimizations.
- */
-const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
-  // 00 NOP
-  DF_NOP,
-
-  // 01 MOVE vA, vB
-  DF_DA | DF_UB | DF_IS_MOVE,
-
-  // 02 MOVE_FROM16 vAA, vBBBB
-  DF_DA | DF_UB | DF_IS_MOVE,
-
-  // 03 MOVE_16 vAAAA, vBBBB
-  DF_DA | DF_UB | DF_IS_MOVE,
-
-  // 04 MOVE_WIDE vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
-
-  // 05 MOVE_WIDE_FROM16 vAA, vBBBB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
-
-  // 06 MOVE_WIDE_16 vAAAA, vBBBB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
-
-  // 07 MOVE_OBJECT vA, vB
-  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
-
-  // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
-  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
-
-  // 09 MOVE_OBJECT_16 vAAAA, vBBBB
-  DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
-
-  // 0A MOVE_RESULT vAA
-  DF_DA,
-
-  // 0B MOVE_RESULT_WIDE vAA
-  DF_DA | DF_A_WIDE,
-
-  // 0C MOVE_RESULT_OBJECT vAA
-  DF_DA | DF_REF_A,
-
-  // 0D MOVE_EXCEPTION vAA
-  DF_DA | DF_REF_A | DF_NON_NULL_DST,
-
-  // 0E RETURN_VOID
-  DF_NOP,
-
-  // 0F RETURN vAA
-  DF_UA,
-
-  // 10 RETURN_WIDE vAA
-  DF_UA | DF_A_WIDE,
-
-  // 11 RETURN_OBJECT vAA
-  DF_UA | DF_REF_A,
-
-  // 12 CONST_4 vA, #+B
-  DF_DA | DF_SETS_CONST,
-
-  // 13 CONST_16 vAA, #+BBBB
-  DF_DA | DF_SETS_CONST,
-
-  // 14 CONST vAA, #+BBBBBBBB
-  DF_DA | DF_SETS_CONST,
-
-  // 15 CONST_HIGH16 vAA, #+BBBB0000
-  DF_DA | DF_SETS_CONST,
-
-  // 16 CONST_WIDE_16 vAA, #+BBBB
-  DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
-  // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
-  DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
-  // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
-  DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
-  // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
-  DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
-  // 1A CONST_STRING vAA, string@BBBB
-  DF_DA | DF_REF_A | DF_NON_NULL_DST,
-
-  // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
-  DF_DA | DF_REF_A | DF_NON_NULL_DST,
-
-  // 1C CONST_CLASS vAA, type@BBBB
-  DF_DA | DF_REF_A | DF_NON_NULL_DST,
-
-  // 1D MONITOR_ENTER vAA
-  DF_UA | DF_NULL_CHK_A | DF_REF_A,
-
-  // 1E MONITOR_EXIT vAA
-  DF_UA | DF_NULL_CHK_A | DF_REF_A,
-
-  // 1F CHK_CAST vAA, type@BBBB
-  DF_UA | DF_REF_A | DF_CHK_CAST | DF_UMS,
-
-  // 20 INSTANCE_OF vA, vB, type@CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
-
-  // 21 ARRAY_LENGTH vA, vB
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_CORE_A | DF_REF_B,
-
-  // 22 NEW_INSTANCE vAA, type@BBBB
-  DF_DA | DF_NON_NULL_DST | DF_REF_A | DF_UMS,
-
-  // 23 NEW_ARRAY vA, vB, type@CCCC
-  DF_DA | DF_UB | DF_NON_NULL_DST | DF_REF_A | DF_CORE_B | DF_UMS,
-
-  // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_NON_NULL_RET | DF_UMS,
-
-  // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
-  DF_FORMAT_3RC | DF_NON_NULL_RET | DF_UMS,
-
-  // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
-  DF_UA | DF_REF_A | DF_UMS,
-
-  // 27 THROW vAA
-  DF_UA | DF_REF_A | DF_UMS,
-
-  // 28 GOTO
-  DF_NOP,
-
-  // 29 GOTO_16
-  DF_NOP,
-
-  // 2A GOTO_32
-  DF_NOP,
-
-  // 2B PACKED_SWITCH vAA, +BBBBBBBB
-  DF_UA | DF_CORE_A,
-
-  // 2C SPARSE_SWITCH vAA, +BBBBBBBB
-  DF_UA | DF_CORE_A,
-
-  // 2D CMPL_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
-
-  // 2E CMPG_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
-
-  // 2F CMPL_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
-
-  // 30 CMPG_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
-
-  // 31 CMP_LONG vAA, vBB, vCC
-  DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 32 IF_EQ vA, vB, +CCCC
-  DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
-  // 33 IF_NE vA, vB, +CCCC
-  DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
-  // 34 IF_LT vA, vB, +CCCC
-  DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
-  // 35 IF_GE vA, vB, +CCCC
-  DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
-  // 36 IF_GT vA, vB, +CCCC
-  DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
-  // 37 IF_LE vA, vB, +CCCC
-  DF_UA | DF_UB | DF_SAME_TYPE_AB,
-
-  // 38 IF_EQZ vAA, +BBBB
-  DF_UA,
-
-  // 39 IF_NEZ vAA, +BBBB
-  DF_UA,
-
-  // 3A IF_LTZ vAA, +BBBB
-  DF_UA,
-
-  // 3B IF_GEZ vAA, +BBBB
-  DF_UA,
-
-  // 3C IF_GTZ vAA, +BBBB
-  DF_UA,
-
-  // 3D IF_LEZ vAA, +BBBB
-  DF_UA,
-
-  // 3E UNUSED_3E
-  DF_NOP,
-
-  // 3F UNUSED_3F
-  DF_NOP,
-
-  // 40 UNUSED_40
-  DF_NOP,
-
-  // 41 UNUSED_41
-  DF_NOP,
-
-  // 42 UNUSED_42
-  DF_NOP,
-
-  // 43 UNUSED_43
-  DF_NOP,
-
-  // 44 AGET vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 45 AGET_WIDE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 46 AGET_OBJECT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_A | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 47 AGET_BOOLEAN vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 48 AGET_BYTE vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 49 AGET_CHAR vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 4A AGET_SHORT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 4B APUT vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 4C APUT_WIDE vAA, vBB, vCC
-  DF_UA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 4D APUT_OBJECT vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_A | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 4E APUT_BOOLEAN vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 4F APUT_BYTE vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 50 APUT_CHAR vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 51 APUT_SHORT vAA, vBB, vCC
-  DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 52 IGET vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 53 IGET_WIDE vA, vB, field@CCCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 54 IGET_OBJECT vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 55 IGET_BOOLEAN vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 56 IGET_BYTE vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 57 IGET_CHAR vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 58 IGET_SHORT vA, vB, field@CCCC
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 59 IPUT vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 5A IPUT_WIDE vA, vB, field@CCCC
-  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 5B IPUT_OBJECT vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 5C IPUT_BOOLEAN vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 5D IPUT_BYTE vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 5E IPUT_CHAR vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 5F IPUT_SHORT vA, vB, field@CCCC
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // 60 SGET vAA, field@BBBB
-  DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 61 SGET_WIDE vAA, field@BBBB
-  DF_DA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 62 SGET_OBJECT vAA, field@BBBB
-  DF_DA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 63 SGET_BOOLEAN vAA, field@BBBB
-  DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 64 SGET_BYTE vAA, field@BBBB
-  DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 65 SGET_CHAR vAA, field@BBBB
-  DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 66 SGET_SHORT vAA, field@BBBB
-  DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 67 SPUT vAA, field@BBBB
-  DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 68 SPUT_WIDE vAA, field@BBBB
-  DF_UA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 69 SPUT_OBJECT vAA, field@BBBB
-  DF_UA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 6A SPUT_BOOLEAN vAA, field@BBBB
-  DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 6B SPUT_BYTE vAA, field@BBBB
-  DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 6C SPUT_CHAR vAA, field@BBBB
-  DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 6D SPUT_SHORT vAA, field@BBBB
-  DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_CLINIT | DF_UMS,
-
-  // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 73 RETURN_VOID_NO_BARRIER
-  DF_NOP,
-
-  // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
-  DF_FORMAT_3RC | DF_CLINIT | DF_UMS,
-
-  // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // 79 UNUSED_79
-  DF_NOP,
-
-  // 7A UNUSED_7A
-  DF_NOP,
-
-  // 7B NEG_INT vA, vB
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 7C NOT_INT vA, vB
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 7D NEG_LONG vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // 7E NOT_LONG vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // 7F NEG_FLOAT vA, vB
-  DF_DA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // 80 NEG_DOUBLE vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // 81 INT_TO_LONG vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 82 INT_TO_FLOAT vA, vB
-  DF_DA | DF_UB | DF_FP_A | DF_CORE_B,
-
-  // 83 INT_TO_DOUBLE vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_CORE_B,
-
-  // 84 LONG_TO_INT vA, vB
-  DF_DA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // 85 LONG_TO_FLOAT vA, vB
-  DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
-
-  // 86 LONG_TO_DOUBLE vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
-
-  // 87 FLOAT_TO_INT vA, vB
-  DF_DA | DF_UB | DF_FP_B | DF_CORE_A,
-
-  // 88 FLOAT_TO_LONG vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_FP_B | DF_CORE_A,
-
-  // 89 FLOAT_TO_DOUBLE vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_FP_B,
-
-  // 8A DOUBLE_TO_INT vA, vB
-  DF_DA | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
-
-  // 8B DOUBLE_TO_LONG vA, vB
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
-
-  // 8C DOUBLE_TO_FLOAT vA, vB
-  DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // 8D INT_TO_BYTE vA, vB
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 8E INT_TO_CHAR vA, vB
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 8F INT_TO_SHORT vA, vB
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // 90 ADD_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 91 SUB_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 92 MUL_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 93 DIV_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 94 REM_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 95 AND_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 96 OR_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 97 XOR_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 98 SHL_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 99 SHR_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9A USHR_INT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9B ADD_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9C SUB_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9D MUL_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9E DIV_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // 9F REM_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A0 AND_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A1 OR_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A2 XOR_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A3 SHL_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A4 SHR_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A5 USHR_LONG vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
-  // A6 ADD_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // A7 SUB_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // A8 MUL_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // A9 DIV_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AA REM_FLOAT vAA, vBB, vCC
-  DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AB ADD_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AC SUB_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AD MUL_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AE DIV_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // AF REM_DOUBLE vAA, vBB, vCC
-  DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
-  // B0 ADD_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B1 SUB_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B2 MUL_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B3 DIV_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B4 REM_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B5 AND_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B6 OR_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B7 XOR_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B8 SHL_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // B9 SHR_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // BA USHR_INT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // BB ADD_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // BC SUB_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // BD MUL_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // BE DIV_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // BF REM_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // C0 AND_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // C1 OR_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // C2 XOR_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // C3 SHL_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // C4 SHR_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // C5 USHR_LONG_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // C6 ADD_FLOAT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // C7 SUB_FLOAT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // C8 MUL_FLOAT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // C9 DIV_FLOAT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // CA REM_FLOAT_2ADDR vA, vB
-  DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // CB ADD_DOUBLE_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // CC SUB_DOUBLE_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // CD MUL_DOUBLE_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // CE DIV_DOUBLE_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // CF REM_DOUBLE_2ADDR vA, vB
-  DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // D0 ADD_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D1 RSUB_INT vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D2 MUL_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D3 DIV_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D4 REM_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D5 AND_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D6 OR_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D7 XOR_INT_LIT16 vA, vB, #+CCCC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D8 ADD_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DA MUL_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DB DIV_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DC REM_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DD AND_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DE OR_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // DF XOR_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // E0 SHL_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // E1 SHR_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // E2 USHR_INT_LIT8 vAA, vBB, #+CC
-  DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
-  // E3 IGET_QUICK
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // E4 IGET_WIDE_QUICK
-  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // E5 IGET_OBJECT_QUICK
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // E6 IPUT_QUICK
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // E7 IPUT_WIDE_QUICK
-  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // E8 IPUT_OBJECT_QUICK
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // E9 INVOKE_VIRTUAL_QUICK
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // EA INVOKE_VIRTUAL_RANGE_QUICK
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // EB IPUT_BOOLEAN_QUICK vA, vB, index
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // EC IPUT_BYTE_QUICK vA, vB, index
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // ED IPUT_CHAR_QUICK vA, vB, index
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // EE IPUT_SHORT_QUICK vA, vB, index
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // EF IGET_BOOLEAN_QUICK vA, vB, index
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // F0 IGET_BYTE_QUICK vA, vB, index
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // F1 IGET_CHAR_QUICK vA, vB, index
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // F2 IGET_SHORT_QUICK vA, vB, index
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // F3 UNUSED_F3
-  DF_NOP,
-
-  // F4 UNUSED_F4
-  DF_NOP,
-
-  // F5 UNUSED_F5
-  DF_NOP,
-
-  // F6 UNUSED_F6
-  DF_NOP,
-
-  // F7 UNUSED_F7
-  DF_NOP,
-
-  // F8 UNUSED_F8
-  DF_NOP,
-
-  // F9 UNUSED_F9
-  DF_NOP,
-
-  // FA UNUSED_FA
-  DF_NOP,
-
-  // FB UNUSED_FB
-  DF_NOP,
-
-  // FC UNUSED_FC
-  DF_NOP,
-
-  // FD UNUSED_FD
-  DF_NOP,
-
-  // FE UNUSED_FE
-  DF_NOP,
-
-  // FF UNUSED_FF
-  DF_NOP,
-
-  // Beginning of extended MIR opcodes
-  // 100 MIR_PHI
-  DF_DA | DF_NULL_TRANSFER_N,
-
-  // 101 MIR_COPY
-  DF_DA | DF_UB | DF_IS_MOVE,
-
-  // 102 MIR_FUSED_CMPL_FLOAT
-  DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // 103 MIR_FUSED_CMPG_FLOAT
-  DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
-  // 104 MIR_FUSED_CMPL_DOUBLE
-  DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // 105 MIR_FUSED_CMPG_DOUBLE
-  DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
-  // 106 MIR_FUSED_CMP_LONG
-  DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
-  // 107 MIR_NOP
-  DF_NOP,
-
-  // 108 MIR_NULL_CHECK
-  DF_UA | DF_REF_A | DF_NULL_CHK_A | DF_LVN,
-
-  // 109 MIR_RANGE_CHECK
-  0,
-
-  // 10A MIR_DIV_ZERO_CHECK
-  0,
-
-  // 10B MIR_CHECK
-  0,
-
-  // 10C MIR_CHECKPART2
-  0,
-
-  // 10D MIR_SELECT
-  DF_DA | DF_UB,
-
-  // 10E MirOpConstVector
-  0,
-
-  // 10F MirOpMoveVector
-  0,
-
-  // 110 MirOpPackedMultiply
-  0,
-
-  // 111 MirOpPackedAddition
-  0,
-
-  // 112 MirOpPackedSubtract
-  0,
-
-  // 113 MirOpPackedShiftLeft
-  0,
-
-  // 114 MirOpPackedSignedShiftRight
-  0,
-
-  // 115 MirOpPackedUnsignedShiftRight
-  0,
-
-  // 116 MirOpPackedAnd
-  0,
-
-  // 117 MirOpPackedOr
-  0,
-
-  // 118 MirOpPackedXor
-  0,
-
-  // 119 MirOpPackedAddReduce
-  DF_FORMAT_EXTENDED,
-
-  // 11A MirOpPackedReduce
-  DF_FORMAT_EXTENDED,
-
-  // 11B MirOpPackedSet
-  DF_FORMAT_EXTENDED,
-
-  // 11C MirOpReserveVectorRegisters
-  0,
-
-  // 11D MirOpReturnVectorRegisters
-  0,
-
-  // 11E MirOpMemBarrier
-  0,
-
-  // 11F MirOpPackedArrayGet
-  DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 120 MirOpPackedArrayPut
-  DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
-
-  // 121 MirOpMaddInt
-  DF_FORMAT_EXTENDED,
-
-  // 122 MirOpMsubInt
-  DF_FORMAT_EXTENDED,
-
-  // 123 MirOpMaddLong
-  DF_FORMAT_EXTENDED,
-
-  // 124 MirOpMsubLong
-  DF_FORMAT_EXTENDED,
-};
-
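Every entry in the table above is an OR of single-bit DF_ attributes that consumers query with masks. As a small illustration of how the bits drive operand counting (the flag values here are hypothetical; the real definitions live elsewhere in the dex/ headers):

#include <cstdint>

// Hypothetical bit assignments for illustration only.
constexpr uint64_t DF_UA = UINT64_C(1) << 0;      // uses vA
constexpr uint64_t DF_UB = UINT64_C(1) << 1;      // uses vB
constexpr uint64_t DF_UC = UINT64_C(1) << 2;      // uses vC
constexpr uint64_t DF_A_WIDE = UINT64_C(1) << 3;  // vA is a register pair
constexpr uint64_t DF_B_WIDE = UINT64_C(1) << 4;
constexpr uint64_t DF_C_WIDE = UINT64_C(1) << 5;

// Mirrors the counting loop in DoSSAConversion below: one use per DF_Ux
// bit, plus one more when that operand is wide.
int CountUses(uint64_t df) {
  int n = 0;
  if (df & DF_UA) n += (df & DF_A_WIDE) ? 2 : 1;
  if (df & DF_UB) n += (df & DF_B_WIDE) ? 2 : 1;
  if (df & DF_UC) n += (df & DF_C_WIDE) ? 2 : 1;
  return n;
}
// e.g. an ADD_LONG-style mask (DF_UB|DF_B_WIDE|DF_UC|DF_C_WIDE) yields 4.
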
-/* Any register that is used before being defined is considered live-in */
-void MIRGraph::HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
-                               ArenaBitVector* live_in_v, int dalvik_reg_id) {
-  use_v->SetBit(dalvik_reg_id);
-  if (!def_v->IsBitSet(dalvik_reg_id)) {
-    live_in_v->SetBit(dalvik_reg_id);
-  }
-}
-
-/* Mark a reg as being defined */
-void MIRGraph::HandleDef(ArenaBitVector* def_v, int dalvik_reg_id) {
-  def_v->SetBit(dalvik_reg_id);
-}
-
-void MIRGraph::HandleExtended(ArenaBitVector* use_v, ArenaBitVector* def_v,
-                              ArenaBitVector* live_in_v,
-                              const MIR::DecodedInstruction& d_insn) {
-  // For vector MIRs, vC contains type information
-  bool is_vector_type_wide = false;
-  int type_size = d_insn.vC >> 16;
-  if (type_size == k64 || type_size == kDouble) {
-    is_vector_type_wide = true;
-  }
-
-  switch (static_cast<int>(d_insn.opcode)) {
-    case kMirOpPackedAddReduce:
-      HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vA);
-      if (is_vector_type_wide == true) {
-        HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vA + 1);
-      }
-      HandleDef(def_v, d_insn.vA);
-      if (is_vector_type_wide == true) {
-        HandleDef(def_v, d_insn.vA + 1);
-      }
-      break;
-    case kMirOpPackedReduce:
-      HandleDef(def_v, d_insn.vA);
-      if (is_vector_type_wide == true) {
-        HandleDef(def_v, d_insn.vA + 1);
-      }
-      break;
-    case kMirOpPackedSet:
-      HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB);
-      if (is_vector_type_wide == true) {
-        HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB + 1);
-      }
-      break;
-    case kMirOpMaddInt:
-    case kMirOpMsubInt:
-      HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB);
-      HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vC);
-      HandleLiveInUse(use_v, def_v, live_in_v, d_insn.arg[0]);
-      HandleDef(def_v, d_insn.vA);
-      break;
-    case kMirOpMaddLong:
-    case kMirOpMsubLong:
-      HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB);
-      HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB + 1);
-      HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vC);
-      HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vC + 1);
-      HandleLiveInUse(use_v, def_v, live_in_v, d_insn.arg[0]);
-      HandleLiveInUse(use_v, def_v, live_in_v, d_insn.arg[0] + 1);
-      HandleDef(def_v, d_insn.vA);
-      HandleDef(def_v, d_insn.vA + 1);
-      break;
-    default:
-      LOG(ERROR) << "Unexpected Extended Opcode " << d_insn.opcode;
-      break;
-  }
-}
-
-/*
- * Find out live-in variables for natural loops. Variables that are live-in in
- * the main loop body are considered to be defined in the entry block.
- */
-bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) {
-  MIR* mir;
-  ArenaBitVector *use_v, *def_v, *live_in_v;
-
-  if (bb->data_flow_info == nullptr) return false;
-
-  use_v = bb->data_flow_info->use_v =
-      new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false);
-  def_v = bb->data_flow_info->def_v =
-      new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false);
-  live_in_v = bb->data_flow_info->live_in_v =
-      new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false);
-
-  for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-    uint64_t df_attributes = GetDataFlowAttributes(mir);
-    MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
-
-    if (df_attributes & DF_HAS_USES) {
-      if (df_attributes & DF_UA) {
-        HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vA);
-        if (df_attributes & DF_A_WIDE) {
-          HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vA+1);
-        }
-      }
-      if (df_attributes & DF_UB) {
-        HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vB);
-        if (df_attributes & DF_B_WIDE) {
-          HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vB+1);
-        }
-      }
-      if (df_attributes & DF_UC) {
-        HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vC);
-        if (df_attributes & DF_C_WIDE) {
-          HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vC+1);
-        }
-      }
-    }
-    if (df_attributes & DF_FORMAT_35C) {
-      for (unsigned int i = 0; i < d_insn->vA; i++) {
-        HandleLiveInUse(use_v, def_v, live_in_v, d_insn->arg[i]);
-      }
-    }
-    if (df_attributes & DF_FORMAT_3RC) {
-      for (unsigned int i = 0; i < d_insn->vA; i++) {
-        HandleLiveInUse(use_v, def_v, live_in_v, d_insn->vC+i);
-      }
-    }
-    if (df_attributes & DF_HAS_DEFS) {
-      HandleDef(def_v, d_insn->vA);
-      if (df_attributes & DF_A_WIDE) {
-        HandleDef(def_v, d_insn->vA+1);
-      }
-    }
-    if (df_attributes & DF_FORMAT_EXTENDED) {
-      HandleExtended(use_v, def_v, live_in_v, mir->dalvikInsn);
-    }
-  }
-  return true;
-}
-
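FindLocalLiveIn is the textbook per-block use/def scan: a register is live-in exactly when some instruction reads it before any instruction in the same block writes it. A standalone sketch over a straight-line instruction list, with std::bitset standing in for ArenaBitVector:

#include <bitset>
#include <cstddef>
#include <vector>

constexpr std::size_t kMaxVRegs = 64;  // arbitrary cap for the sketch

struct Insn {
  std::vector<int> uses;  // vRegs read
  std::vector<int> defs;  // vRegs written
};

struct BlockDataFlow {
  std::bitset<kMaxVRegs> use_v, def_v, live_in_v;
};

// Same rule as HandleLiveInUse/HandleDef above: a use contributes to
// live-in only if no earlier instruction in the block defined the vReg.
BlockDataFlow ComputeLocalLiveIn(const std::vector<Insn>& block) {
  BlockDataFlow df;
  for (const Insn& insn : block) {
    for (int v : insn.uses) {
      df.use_v.set(v);
      if (!df.def_v.test(v)) df.live_in_v.set(v);
    }
    for (int v : insn.defs) df.def_v.set(v);
  }
  return df;
}
// For {uses:{0}, defs:{1}} then {uses:{1}, defs:{0}}: live_in = {0} only,
// since v1 is defined in the block before it is read.
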
-int MIRGraph::AddNewSReg(int v_reg) {
-  int subscript = ++ssa_last_defs_[v_reg];
-  uint32_t ssa_reg = GetNumSSARegs();
-  SetNumSSARegs(ssa_reg + 1);
-  ssa_base_vregs_.push_back(v_reg);
-  ssa_subscripts_.push_back(subscript);
-  DCHECK_EQ(ssa_base_vregs_.size(), ssa_subscripts_.size());
-  // If we are expanding very late, update use counts too.
-  if (ssa_reg > 0 && use_counts_.size() == ssa_reg) {
-    // Need to expand the counts.
-    use_counts_.push_back(0);
-    raw_use_counts_.push_back(0);
-  }
-  return ssa_reg;
-}
-
-/* Find out the latest SSA register for a given Dalvik register */
-void MIRGraph::HandleSSAUse(int* uses, int dalvik_reg, int reg_index) {
-  DCHECK((dalvik_reg >= 0) && (dalvik_reg < static_cast<int>(GetNumOfCodeAndTempVRs())));
-  uses[reg_index] = vreg_to_ssa_map_[dalvik_reg];
-}
-
-/* Set up a new SSA register for a given Dalvik register */
-void MIRGraph::HandleSSADef(int* defs, int dalvik_reg, int reg_index) {
-  DCHECK((dalvik_reg >= 0) && (dalvik_reg < static_cast<int>(GetNumOfCodeAndTempVRs())));
-  int ssa_reg = AddNewSReg(dalvik_reg);
-  vreg_to_ssa_map_[dalvik_reg] = ssa_reg;
-  defs[reg_index] = ssa_reg;
-}
-
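AddNewSReg, HandleSSAUse, and HandleSSADef together implement standard SSA renaming: each Dalvik vReg has a current name (its latest SSA register), a use reads that name, and a def mints a fresh (vreg, subscript) pair and makes it current. A self-contained sketch of the same bookkeeping, minus the arena and use-count plumbing:

#include <string>
#include <vector>

class SsaRenamer {
 public:
  explicit SsaRenamer(int num_vregs)
      : vreg_to_ssa_(num_vregs), last_def_(num_vregs, 0) {
    for (int v = 0; v < num_vregs; ++v) {
      vreg_to_ssa_[v] = v;  // initially, SSA reg i is (vReg i, subscript 0)
      base_.push_back(v);
      subscript_.push_back(0);
    }
  }
  int Use(int vreg) const { return vreg_to_ssa_[vreg]; }  // current name
  int Def(int vreg) {  // mint a fresh name and make it current
    int ssa_reg = static_cast<int>(base_.size());
    base_.push_back(vreg);
    subscript_.push_back(++last_def_[vreg]);
    vreg_to_ssa_[vreg] = ssa_reg;
    return ssa_reg;
  }
  std::string Name(int ssa_reg) const {  // e.g. "v0_2"
    return "v" + std::to_string(base_[ssa_reg]) + "_" +
           std::to_string(subscript_[ssa_reg]);
  }
 private:
  std::vector<int> vreg_to_ssa_, last_def_, base_, subscript_;
};
// SsaRenamer r(2); r.Name(r.Use(0)) == "v0_0"; r.Def(0); then
// r.Name(r.Use(0)) == "v0_1".
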
-void MIRGraph::AllocateSSAUseData(MIR *mir, int num_uses) {
-  mir->ssa_rep->num_uses = num_uses;
-
-  if (mir->ssa_rep->num_uses_allocated < num_uses) {
-    mir->ssa_rep->uses = arena_->AllocArray<int32_t>(num_uses, kArenaAllocDFInfo);
-  }
-}
-
-void MIRGraph::AllocateSSADefData(MIR *mir, int num_defs) {
-  mir->ssa_rep->num_defs = num_defs;
-
-  if (mir->ssa_rep->num_defs_allocated < num_defs) {
-    mir->ssa_rep->defs = arena_->AllocArray<int32_t>(num_defs, kArenaAllocDFInfo);
-  }
-}
-
-/* Look up new SSA names for format_35c instructions */
-void MIRGraph::DataFlowSSAFormat35C(MIR* mir) {
-  MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
-  int num_uses = d_insn->vA;
-  int i;
-
-  AllocateSSAUseData(mir, num_uses);
-
-  for (i = 0; i < num_uses; i++) {
-    HandleSSAUse(mir->ssa_rep->uses, d_insn->arg[i], i);
-  }
-}
-
-/* Look up new SSA names for format_3rc instructions */
-void MIRGraph::DataFlowSSAFormat3RC(MIR* mir) {
-  MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
-  int num_uses = d_insn->vA;
-  int i;
-
-  AllocateSSAUseData(mir, num_uses);
-
-  for (i = 0; i < num_uses; i++) {
-    HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+i, i);
-  }
-}
-
-void MIRGraph::DataFlowSSAFormatExtended(MIR* mir) {
-  const MIR::DecodedInstruction& d_insn = mir->dalvikInsn;
-  // For vector MIRs, vC contains type information
-  bool is_vector_type_wide = false;
-  int type_size = d_insn.vC >> 16;
-  if (type_size == k64 || type_size == kDouble) {
-    is_vector_type_wide = true;
-  }
-
-  switch (static_cast<int>(mir->dalvikInsn.opcode)) {
-    case kMirOpPackedAddReduce:
-      // We have one use, plus one more for wide
-      AllocateSSAUseData(mir, is_vector_type_wide ? 2 : 1);
-      HandleSSAUse(mir->ssa_rep->uses, d_insn.vA, 0);
-      if (is_vector_type_wide == true) {
-        HandleSSAUse(mir->ssa_rep->uses, d_insn.vA + 1, 1);
-      }
-
-      // We have a def, plus one more for wide
-      AllocateSSADefData(mir, is_vector_type_wide ? 2 : 1);
-      HandleSSADef(mir->ssa_rep->defs, d_insn.vA, 0);
-      if (is_vector_type_wide == true) {
-        HandleSSADef(mir->ssa_rep->defs, d_insn.vA + 1, 1);
-      }
-      break;
-    case kMirOpPackedReduce:
-      // We have a def, plus one more for wide
-      AllocateSSADefData(mir, is_vector_type_wide ? 2 : 1);
-      HandleSSADef(mir->ssa_rep->defs, d_insn.vA, 0);
-      if (is_vector_type_wide == true) {
-        HandleSSADef(mir->ssa_rep->defs, d_insn.vA + 1, 1);
-      }
-      break;
-    case kMirOpPackedSet:
-      // We have one use, plus one more for wide
-      AllocateSSAUseData(mir, is_vector_type_wide ? 2 : 1);
-      HandleSSAUse(mir->ssa_rep->uses, d_insn.vB, 0);
-      if (is_vector_type_wide == true) {
-        HandleSSAUse(mir->ssa_rep->uses, d_insn.vB + 1, 1);
-      }
-      break;
-    case kMirOpMaddInt:
-    case kMirOpMsubInt:
-      AllocateSSAUseData(mir, 3);
-      HandleSSAUse(mir->ssa_rep->uses, d_insn.vB, 0);
-      HandleSSAUse(mir->ssa_rep->uses, d_insn.vC, 1);
-      HandleSSAUse(mir->ssa_rep->uses, d_insn.arg[0], 2);
-      AllocateSSADefData(mir, 1);
-      HandleSSADef(mir->ssa_rep->defs, d_insn.vA, 0);
-      break;
-    case kMirOpMaddLong:
-    case kMirOpMsubLong:
-      AllocateSSAUseData(mir, 6);
-      HandleSSAUse(mir->ssa_rep->uses, d_insn.vB, 0);
-      HandleSSAUse(mir->ssa_rep->uses, d_insn.vB + 1, 1);
-      HandleSSAUse(mir->ssa_rep->uses, d_insn.vC, 2);
-      HandleSSAUse(mir->ssa_rep->uses, d_insn.vC + 1, 3);
-      HandleSSAUse(mir->ssa_rep->uses, d_insn.arg[0], 4);
-      HandleSSAUse(mir->ssa_rep->uses, d_insn.arg[0] + 1, 5);
-      AllocateSSADefData(mir, 2);
-      HandleSSADef(mir->ssa_rep->defs, d_insn.vA, 0);
-      HandleSSADef(mir->ssa_rep->defs, d_insn.vA + 1, 1);
-      break;
-    default:
-      LOG(ERROR) << "Missing case for extended MIR: " << mir->dalvikInsn.opcode;
-      break;
-  }
-}
-
-/* Entry function to convert a block into SSA representation */
-bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
-  if (bb->data_flow_info == nullptr) return false;
-
-  /*
-   * Pruned SSA form: Insert phi nodes for each dalvik register marked in phi_node_blocks
-   * only if the dalvik register is in the live-in set.
-   */
-  BasicBlockId bb_id = bb->id;
-  for (int dalvik_reg = GetNumOfCodeAndTempVRs() - 1; dalvik_reg >= 0; dalvik_reg--) {
-    if (temp_.ssa.phi_node_blocks[dalvik_reg]->IsBitSet(bb_id)) {
-      if (!bb->data_flow_info->live_in_v->IsBitSet(dalvik_reg)) {
-        /* Variable will be clobbered before being used - no need for phi */
-        vreg_to_ssa_map_[dalvik_reg] = INVALID_SREG;
-        continue;
-      }
-      MIR *phi = NewMIR();
-      phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
-      phi->dalvikInsn.vA = dalvik_reg;
-      phi->offset = bb->start_offset;
-      phi->m_unit_index = 0;  // Arbitrarily assign all Phi nodes to outermost method.
-      bb->PrependMIR(phi);
-    }
-  }
-
-  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-    mir->ssa_rep =
-        static_cast<struct SSARepresentation *>(arena_->Alloc(sizeof(SSARepresentation),
-                                                              kArenaAllocDFInfo));
-    memset(mir->ssa_rep, 0, sizeof(*mir->ssa_rep));
-
-    uint64_t df_attributes = GetDataFlowAttributes(mir);
-
-    // If not a pseudo-op, mark the method as non-leaf when it contains an invoke.
-    if (!MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
-      int flags = mir->dalvikInsn.FlagsOf();
-
-      if ((flags & Instruction::kInvoke) != 0) {
-        attributes_ &= ~METHOD_IS_LEAF;
-      }
-    }
-
-    int num_uses = 0;
-
-    if (df_attributes & DF_FORMAT_35C) {
-      DataFlowSSAFormat35C(mir);
-      continue;
-    }
-
-    if (df_attributes & DF_FORMAT_3RC) {
-      DataFlowSSAFormat3RC(mir);
-      continue;
-    }
-
-    if (df_attributes & DF_FORMAT_EXTENDED) {
-      DataFlowSSAFormatExtended(mir);
-      continue;
-    }
-
-    if (df_attributes & DF_HAS_USES) {
-      if (df_attributes & DF_UA) {
-        num_uses++;
-        if (df_attributes & DF_A_WIDE) {
-          num_uses++;
-        }
-      }
-      if (df_attributes & DF_UB) {
-        num_uses++;
-        if (df_attributes & DF_B_WIDE) {
-          num_uses++;
-        }
-      }
-      if (df_attributes & DF_UC) {
-        num_uses++;
-        if (df_attributes & DF_C_WIDE) {
-          num_uses++;
-        }
-      }
-    }
-
-    AllocateSSAUseData(mir, num_uses);
-
-    int num_defs = 0;
-
-    if (df_attributes & DF_HAS_DEFS) {
-      num_defs++;
-      if (df_attributes & DF_A_WIDE) {
-        num_defs++;
-      }
-    }
-
-    AllocateSSADefData(mir, num_defs);
-
-    MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
-
-    if (df_attributes & DF_HAS_USES) {
-      num_uses = 0;
-      if (df_attributes & DF_UA) {
-        HandleSSAUse(mir->ssa_rep->uses, d_insn->vA, num_uses++);
-        if (df_attributes & DF_A_WIDE) {
-          HandleSSAUse(mir->ssa_rep->uses, d_insn->vA+1, num_uses++);
-        }
-      }
-      if (df_attributes & DF_UB) {
-        HandleSSAUse(mir->ssa_rep->uses, d_insn->vB, num_uses++);
-        if (df_attributes & DF_B_WIDE) {
-          HandleSSAUse(mir->ssa_rep->uses, d_insn->vB+1, num_uses++);
-        }
-      }
-      if (df_attributes & DF_UC) {
-        HandleSSAUse(mir->ssa_rep->uses, d_insn->vC, num_uses++);
-        if (df_attributes & DF_C_WIDE) {
-          HandleSSAUse(mir->ssa_rep->uses, d_insn->vC + 1, num_uses++);
-        }
-      }
-    }
-    if (df_attributes & DF_HAS_DEFS) {
-      HandleSSADef(mir->ssa_rep->defs, d_insn->vA, 0);
-      if (df_attributes & DF_A_WIDE) {
-        HandleSSADef(mir->ssa_rep->defs, d_insn->vA + 1, 1);
-      }
-    }
-  }
-
-  /*
-   * Take a snapshot of Dalvik->SSA mapping at the end of each block. The
-   * input to PHI nodes can be derived from the snapshot of all
-   * predecessor blocks.
-   */
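-  // Illustration (not from the original source): if this block ends with v0 mapped
-  // to SSA name v0_2, the exit snapshot records that name so a successor's phi for
-  // v0 can use v0_2 as the input arriving along this predecessor edge.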
-  bb->data_flow_info->vreg_to_ssa_map_exit =
-      arena_->AllocArray<int32_t>(GetNumOfCodeAndTempVRs(), kArenaAllocDFInfo);
-
-  memcpy(bb->data_flow_info->vreg_to_ssa_map_exit, vreg_to_ssa_map_,
-         sizeof(int32_t) * GetNumOfCodeAndTempVRs());
-  return true;
-}
-
-void MIRGraph::InitializeBasicBlockDataFlow() {
-  /*
-   * Allocate the BasicBlockDataFlow structure for the entry, exit and code blocks.
-   */
-  for (BasicBlock* bb : block_list_) {
-    if (bb->hidden) continue;
-    if (bb->block_type == kDalvikByteCode ||
-        bb->block_type == kEntryBlock ||
-        bb->block_type == kExitBlock) {
-      bb->data_flow_info =
-          static_cast<BasicBlockDataFlow*>(arena_->Alloc(sizeof(BasicBlockDataFlow),
-                                                         kArenaAllocDFInfo));
-    }
-  }
-}
-
-/* Setup the basic data structures for SSA conversion */
-void MIRGraph::CompilerInitializeSSAConversion() {
-  size_t num_reg = GetNumOfCodeAndTempVRs();
-
-  ssa_base_vregs_.clear();
-  ssa_base_vregs_.reserve(num_reg + GetDefCount() + 128);
-  ssa_subscripts_.clear();
-  ssa_subscripts_.reserve(num_reg + GetDefCount() + 128);
-
-  /*
-   * Initial number of SSA registers is equal to the number of Dalvik
-   * registers.
-   */
-  SetNumSSARegs(num_reg);
-
-  /*
-   * Initialize the SSA2Dalvik map list. For the first num_reg elements,
-   * the subscript is 0 so we use the ENCODE_REG_SUB macro to encode the value
-   * into "(0 << 16) | i"
-   */
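-  // Worked example (illustrative, assuming the subscript occupies the upper 16
-  // bits as described above): Dalvik register 5 starts as SSA name
-  // (0 << 16) | 5 == 0x00000005; its next definition would be (1 << 16) | 5.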
-  for (unsigned int i = 0; i < num_reg; i++) {
-    ssa_base_vregs_.push_back(i);
-    ssa_subscripts_.push_back(0);
-  }
-
-  /*
-   * Initialize the DalvikToSSAMap map. There is one entry for each
-   * Dalvik register, and the SSA names for those are the same.
-   */
-  vreg_to_ssa_map_ = arena_->AllocArray<int32_t>(num_reg, kArenaAllocDFInfo);
-  /* Keep track of the highest def for each Dalvik reg */
-  ssa_last_defs_ = arena_->AllocArray<int>(num_reg, kArenaAllocDFInfo);
-
-  for (unsigned int i = 0; i < num_reg; i++) {
-    vreg_to_ssa_map_[i] = i;
-    ssa_last_defs_[i] = 0;
-  }
-
-  // Create a compiler temporary for Method*. This is done after SSA initialization.
-  CompilerTemp* method_temp = GetNewCompilerTemp(kCompilerTempSpecialMethodPtr, false);
-  // The MIR graph tracks the sreg for the method pointer specially, so record it now.
-  method_sreg_ = method_temp->s_reg_low;
-
-  InitializeBasicBlockDataFlow();
-}
-
-uint32_t MIRGraph::GetUseCountWeight(BasicBlock* bb) const {
-  // Each level of loop nesting adds 100 to the weight, capped at 3 levels deep;
-  // the minimum weight is 1.
-  uint32_t depth = std::min(3U, static_cast<uint32_t>(bb->nesting_depth));
-  uint32_t weight = std::max(1U, depth * 100);
-  return weight;
-}
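-// Illustration (not from the original source): depth 0 yields weight 1, depth 1
-// yields 100, and depth >= 3 saturates at 300, so uses inside deeply nested loops
-// dominate the weighted counts gathered below.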
-
-/*
- * Count uses, weighting by loop nesting depth.  This code only
- * counts explicitly used s_regs.  A later phase will add implicit
- * counts for things such as Method*, null-checked references, etc.
- */
-void MIRGraph::CountUses(BasicBlock* bb) {
-  if (bb->block_type != kDalvikByteCode) {
-    return;
-  }
-  uint32_t weight = GetUseCountWeight(bb);
-  for (MIR* mir = bb->first_mir_insn; (mir != nullptr); mir = mir->next) {
-    if (mir->ssa_rep == nullptr) {
-      continue;
-    }
-    for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
-      int s_reg = mir->ssa_rep->uses[i];
-      raw_use_counts_[s_reg] += 1u;
-      use_counts_[s_reg] += weight;
-    }
-  }
-}
-
-/* Verify that every claimed predecessor actually lists this block as a successor */
-bool MIRGraph::VerifyPredInfo(BasicBlock* bb) {
-  for (BasicBlockId pred_id : bb->predecessors) {
-    BasicBlock* pred_bb = GetBasicBlock(pred_id);
-    DCHECK(pred_bb != nullptr);
-    bool found = false;
-    if (pred_bb->taken == bb->id) {
-      found = true;
-    } else if (pred_bb->fall_through == bb->id) {
-      found = true;
-    } else if (pred_bb->successor_block_list_type != kNotUsed) {
-      for (SuccessorBlockInfo* successor_block_info : pred_bb->successor_blocks) {
-        BasicBlockId succ_bb = successor_block_info->block;
-        if (succ_bb == bb->id) {
-          found = true;
-          break;
-        }
-      }
-    }
-    if (!found) {
-      char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];
-      GetBlockName(bb, block_name1);
-      GetBlockName(pred_bb, block_name2);
-      DumpCFG("/sdcard/cfg/", false);
-      LOG(FATAL) << "Successor " << block_name1 << " not found from "
-                 << block_name2;
-    }
-  }
-  return true;
-}
-
-void MIRGraph::VerifyDataflow() {
-  /* Verify that all blocks are connected as claimed */
-  AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    VerifyPredInfo(bb);
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/mir_field_info.cc b/compiler/dex/mir_field_info.cc
deleted file mode 100644
index 13bbc3e..0000000
--- a/compiler/dex/mir_field_info.cc
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "mir_field_info.h"
-
-#include <string.h>
-
-#include "base/logging.h"
-#include "dex/verified_method.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_driver-inl.h"
-#include "mirror/class_loader.h"  // Only to allow casts in Handle<ClassLoader>.
-#include "mirror/dex_cache.h"     // Only to allow casts in Handle<DexCache>.
-#include "scoped_thread_state_change.h"
-#include "handle_scope-inl.h"
-
-namespace art {
-
-void MirIFieldLoweringInfo::Resolve(const ScopedObjectAccess& soa,
-                                    CompilerDriver* compiler_driver,
-                                    const DexCompilationUnit* mUnit,
-                                    MirIFieldLoweringInfo* field_infos, size_t count) {
-  if (kIsDebugBuild) {
-    DCHECK(field_infos != nullptr);
-    DCHECK_NE(count, 0u);
-    for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
-      MirIFieldLoweringInfo unresolved(it->field_idx_, it->MemAccessType(), it->IsQuickened());
-      unresolved.field_offset_ = it->field_offset_;
-      unresolved.CheckEquals(*it);
-    }
-  }
-
-  // We're going to resolve fields and check access in a tight loop. It's better to hold
-  // the lock and the needed references once than to re-acquire them again and again.
-  StackHandleScope<3> hs(soa.Self());
-  Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
-  Handle<mirror::ClassLoader> class_loader(
-      hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
-  Handle<mirror::Class> referrer_class(hs.NewHandle(
-      compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
-  const VerifiedMethod* const verified_method = mUnit->GetVerifiedMethod();
-  // Even if the referrer class is unresolved (i.e. we're compiling a method without class
-  // definition) we still want to resolve fields and record all available info.
-  for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
-    uint32_t field_idx;
-    ArtField* resolved_field;
-    if (!it->IsQuickened()) {
-      field_idx = it->field_idx_;
-      resolved_field = compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit,
-                                                     field_idx, false);
-    } else {
-      const auto mir_offset = it->field_idx_;
-      // For quickened instructions, it->field_offset_ actually contains the mir offset.
-      // We need to use the de-quickening info to get dex file / field idx
-      auto* field_idx_ptr = verified_method->GetDequickenIndex(mir_offset);
-      CHECK(field_idx_ptr != nullptr);
-      field_idx = field_idx_ptr->index;
-      StackHandleScope<1> hs2(soa.Self());
-      auto h_dex_cache = hs2.NewHandle(compiler_driver->FindDexCache(field_idx_ptr->dex_file));
-      resolved_field = compiler_driver->ResolveFieldWithDexFile(
-          soa, h_dex_cache, class_loader, field_idx_ptr->dex_file, field_idx, false);
-      // Since we don't have a valid field index we can't go slow path later.
-      CHECK(resolved_field != nullptr);
-    }
-    if (UNLIKELY(resolved_field == nullptr)) {
-      continue;
-    }
-    compiler_driver->GetResolvedFieldDexFileLocation(resolved_field,
-        &it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_field_idx_);
-    bool is_volatile = compiler_driver->IsFieldVolatile(resolved_field);
-    it->field_offset_ = compiler_driver->GetFieldOffset(resolved_field);
-    std::pair<bool, bool> fast_path = compiler_driver->IsFastInstanceField(
-        dex_cache.Get(), referrer_class.Get(), resolved_field, field_idx);
-    it->flags_ = 0u |  // Without kFlagIsStatic.
-        (it->flags_ & (kMemAccessTypeMask << kBitMemAccessTypeBegin)) |
-        (is_volatile ? kFlagIsVolatile : 0u) |
-        (fast_path.first ? kFlagFastGet : 0u) |
-        (fast_path.second ? kFlagFastPut : 0u);
-  }
-}
-
-void MirSFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
-                                    const DexCompilationUnit* mUnit,
-                                    MirSFieldLoweringInfo* field_infos, size_t count) {
-  if (kIsDebugBuild) {
-    DCHECK(field_infos != nullptr);
-    DCHECK_NE(count, 0u);
-    for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
-      MirSFieldLoweringInfo unresolved(it->field_idx_, it->MemAccessType());
-      // In 64-bit builds, there's padding after storage_index_, don't include it in memcmp.
-      size_t size = OFFSETOF_MEMBER(MirSFieldLoweringInfo, storage_index_) +
-          sizeof(it->storage_index_);
-      DCHECK_EQ(memcmp(&unresolved, &*it, size), 0);
-    }
-  }
-
-  // We're going to resolve fields and check access in a tight loop. It's better to hold
-  // the lock and the needed references once than to re-acquire them again and again.
-  ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<3> hs(soa.Self());
-  Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
-  Handle<mirror::ClassLoader> class_loader(
-      hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
-  Handle<mirror::Class> referrer_class_handle(hs.NewHandle(
-      compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
-  // Even if the referrer class is unresolved (i.e. we're compiling a method without class
-  // definition) we still want to resolve fields and record all available info.
-
-  for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
-    uint32_t field_idx = it->field_idx_;
-    ArtField* resolved_field =
-        compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit, field_idx, true);
-    if (UNLIKELY(resolved_field == nullptr)) {
-      continue;
-    }
-    compiler_driver->GetResolvedFieldDexFileLocation(resolved_field,
-        &it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_field_idx_);
-    bool is_volatile = compiler_driver->IsFieldVolatile(resolved_field);
-
-    mirror::Class* referrer_class = referrer_class_handle.Get();
-    std::pair<bool, bool> fast_path = compiler_driver->IsFastStaticField(
-        dex_cache.Get(), referrer_class, resolved_field, field_idx, &it->storage_index_);
-    uint16_t flags = kFlagIsStatic |
-        (it->flags_ & (kMemAccessTypeMask << kBitMemAccessTypeBegin)) |
-        (is_volatile ? kFlagIsVolatile : 0u) |
-        (fast_path.first ? kFlagFastGet : 0u) |
-        (fast_path.second ? kFlagFastPut : 0u);
-    if (fast_path.first) {
-      it->field_offset_ = compiler_driver->GetFieldOffset(resolved_field);
-      bool is_referrers_class =
-          compiler_driver->IsStaticFieldInReferrerClass(referrer_class, resolved_field);
-      bool is_class_initialized =
-          compiler_driver->IsStaticFieldsClassInitialized(referrer_class, resolved_field);
-      bool is_class_in_dex_cache = !is_referrers_class &&  // If referrer's class, we don't care.
-          compiler_driver->CanAssumeTypeIsPresentInDexCache(*dex_cache->GetDexFile(),
-                                                            it->storage_index_);
-      flags |= (is_referrers_class ? kFlagIsReferrersClass : 0u) |
-          (is_class_initialized ? kFlagClassIsInitialized : 0u) |
-          (is_class_in_dex_cache ? kFlagClassIsInDexCache : 0u);
-    }
-    it->flags_ = flags;
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
deleted file mode 100644
index b6dc27d..0000000
--- a/compiler/dex/mir_field_info.h
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_MIR_FIELD_INFO_H_
-#define ART_COMPILER_DEX_MIR_FIELD_INFO_H_
-
-#include "base/macros.h"
-#include "dex_file.h"
-#include "dex_instruction_utils.h"
-#include "offsets.h"
-
-namespace art {
-
-class CompilerDriver;
-class DexCompilationUnit;
-class ScopedObjectAccess;
-
-/*
- * Field info is calculated from the perspective of the compilation unit that accesses
- * the field and stored in that unit's MIRGraph. Therefore it does not need to reference the
- * dex file or method for which it has been calculated. However, we do store the declaring
- * field index, class index and dex file of the resolved field to help distinguish between fields.
- */
-
-class MirFieldInfo {
- public:
-  uint16_t FieldIndex() const {
-    return field_idx_;
-  }
-  void SetFieldIndex(uint16_t field_idx) {
-    field_idx_ = field_idx;
-  }
-
-  bool IsStatic() const {
-    return (flags_ & kFlagIsStatic) != 0u;
-  }
-
-  bool IsResolved() const {
-    return declaring_dex_file_ != nullptr;
-  }
-
-  const DexFile* DeclaringDexFile() const {
-    return declaring_dex_file_;
-  }
-  void SetDeclaringDexFile(const DexFile* dex_file) {
-    declaring_dex_file_ = dex_file;
-  }
-
-  uint16_t DeclaringClassIndex() const {
-    return declaring_class_idx_;
-  }
-
-  uint16_t DeclaringFieldIndex() const {
-    return declaring_field_idx_;
-  }
-
-  bool IsVolatile() const {
-    return (flags_ & kFlagIsVolatile) != 0u;
-  }
-
-  // IGET_QUICK, IGET_BYTE_QUICK, ...
-  bool IsQuickened() const {
-    return (flags_ & kFlagIsQuickened) != 0u;
-  }
-
-  DexMemAccessType MemAccessType() const {
-    return static_cast<DexMemAccessType>((flags_ >> kBitMemAccessTypeBegin) & kMemAccessTypeMask);
-  }
-
-  void CheckEquals(const MirFieldInfo& other) const {
-    CHECK_EQ(field_idx_, other.field_idx_);
-    CHECK_EQ(flags_, other.flags_);
-    CHECK_EQ(declaring_field_idx_, other.declaring_field_idx_);
-    CHECK_EQ(declaring_class_idx_, other.declaring_class_idx_);
-    CHECK_EQ(declaring_dex_file_, other.declaring_dex_file_);
-  }
-
- protected:
-  enum {
-    kBitIsStatic = 0,
-    kBitIsVolatile,
-    kBitIsQuickened,
-    kBitMemAccessTypeBegin,
-    kBitMemAccessTypeEnd = kBitMemAccessTypeBegin + 3,  // 3 bits for raw type.
-    kFieldInfoBitEnd = kBitMemAccessTypeEnd
-  };
-  static constexpr uint16_t kFlagIsVolatile = 1u << kBitIsVolatile;
-  static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
-  static constexpr uint16_t kFlagIsQuickened = 1u << kBitIsQuickened;
-  static constexpr uint16_t kMemAccessTypeMask = 7u;
-  static_assert((1u << (kBitMemAccessTypeEnd - kBitMemAccessTypeBegin)) - 1u == kMemAccessTypeMask,
-                "Invalid raw type mask");
-
-  MirFieldInfo(uint16_t field_idx, uint16_t flags, DexMemAccessType type)
-      : field_idx_(field_idx),
-        flags_(flags | static_cast<uint16_t>(type) << kBitMemAccessTypeBegin),
-        declaring_field_idx_(0u),
-        declaring_class_idx_(0u),
-        declaring_dex_file_(nullptr) {
-  }
-
-  // Make copy-ctor/assign/dtor protected to avoid slicing.
-  MirFieldInfo(const MirFieldInfo& other) = default;
-  MirFieldInfo& operator=(const MirFieldInfo& other) = default;
-  ~MirFieldInfo() = default;
-
-  // The field index in the compiling method's dex file.
-  uint16_t field_idx_;
-  // Flags, for volatility and derived class data.
-  uint16_t flags_;
-  // The field index in the dex file that defines the field, 0 if unresolved.
-  uint16_t declaring_field_idx_;
-  // The type index of the class declaring the field, 0 if unresolved.
-  uint16_t declaring_class_idx_;
-  // The dex file that defines the class containing the field and the field, null if unresolved.
-  const DexFile* declaring_dex_file_;
-};
-
-class MirIFieldLoweringInfo : public MirFieldInfo {
- public:
-  // For each requested instance field retrieve the field's declaring location (dex file, class
-  // index and field index) and volatility and compute whether we can fast path the access
-  // with IGET/IPUT. For fast path fields, retrieve the field offset.
-  static void Resolve(const ScopedObjectAccess& soa,
-                      CompilerDriver* compiler_driver,
-                      const DexCompilationUnit* mUnit,
-                      MirIFieldLoweringInfo* field_infos,
-                      size_t count)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
-  // Construct an unresolved instance field lowering info.
-  MirIFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type, bool is_quickened)
-      : MirFieldInfo(field_idx,
-                     kFlagIsVolatile | (is_quickened ? kFlagIsQuickened : 0u),
-                     type),  // Without kFlagIsStatic.
-        field_offset_(0u) {
-  }
-
-  bool FastGet() const {
-    return (flags_ & kFlagFastGet) != 0u;
-  }
-
-  bool FastPut() const {
-    return (flags_ & kFlagFastPut) != 0u;
-  }
-
-  MemberOffset FieldOffset() const {
-    return field_offset_;
-  }
-
-  void CheckEquals(const MirIFieldLoweringInfo& other) const {
-    MirFieldInfo::CheckEquals(other);
-    CHECK_EQ(field_offset_.Uint32Value(), other.field_offset_.Uint32Value());
-  }
-
- private:
-  enum {
-    kBitFastGet = kFieldInfoBitEnd,
-    kBitFastPut,
-    kIFieldLoweringInfoBitEnd
-  };
-  static_assert(kIFieldLoweringInfoBitEnd <= 16, "Too many flags");
-  static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
-  static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
-
-  // The member offset of the field, 0u if unresolved.
-  MemberOffset field_offset_;
-
-  friend class NullCheckEliminationTest;
-  friend class GlobalValueNumberingTest;
-  friend class GvnDeadCodeEliminationTest;
-  friend class LocalValueNumberingTest;
-  friend class TypeInferenceTest;
-};
-
-class MirSFieldLoweringInfo : public MirFieldInfo {
- public:
-  // For each requested static field retrieve the field's declaring location (dex file, class
-  // index and field index) and volatility and compute whether we can fast path the access with
-  // IGET/IPUT. For fast path fields (at least for IGET), retrieve the information needed for
-  // the field access, i.e. the field offset, whether the field is in the same class as the
-  // method being compiled, whether the declaring class can be safely assumed to be initialized
-  // and the type index of the declaring class in the compiled method's dex file.
-  static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
-                      MirSFieldLoweringInfo* field_infos, size_t count)
-      REQUIRES(!Locks::mutator_lock_);
-
-  // Construct an unresolved static field lowering info.
-  MirSFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type)
-      : MirFieldInfo(field_idx, kFlagIsVolatile | kFlagIsStatic, type),
-        field_offset_(0u),
-        storage_index_(DexFile::kDexNoIndex) {
-  }
-
-  bool FastGet() const {
-    return (flags_ & kFlagFastGet) != 0u;
-  }
-
-  bool FastPut() const {
-    return (flags_ & kFlagFastPut) != 0u;
-  }
-
-  bool IsReferrersClass() const {
-    return (flags_ & kFlagIsReferrersClass) != 0u;
-  }
-
-  bool IsClassInitialized() const {
-    return (flags_ & kFlagClassIsInitialized) != 0u;
-  }
-
-  bool IsClassInDexCache() const {
-    return (flags_ & kFlagClassIsInDexCache) != 0u;
-  }
-
-  MemberOffset FieldOffset() const {
-    return field_offset_;
-  }
-
-  uint32_t StorageIndex() const {
-    return storage_index_;
-  }
-
- private:
-  enum {
-    kBitFastGet = kFieldInfoBitEnd,
-    kBitFastPut,
-    kBitIsReferrersClass,
-    kBitClassIsInitialized,
-    kBitClassIsInDexCache,
-    kSFieldLoweringInfoBitEnd
-  };
-  static_assert(kSFieldLoweringInfoBitEnd <= 16, "Too many flags");
-  static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
-  static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
-  static constexpr uint16_t kFlagIsReferrersClass = 1u << kBitIsReferrersClass;
-  static constexpr uint16_t kFlagClassIsInitialized = 1u << kBitClassIsInitialized;
-  static constexpr uint16_t kFlagClassIsInDexCache = 1u << kBitClassIsInDexCache;
-
-  // The member offset of the field, 0u if unresolved.
-  MemberOffset field_offset_;
-  // The type index of the declaring class in the compiling method's dex file,
-  // kDexNoIndex if the field is unresolved or there's no appropriate TypeId in that dex file.
-  uint32_t storage_index_;
-
-  friend class ClassInitCheckEliminationTest;
-  friend class GlobalValueNumberingTest;
-  friend class GvnDeadCodeEliminationTest;
-  friend class LocalValueNumberingTest;
-  friend class TypeInferenceTest;
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_MIR_FIELD_INFO_H_
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
deleted file mode 100644
index 6dc148d..0000000
--- a/compiler/dex/mir_graph.cc
+++ /dev/null
@@ -1,2589 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "mir_graph.h"
-
-#include <inttypes.h>
-#include <queue>
-#include <unistd.h>
-
-#include "base/bit_vector-inl.h"
-#include "base/logging.h"
-#include "base/stl_util.h"
-#include "base/stringprintf.h"
-#include "base/scoped_arena_containers.h"
-#include "compiler_ir.h"
-#include "dex_file-inl.h"
-#include "dex_flags.h"
-#include "dex_instruction-inl.h"
-#include "driver/compiler_driver.h"
-#include "driver/dex_compilation_unit.h"
-#include "dex/quick/quick_compiler.h"
-#include "leb128.h"
-#include "pass_driver_me_post_opt.h"
-#include "stack.h"
-#include "utils.h"
-
-namespace art {
-
-#define MAX_PATTERN_LEN 5
-
-const char* MIRGraph::extended_mir_op_names_[kMirOpLast - kMirOpFirst] = {
-  "Phi",
-  "Copy",
-  "FusedCmplFloat",
-  "FusedCmpgFloat",
-  "FusedCmplDouble",
-  "FusedCmpgDouble",
-  "FusedCmpLong",
-  "Nop",
-  "OpNullCheck",
-  "OpRangeCheck",
-  "OpDivZeroCheck",
-  "Check",
-  "Select",
-  "ConstVector",
-  "MoveVector",
-  "PackedMultiply",
-  "PackedAddition",
-  "PackedSubtract",
-  "PackedShiftLeft",
-  "PackedSignedShiftRight",
-  "PackedUnsignedShiftRight",
-  "PackedAnd",
-  "PackedOr",
-  "PackedXor",
-  "PackedAddReduce",
-  "PackedReduce",
-  "PackedSet",
-  "ReserveVectorRegisters",
-  "ReturnVectorRegisters",
-  "MemBarrier",
-  "PackedArrayGet",
-  "PackedArrayPut",
-  "MaddInt",
-  "MsubInt",
-  "MaddLong",
-  "MsubLong",
-};
-
-MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
-    : reg_location_(nullptr),
-      block_id_map_(std::less<unsigned int>(), arena->Adapter()),
-      cu_(cu),
-      ssa_base_vregs_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
-      ssa_subscripts_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
-      vreg_to_ssa_map_(nullptr),
-      ssa_last_defs_(nullptr),
-      is_constant_v_(nullptr),
-      constant_values_(nullptr),
-      use_counts_(arena->Adapter()),
-      raw_use_counts_(arena->Adapter()),
-      num_reachable_blocks_(0),
-      max_num_reachable_blocks_(0),
-      dfs_orders_up_to_date_(false),
-      domination_up_to_date_(false),
-      mir_ssa_rep_up_to_date_(false),
-      topological_order_up_to_date_(false),
-      dfs_order_(arena->Adapter(kArenaAllocDfsPreOrder)),
-      dfs_post_order_(arena->Adapter(kArenaAllocDfsPostOrder)),
-      dom_post_order_traversal_(arena->Adapter(kArenaAllocDomPostOrder)),
-      topological_order_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
-      topological_order_loop_ends_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
-      topological_order_indexes_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
-      topological_order_loop_head_stack_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
-      max_nested_loops_(0u),
-      i_dom_list_(nullptr),
-      temp_scoped_alloc_(),
-      block_list_(arena->Adapter(kArenaAllocBBList)),
-      try_block_addr_(nullptr),
-      entry_block_(nullptr),
-      exit_block_(nullptr),
-      current_code_item_(nullptr),
-      m_units_(arena->Adapter()),
-      method_stack_(arena->Adapter()),
-      current_method_(kInvalidEntry),
-      current_offset_(kInvalidEntry),
-      def_count_(0),
-      opcode_count_(nullptr),
-      num_ssa_regs_(0),
-      extended_basic_blocks_(arena->Adapter()),
-      method_sreg_(0),
-      attributes_(METHOD_IS_LEAF),  // Start with leaf assumption, change on encountering invoke.
-      checkstats_(nullptr),
-      arena_(arena),
-      backward_branches_(0),
-      forward_branches_(0),
-      num_non_special_compiler_temps_(0),
-      max_available_special_compiler_temps_(1),  // We only need the method ptr as a special temp for now.
-      requested_backend_temp_(false),
-      compiler_temps_committed_(false),
-      punt_to_interpreter_(false),
-      merged_df_flags_(0u),
-      ifield_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
-      sfield_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
-      method_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
-      suspend_checks_in_loops_(nullptr) {
-  memset(&temp_, 0, sizeof(temp_));
-  use_counts_.reserve(256);
-  raw_use_counts_.reserve(256);
-  block_list_.reserve(100);
-  try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
-
-  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
-    // X86 requires a temp to keep track of the method address.
-    // TODO For x86_64, addressing can be done with RIP. When that is implemented,
-    // this needs to be updated to reserve 0 temps for BE.
-    max_available_non_special_compiler_temps_ = cu_->target64 ? 2 : 1;
-    reserved_temps_for_backend_ = max_available_non_special_compiler_temps_;
-  } else {
-    // Other architectures do not have a known lower bound for non-special temps.
-    // We allow the update of the max to happen at BE initialization stage and simply set 0 for now.
-    max_available_non_special_compiler_temps_ = 0;
-    reserved_temps_for_backend_ = 0;
-  }
-}
-
-MIRGraph::~MIRGraph() {
-  STLDeleteElements(&block_list_);
-  STLDeleteElements(&m_units_);
-}
-
-/*
- * Parse an instruction and return its length in code units
- */
-int MIRGraph::ParseInsn(const uint16_t* code_ptr, MIR::DecodedInstruction* decoded_instruction) {
-  const Instruction* inst = Instruction::At(code_ptr);
-  decoded_instruction->opcode = inst->Opcode();
-  decoded_instruction->vA = inst->HasVRegA() ? inst->VRegA() : 0;
-  decoded_instruction->vB = inst->HasVRegB() ? inst->VRegB() : 0;
-  decoded_instruction->vB_wide = inst->HasWideVRegB() ? inst->WideVRegB() : 0;
-  decoded_instruction->vC = inst->HasVRegC() ? inst->VRegC() : 0;
-  if (inst->HasVarArgs35c()) {
-    inst->GetVarArgs(decoded_instruction->arg);
-  }
-  return inst->SizeInCodeUnits();
-}
-
-/* Split an existing block from the specified code offset into two */
-BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
-                                 BasicBlock* orig_block, BasicBlock** immed_pred_block_p) {
-  DCHECK_GT(code_offset, orig_block->start_offset);
-  MIR* insn = orig_block->first_mir_insn;
-  MIR* prev = nullptr;  // Will be set to instruction before split.
-  while (insn) {
-    if (insn->offset == code_offset) break;
-    prev = insn;
-    insn = insn->next;
-  }
-  if (insn == nullptr) {
-    LOG(FATAL) << "Break split failed";
-  }
-  // Now insn points at the split position: it will become the first instruction
-  // of the "bottom" block, while prev will be the last instruction of the "top" block.
-
-  BasicBlock* bottom_block = CreateNewBB(kDalvikByteCode);
-
-  bottom_block->start_offset = code_offset;
-  bottom_block->first_mir_insn = insn;
-  bottom_block->last_mir_insn = orig_block->last_mir_insn;
-
-  /* If this block was terminated by a return, conditional branch or throw,
-   * the flag needs to go with the bottom block
-   */
-  bottom_block->terminated_by_return = orig_block->terminated_by_return;
-  orig_block->terminated_by_return = false;
-
-  bottom_block->conditional_branch = orig_block->conditional_branch;
-  orig_block->conditional_branch = false;
-
-  bottom_block->explicit_throw = orig_block->explicit_throw;
-  orig_block->explicit_throw = false;
-
-  /* Handle the taken path */
-  bottom_block->taken = orig_block->taken;
-  if (bottom_block->taken != NullBasicBlockId) {
-    orig_block->taken = NullBasicBlockId;
-    BasicBlock* bb_taken = GetBasicBlock(bottom_block->taken);
-    bb_taken->ErasePredecessor(orig_block->id);
-    bb_taken->predecessors.push_back(bottom_block->id);
-  }
-
-  /* Handle the fallthrough path */
-  bottom_block->fall_through = orig_block->fall_through;
-  orig_block->fall_through = bottom_block->id;
-  bottom_block->predecessors.push_back(orig_block->id);
-  if (bottom_block->fall_through != NullBasicBlockId) {
-    BasicBlock* bb_fall_through = GetBasicBlock(bottom_block->fall_through);
-    bb_fall_through->ErasePredecessor(orig_block->id);
-    bb_fall_through->predecessors.push_back(bottom_block->id);
-  }
-
-  /* Handle the successor list */
-  if (orig_block->successor_block_list_type != kNotUsed) {
-    bottom_block->successor_block_list_type = orig_block->successor_block_list_type;
-    bottom_block->successor_blocks.swap(orig_block->successor_blocks);
-    orig_block->successor_block_list_type = kNotUsed;
-    DCHECK(orig_block->successor_blocks.empty());  // Empty after the swap() above.
-    for (SuccessorBlockInfo* successor_block_info : bottom_block->successor_blocks) {
-      BasicBlock* bb = GetBasicBlock(successor_block_info->block);
-      if (bb != nullptr) {
-        bb->ErasePredecessor(orig_block->id);
-        bb->predecessors.push_back(bottom_block->id);
-      }
-    }
-  }
-
-  orig_block->last_mir_insn = prev;
-  prev->next = nullptr;
-
-  /*
-   * Update the immediate predecessor block pointer so that outgoing edges
-   * can be applied to the proper block.
-   */
-  if (immed_pred_block_p) {
-    DCHECK_EQ(*immed_pred_block_p, orig_block);
-    *immed_pred_block_p = bottom_block;
-  }
-
-  // Associate dex instructions in the bottom block with the new container.
-  DCHECK(insn != nullptr);
-  DCHECK(insn != orig_block->first_mir_insn);
-  DCHECK(insn == bottom_block->first_mir_insn);
-  DCHECK_EQ(insn->offset, bottom_block->start_offset);
-  // Scan the "bottom" instructions, remapping them to the
-  // newly created "bottom" block.
-  MIR* p = insn;
-  p->bb = bottom_block->id;
-  while (p != bottom_block->last_mir_insn) {
-    p = p->next;
-    DCHECK(p != nullptr);
-    p->bb = bottom_block->id;
-  }
-
-  return bottom_block;
-}
-
-/*
- * Given a code offset, find out the block that starts with it. If the offset
- * is in the middle of an existing block, split it into two.  If immed_pred_block_p
- * is non-null and points to the block being split, update *immed_pred_block_p to
- * point to the bottom block so that outgoing edges can be set up properly
- * (by the caller)
- * Utilizes a map for fast lookup of the typical cases.
- */
-BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool create,
-                                BasicBlock** immed_pred_block_p,
-                                ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
-  if (UNLIKELY(code_offset >= current_code_item_->insns_size_in_code_units_)) {
-    // There can be a fall-through out of the method code. We shall record such a block
-    // here (assuming create==true) and check that it's dead at the end of InlineMethod().
-    // Though we're only aware of the cases where code_offset is exactly the same as
-    // insns_size_in_code_units_, treat greater code_offset the same just in case.
-    code_offset = current_code_item_->insns_size_in_code_units_;
-  }
-
-  int block_id = (*dex_pc_to_block_map)[code_offset];
-  BasicBlock* bb = GetBasicBlock(block_id);
-
-  if ((bb != nullptr) && (bb->start_offset == code_offset)) {
-    // Does this containing block start with the desired instruction?
-    return bb;
-  }
-
-  // No direct hit.
-  if (!create) {
-    return nullptr;
-  }
-
-  if (bb != nullptr) {
-    // The target exists somewhere in an existing block.
-    BasicBlock* bottom_block = SplitBlock(code_offset, bb,
-                                          bb == *immed_pred_block_p ? immed_pred_block_p : nullptr);
-    DCHECK(bottom_block != nullptr);
-    MIR* p = bottom_block->first_mir_insn;
-    BasicBlock* orig_block = bb;
-    DCHECK_EQ((*dex_pc_to_block_map)[p->offset], orig_block->id);
-    // Scan the "bottom" instructions, remapping them to the
-    // newly created "bottom" block.
-    (*dex_pc_to_block_map)[p->offset] = bottom_block->id;
-    while (p != bottom_block->last_mir_insn) {
-      p = p->next;
-      DCHECK(p != nullptr);
-      int opcode = p->dalvikInsn.opcode;
-      /*
-       * Some messiness here to ensure that we only enter real opcodes and only the
-       * first half of a potentially throwing instruction that has been split into
-       * CHECK and work portions. Since the 2nd half of a split operation is always
-       * the first in a BasicBlock, we can't hit it here.
-       */
-      if ((opcode == kMirOpCheck) || !MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
-        BasicBlockId mapped_id = (*dex_pc_to_block_map)[p->offset];
-        // At first glance the instructions should all be mapped to orig_block.
-        // However, multiple MIR instructions may correspond to the same dex pc, so an
-        // earlier instruction may have already moved the mapping for that dex pc to
-        // bottom_block.
-        DCHECK((mapped_id == orig_block->id) || (mapped_id == bottom_block->id));
-        (*dex_pc_to_block_map)[p->offset] = bottom_block->id;
-      }
-    }
-    return bottom_block;
-  }
-
-  // Create a new block.
-  bb = CreateNewBB(kDalvikByteCode);
-  bb->start_offset = code_offset;
-  (*dex_pc_to_block_map)[bb->start_offset] = bb->id;
-  return bb;
-}
-
-/* Identify code range in try blocks and set up the empty catch blocks */
-void MIRGraph::ProcessTryCatchBlocks(ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
-  int tries_size = current_code_item_->tries_size_;
-  DexOffset offset;
-
-  if (tries_size == 0) {
-    return;
-  }
-
-  for (int i = 0; i < tries_size; i++) {
-    const DexFile::TryItem* pTry =
-        DexFile::GetTryItems(*current_code_item_, i);
-    DexOffset start_offset = pTry->start_addr_;
-    DexOffset end_offset = start_offset + pTry->insn_count_;
-    for (offset = start_offset; offset < end_offset; offset++) {
-      try_block_addr_->SetBit(offset);
-    }
-  }
-
-  // Iterate over each of the handlers to enqueue the empty Catch blocks.
-  const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(*current_code_item_, 0);
-  uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
-  for (uint32_t idx = 0; idx < handlers_size; idx++) {
-    CatchHandlerIterator iterator(handlers_ptr);
-    for (; iterator.HasNext(); iterator.Next()) {
-      uint32_t address = iterator.GetHandlerAddress();
-      FindBlock(address, true /*create*/, /* immed_pred_block_p */ nullptr, dex_pc_to_block_map);
-    }
-    handlers_ptr = iterator.EndDataPointer();
-  }
-}
-
-bool MIRGraph::IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset,
-                                     NarrowDexOffset catch_offset) {
-  // Catches for monitor-exit during stack unwinding have the pattern
-  //   move-exception (move)* (goto)? monitor-exit throw
-  // In the currently generated dex bytecode we see these catching a bytecode range including
-  // either its own or an identical monitor-exit, http://b/15745363 . This function checks if
-  // it's the case for a given monitor-exit and catch block so that we can ignore it.
-  // (We don't want to ignore all monitor-exit catches since one could enclose a synchronized
-  // block in a try-block and catch the NPE, Error or Throwable and we should let it through;
-  // even though a throwing monitor-exit certainly indicates a bytecode error.)
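-  // Illustrative handler shape matched below (not from the original source):
-  //     move-exception vX
-  //     monitor-exit   vY      // vY is the same register as the guarded monitor-exit
-  //     throw          vX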
-  const Instruction* monitor_exit = Instruction::At(current_code_item_->insns_ + monitor_exit_offset);
-  DCHECK(monitor_exit->Opcode() == Instruction::MONITOR_EXIT);
-  int monitor_reg = monitor_exit->VRegA_11x();
-  const Instruction* check_insn = Instruction::At(current_code_item_->insns_ + catch_offset);
-  if (check_insn->Opcode() == Instruction::MOVE_EXCEPTION) {
-    if (check_insn->VRegA_11x() == monitor_reg) {
-      // Unexpected move-exception to the same register. Probably not the pattern we're looking for.
-      return false;
-    }
-    check_insn = check_insn->Next();
-  }
-  while (true) {
-    int dest = -1;
-    bool wide = false;
-    switch (check_insn->Opcode()) {
-      case Instruction::MOVE_WIDE:
-        wide = true;
-        FALLTHROUGH_INTENDED;
-      case Instruction::MOVE_OBJECT:
-      case Instruction::MOVE:
-        dest = check_insn->VRegA_12x();
-        break;
-
-      case Instruction::MOVE_WIDE_FROM16:
-        wide = true;
-        FALLTHROUGH_INTENDED;
-      case Instruction::MOVE_OBJECT_FROM16:
-      case Instruction::MOVE_FROM16:
-        dest = check_insn->VRegA_22x();
-        break;
-
-      case Instruction::MOVE_WIDE_16:
-        wide = true;
-        FALLTHROUGH_INTENDED;
-      case Instruction::MOVE_OBJECT_16:
-      case Instruction::MOVE_16:
-        dest = check_insn->VRegA_32x();
-        break;
-
-      case Instruction::GOTO:
-      case Instruction::GOTO_16:
-      case Instruction::GOTO_32:
-        check_insn = check_insn->RelativeAt(check_insn->GetTargetOffset());
-        FALLTHROUGH_INTENDED;
-      default:
-        return check_insn->Opcode() == Instruction::MONITOR_EXIT &&
-            check_insn->VRegA_11x() == monitor_reg;
-    }
-
-    if (dest == monitor_reg || (wide && dest + 1 == monitor_reg)) {
-      return false;
-    }
-
-    check_insn = check_insn->Next();
-  }
-}
-
-/* Process instructions with the kBranch flag */
-BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
-                                       int width, int flags, const uint16_t* code_ptr,
-                                       const uint16_t* code_end,
-                                       ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
-  DexOffset target = cur_offset;
-  switch (insn->dalvikInsn.opcode) {
-    case Instruction::GOTO:
-    case Instruction::GOTO_16:
-    case Instruction::GOTO_32:
-      target += insn->dalvikInsn.vA;
-      break;
-    case Instruction::IF_EQ:
-    case Instruction::IF_NE:
-    case Instruction::IF_LT:
-    case Instruction::IF_GE:
-    case Instruction::IF_GT:
-    case Instruction::IF_LE:
-      cur_block->conditional_branch = true;
-      target += insn->dalvikInsn.vC;
-      break;
-    case Instruction::IF_EQZ:
-    case Instruction::IF_NEZ:
-    case Instruction::IF_LTZ:
-    case Instruction::IF_GEZ:
-    case Instruction::IF_GTZ:
-    case Instruction::IF_LEZ:
-      cur_block->conditional_branch = true;
-      target += insn->dalvikInsn.vB;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
-  }
-  CountBranch(target);
-  BasicBlock* taken_block = FindBlock(target, /* create */ true,
-                                      /* immed_pred_block_p */ &cur_block,
-                                      dex_pc_to_block_map);
-  DCHECK(taken_block != nullptr);
-  cur_block->taken = taken_block->id;
-  taken_block->predecessors.push_back(cur_block->id);
-
-  /* Always terminate the current block for conditional branches */
-  if (flags & Instruction::kContinue) {
-    BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* create */ true,
-                                              /* immed_pred_block_p */ &cur_block,
-                                              dex_pc_to_block_map);
-    DCHECK(fallthrough_block != nullptr);
-    cur_block->fall_through = fallthrough_block->id;
-    fallthrough_block->predecessors.push_back(cur_block->id);
-  } else if (code_ptr < code_end) {
-    FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr, dex_pc_to_block_map);
-  }
-  return cur_block;
-}
-
-/* Process instructions with the kSwitch flag */
-BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
-                                       int width, int flags ATTRIBUTE_UNUSED,
-                                       ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
-  const uint16_t* switch_data =
-      reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset +
-          static_cast<int32_t>(insn->dalvikInsn.vB));
-  int size;
-  const int* keyTable;
-  const int* target_table;
-  int i;
-  int first_key;
-
-  /*
-   * Packed switch data format:
-   *  ushort ident = 0x0100   magic value
-   *  ushort size             number of entries in the table
-   *  int first_key           first (and lowest) switch case value
-   *  int targets[size]       branch targets, relative to switch opcode
-   *
-   * Total size is (4+size*2) 16-bit code units.
-   */
-  if (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) {
-    DCHECK_EQ(static_cast<int>(switch_data[0]),
-              static_cast<int>(Instruction::kPackedSwitchSignature));
-    size = switch_data[1];
-    first_key = switch_data[2] | (switch_data[3] << 16);
-    target_table = reinterpret_cast<const int*>(&switch_data[4]);
-    keyTable = nullptr;        // Make the compiler happy.
-  /*
-   * Sparse switch data format:
-   *  ushort ident = 0x0200   magic value
-   *  ushort size             number of entries in the table; > 0
-   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
-   *  int targets[size]       branch targets, relative to switch opcode
-   *
-   * Total size is (2+size*4) 16-bit code units.
-   */
-  } else {
-    DCHECK_EQ(static_cast<int>(switch_data[0]),
-              static_cast<int>(Instruction::kSparseSwitchSignature));
-    size = switch_data[1];
-    keyTable = reinterpret_cast<const int*>(&switch_data[2]);
-    target_table = reinterpret_cast<const int*>(&switch_data[2 + size * 2]);
-    first_key = 0;   // To make the compiler happy.
-  }
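-  // Worked example (illustrative): a packed switch over cases {10, 11, 12} stores
-  // ident=0x0100, size=3, first_key=10 and targets[3]; the sparse form stores
-  // ident=0x0200, size=3, keys[3]={10, 11, 12} and targets[3] instead.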
-
-  if (cur_block->successor_block_list_type != kNotUsed) {
-    LOG(FATAL) << "Successor block list already in use: "
-               << static_cast<int>(cur_block->successor_block_list_type);
-  }
-  cur_block->successor_block_list_type =
-      (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?  kPackedSwitch : kSparseSwitch;
-  cur_block->successor_blocks.reserve(size);
-
-  for (i = 0; i < size; i++) {
-    BasicBlock* case_block = FindBlock(cur_offset + target_table[i],  /* create */ true,
-                                       /* immed_pred_block_p */ &cur_block,
-                                       dex_pc_to_block_map);
-    DCHECK(case_block != nullptr);
-    SuccessorBlockInfo* successor_block_info =
-        static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
-                                                       kArenaAllocSuccessors));
-    successor_block_info->block = case_block->id;
-    successor_block_info->key =
-        (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
-        first_key + i : keyTable[i];
-    cur_block->successor_blocks.push_back(successor_block_info);
-    case_block->predecessors.push_back(cur_block->id);
-  }
-
-  /* Fall-through case */
-  BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* create */ true,
-                                            /* immed_pred_block_p */ nullptr,
-                                            dex_pc_to_block_map);
-  DCHECK(fallthrough_block != nullptr);
-  cur_block->fall_through = fallthrough_block->id;
-  fallthrough_block->predecessors.push_back(cur_block->id);
-  return cur_block;
-}
-
-/* Process instructions with the kThrow flag */
-BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block,
-                                      MIR* insn,
-                                      DexOffset cur_offset,
-                                      int width,
-                                      int flags ATTRIBUTE_UNUSED,
-                                      ArenaBitVector* try_block_addr,
-                                      const uint16_t* code_ptr,
-                                      const uint16_t* code_end,
-                                      ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
-  bool in_try_block = try_block_addr->IsBitSet(cur_offset);
-  bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW);
-
-  /* In try block */
-  if (in_try_block) {
-    CatchHandlerIterator iterator(*current_code_item_, cur_offset);
-
-    if (cur_block->successor_block_list_type != kNotUsed) {
-      LOG(INFO) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-      LOG(FATAL) << "Successor block list already in use: "
-                 << static_cast<int>(cur_block->successor_block_list_type);
-    }
-
-    for (; iterator.HasNext(); iterator.Next()) {
-      BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* create */,
-                                          nullptr /* immed_pred_block_p */,
-                                          dex_pc_to_block_map);
-      if (insn->dalvikInsn.opcode == Instruction::MONITOR_EXIT &&
-          IsBadMonitorExitCatch(insn->offset, catch_block->start_offset)) {
-        // Don't allow monitor-exit to catch its own exception, http://b/15745363 .
-        continue;
-      }
-      if (cur_block->successor_block_list_type == kNotUsed) {
-        cur_block->successor_block_list_type = kCatch;
-      }
-      catch_block->catch_entry = true;
-      if (kIsDebugBuild) {
-        catches_.insert(catch_block->start_offset);
-      }
-      SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
-          (arena_->Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessors));
-      successor_block_info->block = catch_block->id;
-      successor_block_info->key = iterator.GetHandlerTypeIndex();
-      cur_block->successor_blocks.push_back(successor_block_info);
-      catch_block->predecessors.push_back(cur_block->id);
-    }
-    in_try_block = (cur_block->successor_block_list_type != kNotUsed);
-  }
-  bool build_all_edges =
-      (cu_->disable_opt & (1 << kSuppressExceptionEdges)) || is_throw || in_try_block;
-  if (!in_try_block && build_all_edges) {
-    BasicBlock* eh_block = CreateNewBB(kExceptionHandling);
-    cur_block->taken = eh_block->id;
-    eh_block->start_offset = cur_offset;
-    eh_block->predecessors.push_back(cur_block->id);
-  }
-
-  if (is_throw) {
-    cur_block->explicit_throw = true;
-    if (code_ptr < code_end) {
-      // Force creation of new block following THROW via side-effect.
-      FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr, dex_pc_to_block_map);
-    }
-    if (!in_try_block) {
-      // Don't split a THROW that can't rethrow - we're done.
-      return cur_block;
-    }
-  }
-
-  if (!build_all_edges) {
-    /*
-     * Even though there is an exception edge here, control cannot return to this
-     * method.  Thus, for the purposes of dataflow analysis and optimization, we can
-     * ignore the edge.  Doing this reduces compile time, and increases the scope
-     * of the basic-block level optimization pass.
-     */
-    return cur_block;
-  }
-
-  /*
-   * Split the potentially-throwing instruction into two parts.
-   * The first half will be a pseudo-op that captures the exception
-   * edges and terminates the basic block.  It always falls through.
-   * Then, create a new basic block that begins with the throwing instruction
-   * (minus exceptions).  Note: this new basic block must NOT be entered into
-   * the block_map.  If the potentially-throwing instruction is the target of a
- * future branch, we need to find the check pseudo half.  The new
-   * basic block containing the work portion of the instruction should
-   * only be entered via fallthrough from the block containing the
-   * pseudo exception edge MIR.  Note also that this new block is
-   * not automatically terminated after the work portion, and may
-   * contain following instructions.
-   *
-   * Note also that the dex_pc_to_block_map entry for the potentially
-   * throwing instruction will refer to the original basic block.
-   */
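-  // Resulting shape (illustration, not from the original source):
-  //     cur_block:  ... kMirOpCheck(insn)   // owns the exception edges, falls through
-  //     new_block:  insn (work portion)     // reached only via that fallthrough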
-  BasicBlock* new_block = CreateNewBB(kDalvikByteCode);
-  new_block->start_offset = insn->offset;
-  cur_block->fall_through = new_block->id;
-  new_block->predecessors.push_back(cur_block->id);
-  MIR* new_insn = NewMIR();
-  *new_insn = *insn;
-  insn->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpCheck);
-  // Associate the two halves.
-  insn->meta.throw_insn = new_insn;
-  new_block->AppendMIR(new_insn);
-  return new_block;
-}
-
-/* Parse a Dex method and insert it into the MIRGraph at the current insert point. */
-void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
-                           InvokeType invoke_type ATTRIBUTE_UNUSED, uint16_t class_def_idx,
-                           uint32_t method_idx, jobject class_loader, const DexFile& dex_file,
-                           Handle<mirror::DexCache> dex_cache) {
-  current_code_item_ = code_item;
-  method_stack_.push_back(std::make_pair(current_method_, current_offset_));
-  current_method_ = m_units_.size();
-  current_offset_ = 0;
-  // TODO: will need to snapshot stack image and use that as the mir context identification.
-  m_units_.push_back(new (arena_) DexCompilationUnit(
-      cu_, class_loader, Runtime::Current()->GetClassLinker(), dex_file, current_code_item_,
-      class_def_idx, method_idx, access_flags,
-      cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx), dex_cache));
-  const uint16_t* code_ptr = current_code_item_->insns_;
-  const uint16_t* code_end =
-      current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_;
-
-  // TODO: need to rework expansion of block list & try_block_addr when inlining activated.
-  // TUNING: use better estimate of basic blocks for following resize.
-  block_list_.reserve(block_list_.size() + current_code_item_->insns_size_in_code_units_);
-  // FindBlock lookup cache.
-  ScopedArenaAllocator allocator(&cu_->arena_stack);
-  ScopedArenaVector<uint16_t> dex_pc_to_block_map(allocator.Adapter());
-  dex_pc_to_block_map.resize(current_code_item_->insns_size_in_code_units_ +
-                             1 /* Fall-through on last insn; dead or punt to interpreter. */);
-
-  // TODO: replace with explicit resize routine.  Using automatic extension side effect for now.
-  try_block_addr_->SetBit(current_code_item_->insns_size_in_code_units_);
-  try_block_addr_->ClearBit(current_code_item_->insns_size_in_code_units_);
-
-  // If this is the first method, set up default entry and exit blocks.
-  if (current_method_ == 0) {
-    DCHECK(entry_block_ == nullptr);
-    DCHECK(exit_block_ == nullptr);
-    DCHECK_EQ(GetNumBlocks(), 0U);
-    // Use id 0 to represent a null block.
-    BasicBlock* null_block = CreateNewBB(kNullBlock);
-    DCHECK_EQ(null_block->id, NullBasicBlockId);
-    null_block->hidden = true;
-    entry_block_ = CreateNewBB(kEntryBlock);
-    exit_block_ = CreateNewBB(kExitBlock);
-  } else {
-    UNIMPLEMENTED(FATAL) << "Nested inlining not implemented.";
-    /*
-     * Will need to manage storage for ins & outs, push previous state and update
-     * insert point.
-     */
-  }
-
-  /* Current block to record parsed instructions */
-  BasicBlock* cur_block = CreateNewBB(kDalvikByteCode);
-  DCHECK_EQ(current_offset_, 0U);
-  cur_block->start_offset = current_offset_;
-  // TODO: for inlining support, insert at the insert point rather than entry block.
-  entry_block_->fall_through = cur_block->id;
-  cur_block->predecessors.push_back(entry_block_->id);
-
-  /* Identify code range in try blocks and set up the empty catch blocks */
-  ProcessTryCatchBlocks(&dex_pc_to_block_map);
-
-  uint64_t merged_df_flags = 0u;
-
-  /* Parse all instructions and put them into containing basic blocks */
-  while (code_ptr < code_end) {
-    MIR *insn = NewMIR();
-    insn->offset = current_offset_;
-    insn->m_unit_index = current_method_;
-    int width = ParseInsn(code_ptr, &insn->dalvikInsn);
-    Instruction::Code opcode = insn->dalvikInsn.opcode;
-    if (opcode_count_ != nullptr) {
-      opcode_count_[static_cast<int>(opcode)]++;
-    }
-
-    int flags = insn->dalvikInsn.FlagsOf();
-    int verify_flags = Instruction::VerifyFlagsOf(insn->dalvikInsn.opcode);
-
-    uint64_t df_flags = GetDataFlowAttributes(insn);
-    merged_df_flags |= df_flags;
-
-    if (df_flags & DF_HAS_DEFS) {
-      def_count_ += (df_flags & DF_A_WIDE) ? 2 : 1;
-    }
-
-    if (df_flags & DF_LVN) {
-      cur_block->use_lvn = true;  // Run local value numbering on this basic block.
-    }
-
-    // Check for inline data block signatures.
-    if (opcode == Instruction::NOP) {
-      // A simple NOP will have a width of 1 at this point; an embedded-data NOP has width > 1.
-      if ((width == 1) && ((current_offset_ & 0x1) == 0x1) && ((code_end - code_ptr) > 1)) {
-        // Could be an aligning nop.  If an embedded data NOP follows, treat pair as single unit.
-        uint16_t following_raw_instruction = code_ptr[1];
-        if ((following_raw_instruction == Instruction::kSparseSwitchSignature) ||
-            (following_raw_instruction == Instruction::kPackedSwitchSignature) ||
-            (following_raw_instruction == Instruction::kArrayDataSignature)) {
-          width += Instruction::At(code_ptr + 1)->SizeInCodeUnits();
-        }
-      }
-      if (width == 1) {
-        // It is a simple nop - treat normally.
-        cur_block->AppendMIR(insn);
-      } else {
-        DCHECK(cur_block->fall_through == NullBasicBlockId);
-        DCHECK(cur_block->taken == NullBasicBlockId);
-        // Unreachable instruction, mark for no continuation and end basic block.
-        flags &= ~Instruction::kContinue;
-        FindBlock(current_offset_ + width, /* create */ true,
-                  /* immed_pred_block_p */ nullptr, &dex_pc_to_block_map);
-      }
-    } else {
-      cur_block->AppendMIR(insn);
-    }
-
-    // Associate the starting dex_pc for this opcode with its containing basic block.
-    dex_pc_to_block_map[insn->offset] = cur_block->id;
-
-    code_ptr += width;
-
-    if (flags & Instruction::kBranch) {
-      cur_block = ProcessCanBranch(cur_block, insn, current_offset_,
-                                   width, flags, code_ptr, code_end, &dex_pc_to_block_map);
-    } else if (flags & Instruction::kReturn) {
-      cur_block->terminated_by_return = true;
-      cur_block->fall_through = exit_block_->id;
-      exit_block_->predecessors.push_back(cur_block->id);
-      /*
-       * Terminate the current block if there are instructions
-       * afterwards.
-       */
-      if (code_ptr < code_end) {
-        /*
-         * Create a fallthrough block for real instructions
-         * (incl. NOP).
-         */
-         FindBlock(current_offset_ + width, /* create */ true,
-                   /* immed_pred_block_p */ nullptr, &dex_pc_to_block_map);
-      }
-    } else if (flags & Instruction::kThrow) {
-      cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
-                                  code_ptr, code_end, &dex_pc_to_block_map);
-    } else if (flags & Instruction::kSwitch) {
-      cur_block = ProcessCanSwitch(cur_block, insn, current_offset_, width,
-                                   flags, &dex_pc_to_block_map);
-    }
-    if (verify_flags & Instruction::kVerifyVarArgRange ||
-        verify_flags & Instruction::kVerifyVarArgRangeNonZero) {
-      /*
-       * The Quick backend's runtime model includes a gap between a method's
-       * argument ("in") vregs and the rest of its vregs.  Handling a range instruction
-       * which spans the gap is somewhat complicated, and should not happen
-       * in normal usage of dx.  Punt to the interpreter.
-       */
-      int first_reg_in_range = insn->dalvikInsn.vC;
-      int last_reg_in_range = first_reg_in_range + insn->dalvikInsn.vA - 1;
-      if (IsInVReg(first_reg_in_range) != IsInVReg(last_reg_in_range)) {
-        punt_to_interpreter_ = true;
-      }
-    }
-    current_offset_ += width;
-    BasicBlock* next_block = FindBlock(current_offset_, /* create */ false,
-                                       /* immed_pred_block_p */ nullptr,
-                                       &dex_pc_to_block_map);
-    if (next_block) {
-      /*
-       * The next instruction could be the target of a previously parsed
-       * forward branch so a block is already created. If the current
-       * instruction is not an unconditional branch, connect them through
-       * the fall-through link.
-       */
-      DCHECK(cur_block->fall_through == NullBasicBlockId ||
-             GetBasicBlock(cur_block->fall_through) == next_block ||
-             GetBasicBlock(cur_block->fall_through) == exit_block_);
-
-      if ((cur_block->fall_through == NullBasicBlockId) && (flags & Instruction::kContinue)) {
-        cur_block->fall_through = next_block->id;
-        next_block->predecessors.push_back(cur_block->id);
-      }
-      cur_block = next_block;
-    }
-  }
-  merged_df_flags_ = merged_df_flags;
-
-  if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
-    DumpCFG("/sdcard/1_post_parse_cfg/", true);
-  }
-
-  if (cu_->verbose) {
-    DumpMIRGraph();
-  }
-
-  // Check if there's been a fall-through out of the method code.
-  BasicBlockId out_bb_id = dex_pc_to_block_map[current_code_item_->insns_size_in_code_units_];
-  if (UNLIKELY(out_bb_id != NullBasicBlockId)) {
-    // Eagerly calculate DFS order to determine if the block is dead.
-    DCHECK(!DfsOrdersUpToDate());
-    ComputeDFSOrders();
-    BasicBlock* out_bb = GetBasicBlock(out_bb_id);
-    DCHECK(out_bb != nullptr);
-    if (out_bb->block_type != kDead) {
-      LOG(WARNING) << "Live fall-through out of method in " << PrettyMethod(method_idx, dex_file);
-      SetPuntToInterpreter(true);
-    }
-  }
-}
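-
-// Sketch of the mapping built above (offsets hypothetical): only the starting dex pc
-// of each instruction is recorded, so a 2-code-unit instruction at pc 0x4 in block 3
-// sets dex_pc_to_block_map[0x4] = 3 and leaves slot 0x5 at NullBasicBlockId. The extra
-// trailing slot (at insns_size_in_code_units_) catches a fall-through past the last
-// instruction, which is what the final check above inspects.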
-
-void MIRGraph::ShowOpcodeStats() {
-  DCHECK(opcode_count_ != nullptr);
-  LOG(INFO) << "Opcode Count";
-  for (int i = 0; i < kNumPackedOpcodes; i++) {
-    if (opcode_count_[i] != 0) {
-      LOG(INFO) << "-C- " << Instruction::Name(static_cast<Instruction::Code>(i))
-                << " " << opcode_count_[i];
-    }
-  }
-}
-
-uint64_t MIRGraph::GetDataFlowAttributes(Instruction::Code opcode) {
-  DCHECK_LT(static_cast<size_t>(opcode),
-            sizeof(oat_data_flow_attributes_) / sizeof(oat_data_flow_attributes_[0]));
-  return oat_data_flow_attributes_[opcode];
-}
-
-uint64_t MIRGraph::GetDataFlowAttributes(MIR* mir) {
-  DCHECK(mir != nullptr);
-  Instruction::Code opcode = mir->dalvikInsn.opcode;
-  return GetDataFlowAttributes(opcode);
-}
-
-// The file name can easily surpass FS name-length limits because of parameters etc. Use
-// pathconf to query the actual limit and, if the generated name is too long (a later fopen
-// would otherwise fail with ENAMETOOLONG), cut off part of the name so that it fits.
-//
-// It's possible the path is not valid, or some other error appears. In that case return false.
-static bool CreateDumpFile(std::string& fname, const char* dir_prefix, NarrowDexOffset start_offset,
-                           const char *suffix, int nr, std::string* output) {
-  std::string dir = StringPrintf("./%s", dir_prefix);
-  int64_t max_name_length = pathconf(dir.c_str(), _PC_NAME_MAX);
-  if (max_name_length <= 0) {
-    PLOG(ERROR) << "Could not get file name restrictions for " << dir;
-    return false;
-  }
-
-  std::string name = StringPrintf("%s%x%s_%d.dot", fname.c_str(), start_offset,
-                                  suffix == nullptr ? "" : suffix, nr);
-  if (static_cast<int64_t>(name.size()) > max_name_length) {
-    std::string suffix_str = StringPrintf("_%d.dot", nr);
-    name = name.substr(0, static_cast<size_t>(max_name_length) - suffix_str.size()) + suffix_str;
-  }
-  // Sanity check.
-  DCHECK_LE(name.size(), static_cast<size_t>(max_name_length));
-
-  *output = StringPrintf("%s%s", dir_prefix, name.c_str());
-  return true;
-}
-
-// TODO: use a configurable base prefix, and adjust callers to supply pass name.
-/* Dump the CFG into a DOT graph */
-void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suffix) {
-  FILE* file;
-  static AtomicInteger cnt(0);
-
-  // Increment counter to get a unique file number.
-  cnt++;
-  int nr = cnt.LoadRelaxed();
-
-  std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file));
-  ReplaceSpecialChars(fname);
-  std::string fpath;
-  if (!CreateDumpFile(fname, dir_prefix, GetBasicBlock(GetEntryBlock()->fall_through)->start_offset,
-                      suffix, nr, &fpath)) {
-    LOG(ERROR) << "Could not create dump file name for " << fname;
-    return;
-  }
-  file = fopen(fpath.c_str(), "w");
-  if (file == nullptr) {
-    PLOG(ERROR) << "Could not open " << fpath << " for DumpCFG.";
-    return;
-  }
-  fprintf(file, "digraph G {\n");
-
-  fprintf(file, "  rankdir=TB\n");
-
-  int num_blocks = all_blocks ? GetNumBlocks() : num_reachable_blocks_;
-
-  for (int idx = 0; idx < num_blocks; idx++) {
-    int block_idx = all_blocks ? idx : dfs_order_[idx];
-    BasicBlock* bb = GetBasicBlock(block_idx);
-    if (bb == nullptr) continue;
-    if (bb->block_type == kDead) continue;
-    if (bb->hidden) continue;
-    if (bb->block_type == kEntryBlock) {
-      fprintf(file, "  entry_%d [shape=Mdiamond];\n", bb->id);
-    } else if (bb->block_type == kExitBlock) {
-      fprintf(file, "  exit_%d [shape=Mdiamond];\n", bb->id);
-    } else if (bb->block_type == kDalvikByteCode) {
-      fprintf(file, "  block%04x_%d [shape=record,label = \"{ \\\n",
-              bb->start_offset, bb->id);
-      fprintf(file, "    {block id %d\\l}%s\\\n", bb->id,
-              bb->first_mir_insn ? " | " : " ");
-      for (const MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-        int opcode = mir->dalvikInsn.opcode;
-        fprintf(file, "    {%04x %s %s %s %s %s %s %s %s %s\\l}%s\\\n", mir->offset,
-                mir->ssa_rep ? GetDalvikDisassembly(mir) :
-                !MIR::DecodedInstruction::IsPseudoMirOp(opcode) ?
-                    Instruction::Name(mir->dalvikInsn.opcode) :
-                    extended_mir_op_names_[opcode - kMirOpFirst],
-                (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ",
-                (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ",
-                (mir->optimization_flags & MIR_IGNORE_SUSPEND_CHECK) != 0 ? " no_suspendcheck" : " ",
-                (mir->optimization_flags & MIR_STORE_NON_TEMPORAL) != 0 ? " non_temporal" : " ",
-                (mir->optimization_flags & MIR_CALLEE) != 0 ? " inlined" : " ",
-                (mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0 ? " cl_inited" : " ",
-                (mir->optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0 ? " cl_in_cache" : " ",
-                (mir->optimization_flags & MIR_IGNORE_DIV_ZERO_CHECK) != 0 ? " no_div_check" : " ",
-                mir->next ? " | " : " ");
-      }
-      fprintf(file, "  }\"];\n\n");
-    } else if (bb->block_type == kExceptionHandling) {
-      char block_name[BLOCK_NAME_LEN];
-
-      GetBlockName(bb, block_name);
-      fprintf(file, "  %s [shape=invhouse];\n", block_name);
-    }
-
-    char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];
-
-    if (bb->taken != NullBasicBlockId) {
-      GetBlockName(bb, block_name1);
-      GetBlockName(GetBasicBlock(bb->taken), block_name2);
-      fprintf(file, "  %s:s -> %s:n [style=dotted]\n",
-              block_name1, block_name2);
-    }
-    if (bb->fall_through != NullBasicBlockId) {
-      GetBlockName(bb, block_name1);
-      GetBlockName(GetBasicBlock(bb->fall_through), block_name2);
-      fprintf(file, "  %s:s -> %s:n\n", block_name1, block_name2);
-    }
-
-    if (bb->successor_block_list_type != kNotUsed) {
-      fprintf(file, "  succ%04x_%d [shape=%s,label = \"{ \\\n",
-              bb->start_offset, bb->id,
-              (bb->successor_block_list_type == kCatch) ?  "Mrecord" : "record");
-
-      int last_succ_id = static_cast<int>(bb->successor_blocks.size() - 1u);
-      int succ_id = 0;
-      for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
-        BasicBlock* dest_block = GetBasicBlock(successor_block_info->block);
-        fprintf(file, "    {<f%d> %04x: %04x\\l}%s\\\n",
-                succ_id,
-                successor_block_info->key,
-                dest_block->start_offset,
-                (succ_id != last_succ_id) ? " | " : " ");
-        ++succ_id;
-      }
-      fprintf(file, "  }\"];\n\n");
-
-      GetBlockName(bb, block_name1);
-      fprintf(file, "  %s:s -> succ%04x_%d:n [style=dashed]\n",
-              block_name1, bb->start_offset, bb->id);
-
-      // Link the successor pseudo-block with all of its potential targets.
-      succ_id = 0;
-      for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
-        BasicBlock* dest_block = GetBasicBlock(successor_block_info->block);
-
-        GetBlockName(dest_block, block_name2);
-        fprintf(file, "  succ%04x_%d:f%d:e -> %s:n\n", bb->start_offset,
-                bb->id, succ_id++, block_name2);
-      }
-    }
-    fprintf(file, "\n");
-
-    if (cu_->verbose) {
-      /* Display the dominator tree */
-      GetBlockName(bb, block_name1);
-      fprintf(file, "  cfg%s [label=\"%s\", shape=none];\n",
-              block_name1, block_name1);
-      if (bb->i_dom) {
-        GetBlockName(GetBasicBlock(bb->i_dom), block_name2);
-        fprintf(file, "  cfg%s:s -> cfg%s:n\n\n", block_name2, block_name1);
-      }
-    }
-  }
-  fprintf(file, "}\n");
-  fclose(file);
-}
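-
-// For reference, a rough sketch of the DOT emitted above for a trivial method with a
-// single code block (block ids and offsets hypothetical):
-//
-//   digraph G {
-//     rankdir=TB
-//     entry_1 [shape=Mdiamond];
-//     block0000_3 [shape=record,label = "{ {block id 3\l} }"];
-//     entry_1:s -> block0000_3:n
-//     exit_2 [shape=Mdiamond];
-//     block0000_3:s -> exit_2:n
-//   }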
-
-/* Insert an MIR instruction at the end of a basic block. */
-void BasicBlock::AppendMIR(MIR* mir) {
-  // Insert it after the last MIR.
-  InsertMIRListAfter(last_mir_insn, mir, mir);
-}
-
-void BasicBlock::AppendMIRList(MIR* first_list_mir, MIR* last_list_mir) {
-  // Insert it after the last MIR.
-  InsertMIRListAfter(last_mir_insn, first_list_mir, last_list_mir);
-}
-
-void BasicBlock::AppendMIRList(const std::vector<MIR*>& insns) {
-  for (MIR* new_mir : insns) {
-    // Append each MIR itself; no copy is made.
-    InsertMIRListAfter(last_mir_insn, new_mir, new_mir);
-  }
-}
-
-/* Insert a MIR instruction after the specified MIR. */
-void BasicBlock::InsertMIRAfter(MIR* current_mir, MIR* new_mir) {
-  InsertMIRListAfter(current_mir, new_mir, new_mir);
-}
-
-void BasicBlock::InsertMIRListAfter(MIR* insert_after, MIR* first_list_mir, MIR* last_list_mir) {
-  // If no MIR, we are done.
-  if (first_list_mir == nullptr || last_list_mir == nullptr) {
-    return;
-  }
-
-  // If insert_after is null, assume BB is empty.
-  if (insert_after == nullptr) {
-    first_mir_insn = first_list_mir;
-    last_mir_insn = last_list_mir;
-    last_list_mir->next = nullptr;
-  } else {
-    MIR* after_list = insert_after->next;
-    insert_after->next = first_list_mir;
-    last_list_mir->next = after_list;
-    if (after_list == nullptr) {
-      last_mir_insn = last_list_mir;
-    }
-  }
-
-  // Set this BB to be the basic block of the MIRs.
-  MIR* last = last_list_mir->next;
-  for (MIR* mir = first_list_mir; mir != last; mir = mir->next) {
-    mir->bb = id;
-  }
-}
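-
-// Sketch of the splice above for a hypothetical block holding A -> B:
-//   before: first_mir_insn -> A -> B -> nullptr
-//   InsertMIRListAfter(A, X, Y)   // X ... Y is the list being inserted
-//   after:  first_mir_insn -> A -> X ... Y -> B -> nullptr
-// If A is the tail (no B), last_mir_insn is updated to Y instead.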
-
-/* Insert an MIR instruction at the head of a basic block. */
-void BasicBlock::PrependMIR(MIR* mir) {
-  InsertMIRListBefore(first_mir_insn, mir, mir);
-}
-
-void BasicBlock::PrependMIRList(MIR* first_list_mir, MIR* last_list_mir) {
-  // Insert it before the first MIR.
-  InsertMIRListBefore(first_mir_insn, first_list_mir, last_list_mir);
-}
-
-void BasicBlock::PrependMIRList(const std::vector<MIR*>& to_add) {
-  for (MIR* mir : to_add) {
-    // Note: prepending one element at a time reverses the relative order of to_add.
-    InsertMIRListBefore(first_mir_insn, mir, mir);
-  }
-}
-
-/* Insert a MIR instruction before the specified MIR. */
-void BasicBlock::InsertMIRBefore(MIR* current_mir, MIR* new_mir) {
-  // Insert as a single element list.
-  return InsertMIRListBefore(current_mir, new_mir, new_mir);
-}
-
-MIR* BasicBlock::FindPreviousMIR(MIR* mir) {
-  MIR* current = first_mir_insn;
-
-  while (current != nullptr) {
-    MIR* next = current->next;
-
-    if (next == mir) {
-      return current;
-    }
-
-    current = next;
-  }
-
-  return nullptr;
-}
-
-void BasicBlock::InsertMIRListBefore(MIR* insert_before, MIR* first_list_mir, MIR* last_list_mir) {
-  // If no MIR, we are done.
-  if (first_list_mir == nullptr || last_list_mir == nullptr) {
-    return;
-  }
-
-  // If insert_before is null, assume BB is empty.
-  if (insert_before == nullptr) {
-    first_mir_insn = first_list_mir;
-    last_mir_insn = last_list_mir;
-    last_list_mir->next = nullptr;
-  } else {
-    if (first_mir_insn == insert_before) {
-      last_list_mir->next = first_mir_insn;
-      first_mir_insn = first_list_mir;
-    } else {
-      // Find the preceding MIR.
-      MIR* before_list = FindPreviousMIR(insert_before);
-      DCHECK(before_list != nullptr);
-      before_list->next = first_list_mir;
-      last_list_mir->next = insert_before;
-    }
-  }
-
-  // Set this BB to be the basic block of the MIRs.
-  for (MIR* mir = first_list_mir; mir != last_list_mir->next; mir = mir->next) {
-    mir->bb = id;
-  }
-}
-
-bool BasicBlock::RemoveMIR(MIR* mir) {
-  // Remove as a single element list.
-  return RemoveMIRList(mir, mir);
-}
-
-bool BasicBlock::RemoveMIRList(MIR* first_list_mir, MIR* last_list_mir) {
-  if (first_list_mir == nullptr) {
-    return false;
-  }
-
-  // Try to find the MIR.
-  MIR* before_list = nullptr;
-  MIR* after_list = nullptr;
-
-  // If we are removing from the beginning of the MIR list.
-  if (first_mir_insn == first_list_mir) {
-    before_list = nullptr;
-  } else {
-    before_list = FindPreviousMIR(first_list_mir);
-    if (before_list == nullptr) {
-      // We did not find the mir.
-      return false;
-    }
-  }
-
-  // Remove the BB information and also find the after_list.
-  for (MIR* mir = first_list_mir; mir != last_list_mir->next; mir = mir->next) {
-    mir->bb = NullBasicBlockId;
-  }
-
-  after_list = last_list_mir->next;
-
-  // If there is nothing before the list, after_list is the first_mir.
-  if (before_list == nullptr) {
-    first_mir_insn = after_list;
-  } else {
-    before_list->next = after_list;
-  }
-
-  // If there is nothing after the list, before_list is last_mir.
-  if (after_list == nullptr) {
-    last_mir_insn = before_list;
-  }
-
-  return true;
-}
-
-MIR* BasicBlock::GetFirstNonPhiInsn() {
-  MIR* mir = first_mir_insn;
-  while (mir != nullptr && static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
-    mir = mir->next;
-  }
-  return mir;
-}
-
-MIR* BasicBlock::GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current) {
-  MIR* next_mir = nullptr;
-
-  if (current != nullptr) {
-    next_mir = current->next;
-  }
-
-  if (next_mir == nullptr) {
-    // Only look for next MIR that follows unconditionally.
-    if ((taken == NullBasicBlockId) && (fall_through != NullBasicBlockId)) {
-      next_mir = mir_graph->GetBasicBlock(fall_through)->first_mir_insn;
-    }
-  }
-
-  return next_mir;
-}
-
-static void FillTypeSizeString(uint32_t type_size, std::string* decoded_mir) {
-  DCHECK(decoded_mir != nullptr);
-  OpSize type = static_cast<OpSize>(type_size >> 16);
-  uint16_t vect_size = (type_size & 0xFFFF);
-
-  // Now print the type and vector size.
-  std::stringstream ss;
-  ss << " (type:";
-  ss << type;
-  ss << " vectsize:";
-  ss << vect_size;
-  ss << ")";
-
-  decoded_mir->append(ss.str());
-}
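-
-// The type_size operand decoded above packs the OpSize into the upper 16 bits and the
-// vector size into the lower 16, i.e. roughly type_size = (type << 16) | vect_size.
-// A hypothetical value 0x00020008 therefore describes an 8-element vector whose element
-// type is OpSize value 2 (the exact type text comes from the OpSize stream operator).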
-
-void MIRGraph::DisassembleExtendedInstr(const MIR* mir, std::string* decoded_mir) {
-  DCHECK(decoded_mir != nullptr);
-  int opcode = mir->dalvikInsn.opcode;
-  SSARepresentation* ssa_rep = mir->ssa_rep;
-  int defs = (ssa_rep != nullptr) ? ssa_rep->num_defs : 0;
-  int uses = (ssa_rep != nullptr) ? ssa_rep->num_uses : 0;
-
-  if (opcode < kMirOpFirst) {
-    return;  // It is not an extended instruction.
-  }
-
-  decoded_mir->append(extended_mir_op_names_[opcode - kMirOpFirst]);
-
-  switch (opcode) {
-    case kMirOpPhi: {
-      if (defs > 0 && uses > 0) {
-        BasicBlockId* incoming = mir->meta.phi_incoming;
-        decoded_mir->append(StringPrintf(" %s = (%s",
-                           GetSSANameWithConst(ssa_rep->defs[0], true).c_str(),
-                           GetSSANameWithConst(ssa_rep->uses[0], true).c_str()));
-        decoded_mir->append(StringPrintf(":%d", incoming[0]));
-        for (int i = 1; i < uses; i++) {
-          decoded_mir->append(StringPrintf(", %s:%d", GetSSANameWithConst(ssa_rep->uses[i], true).c_str(), incoming[i]));
-        }
-        decoded_mir->append(")");
-      }
-      break;
-    }
-    case kMirOpCopy:
-      if (ssa_rep != nullptr) {
-        decoded_mir->append(" ");
-        decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[0], false));
-        if (defs > 1) {
-          decoded_mir->append(", ");
-          decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false));
-        }
-        decoded_mir->append(" = ");
-        decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[0], false));
-        if (uses > 1) {
-          decoded_mir->append(", ");
-          decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[1], false));
-        }
-      } else {
-        decoded_mir->append(StringPrintf(" v%d = v%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
-      }
-      break;
-    case kMirOpFusedCmplFloat:
-    case kMirOpFusedCmpgFloat:
-    case kMirOpFusedCmplDouble:
-    case kMirOpFusedCmpgDouble:
-    case kMirOpFusedCmpLong:
-      if (ssa_rep != nullptr) {
-        decoded_mir->append(" ");
-        decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[0], false));
-        for (int i = 1; i < uses; i++) {
-          decoded_mir->append(", ");
-          decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[i], false));
-        }
-      } else {
-        decoded_mir->append(StringPrintf(" v%d, v%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
-      }
-      break;
-    case kMirOpMoveVector:
-      decoded_mir->append(StringPrintf(" vect%d = vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
-      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
-      break;
-    case kMirOpPackedAddition:
-      decoded_mir->append(StringPrintf(" vect%d = vect%d + vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
-      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
-      break;
-    case kMirOpPackedMultiply:
-      decoded_mir->append(StringPrintf(" vect%d = vect%d * vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
-      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
-      break;
-    case kMirOpPackedSubtract:
-      decoded_mir->append(StringPrintf(" vect%d = vect%d - vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
-      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
-      break;
-    case kMirOpPackedAnd:
-      decoded_mir->append(StringPrintf(" vect%d = vect%d & vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
-      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
-      break;
-    case kMirOpPackedOr:
-      decoded_mir->append(StringPrintf(" vect%d = vect%d \\| vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
-      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
-      break;
-    case kMirOpPackedXor:
-      decoded_mir->append(StringPrintf(" vect%d = vect%d ^ vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
-      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
-      break;
-    case kMirOpPackedShiftLeft:
-      decoded_mir->append(StringPrintf(" vect%d = vect%d \\<\\< %d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
-      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
-      break;
-    case kMirOpPackedUnsignedShiftRight:
-      decoded_mir->append(StringPrintf(" vect%d = vect%d \\>\\>\\> %d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
-      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
-      break;
-    case kMirOpPackedSignedShiftRight:
-      decoded_mir->append(StringPrintf(" vect%d = vect%d \\>\\> %d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
-      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
-      break;
-    case kMirOpConstVector:
-      decoded_mir->append(StringPrintf(" vect%d = %x, %x, %x, %x", mir->dalvikInsn.vA, mir->dalvikInsn.arg[0],
-                                      mir->dalvikInsn.arg[1], mir->dalvikInsn.arg[2], mir->dalvikInsn.arg[3]));
-      break;
-    case kMirOpPackedSet:
-      if (ssa_rep != nullptr) {
-        decoded_mir->append(StringPrintf(" vect%d = %s", mir->dalvikInsn.vA,
-              GetSSANameWithConst(ssa_rep->uses[0], false).c_str()));
-        if (uses > 1) {
-          decoded_mir->append(", ");
-          decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[1], false));
-        }
-      } else {
-        decoded_mir->append(StringPrintf(" vect%d = v%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
-      }
-      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
-      break;
-    case kMirOpPackedAddReduce:
-      if (ssa_rep != nullptr) {
-        decoded_mir->append(" ");
-        decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[0], false));
-        if (defs > 1) {
-          decoded_mir->append(", ");
-          decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false));
-        }
-        decoded_mir->append(StringPrintf(" = vect%d + %s", mir->dalvikInsn.vB,
-            GetSSANameWithConst(ssa_rep->uses[0], false).c_str()));
-        if (uses > 1) {
-          decoded_mir->append(", ");
-          decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[1], false));
-        }
-      } else {
-        decoded_mir->append(StringPrintf("v%d = vect%d + v%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB, mir->dalvikInsn.vA));
-      }
-      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
-      break;
-    case kMirOpPackedReduce:
-      if (ssa_rep != nullptr) {
-        decoded_mir->append(" ");
-        decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[0], false));
-        if (defs > 1) {
-          decoded_mir->append(", ");
-          decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false));
-        }
-        decoded_mir->append(StringPrintf(" = vect%d (extr_idx:%d)", mir->dalvikInsn.vB, mir->dalvikInsn.arg[0]));
-      } else {
-        decoded_mir->append(StringPrintf(" v%d = vect%d (extr_idx:%d)", mir->dalvikInsn.vA,
-                                         mir->dalvikInsn.vB, mir->dalvikInsn.arg[0]));
-      }
-      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
-      break;
-    case kMirOpReserveVectorRegisters:
-    case kMirOpReturnVectorRegisters:
-      decoded_mir->append(StringPrintf(" vect%d - vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
-      break;
-    case kMirOpMemBarrier: {
-      decoded_mir->append(" type:");
-      std::stringstream ss;
-      ss << static_cast<MemBarrierKind>(mir->dalvikInsn.vA);
-      decoded_mir->append(ss.str());
-      break;
-    }
-    case kMirOpPackedArrayGet:
-    case kMirOpPackedArrayPut:
-      decoded_mir->append(StringPrintf(" vect%d", mir->dalvikInsn.vA));
-      if (ssa_rep != nullptr) {
-        decoded_mir->append(StringPrintf(", %s[%s]",
-                                        GetSSANameWithConst(ssa_rep->uses[0], false).c_str(),
-                                        GetSSANameWithConst(ssa_rep->uses[1], false).c_str()));
-      } else {
-        decoded_mir->append(StringPrintf(", v%d[v%d]", mir->dalvikInsn.vB, mir->dalvikInsn.vC));
-      }
-      FillTypeSizeString(mir->dalvikInsn.arg[0], decoded_mir);
-      break;
-    case kMirOpMaddInt:
-    case kMirOpMsubInt:
-    case kMirOpMaddLong:
-    case kMirOpMsubLong:
-      if (ssa_rep != nullptr) {
-        decoded_mir->append(" ");
-        decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[0], false));
-        if (defs > 1) {
-          decoded_mir->append(", ");
-          decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false));
-        }
-        for (int i = 0; i < uses; i++) {
-          decoded_mir->append(", ");
-          decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[i], false));
-        }
-      } else {
-        decoded_mir->append(StringPrintf(" v%d, v%d, v%d, v%d",
-                                         mir->dalvikInsn.vA, mir->dalvikInsn.vB,
-                                         mir->dalvikInsn.vC, mir->dalvikInsn.arg[0]));
-      }
-      break;
-    default:
-      break;
-  }
-}
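-
-// Example of the phi output above (SSA names and block ids hypothetical): a
-// two-predecessor phi prints as " v0_2 = (v0_0:1, v0_1:3)", where each ":N" suffix
-// is the id of the predecessor block the value flows in from.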
-
-char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
-  MIR::DecodedInstruction insn = mir->dalvikInsn;
-  std::string str;
-  int flags = 0;
-  int opcode = insn.opcode;
-  char* ret;
-  bool nop = false;
-  SSARepresentation* ssa_rep = mir->ssa_rep;
-  Instruction::Format dalvik_format = Instruction::k10x;  // Default to no-operand format.
-
-  // Handle special cases that recover the original dalvik instruction.
-  if (opcode == kMirOpCheck) {
-    str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
-    str.append(": ");
-    // Recover the original Dex instruction.
-    insn = mir->meta.throw_insn->dalvikInsn;
-    ssa_rep = mir->meta.throw_insn->ssa_rep;
-    opcode = insn.opcode;
-  } else if (opcode == kMirOpNop) {
-    str.append("[");
-    if (mir->offset < current_code_item_->insns_size_in_code_units_) {
-      // Recover original opcode.
-      insn.opcode = Instruction::At(current_code_item_->insns_ + mir->offset)->Opcode();
-      opcode = insn.opcode;
-    }
-    nop = true;
-  }
-  int defs = (ssa_rep != nullptr) ? ssa_rep->num_defs : 0;
-  int uses = (ssa_rep != nullptr) ? ssa_rep->num_uses : 0;
-
-  if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
-    // Note that this does not check the MIR's opcode in all cases. In cases where it
-    // recovered the original dalvik instruction, it uses that opcode instead of the extended one.
-    DisassembleExtendedInstr(mir, &str);
-  } else {
-    dalvik_format = Instruction::FormatOf(insn.opcode);
-    flags = insn.FlagsOf();
-    str.append(Instruction::Name(insn.opcode));
-
-    // For invoke-style formats, treat wide regs as a pair of singles.
-    bool show_singles = ((dalvik_format == Instruction::k35c) ||
-                         (dalvik_format == Instruction::k3rc));
-    if (defs != 0) {
-      str.append(" ");
-      str.append(GetSSANameWithConst(ssa_rep->defs[0], false));
-      if (defs > 1) {
-        str.append(", ");
-        str.append(GetSSANameWithConst(ssa_rep->defs[1], false));
-      }
-      if (uses != 0) {
-        str.append(", ");
-      }
-    }
-    for (int i = 0; i < uses; i++) {
-      str.append(" ");
-      str.append(GetSSANameWithConst(ssa_rep->uses[i], show_singles));
-      if (!show_singles && (reg_location_ != nullptr) && reg_location_[i].wide) {
-        // For the listing, skip the high sreg.
-        i++;
-      }
-      if (i != (uses - 1)) {
-        str.append(",");
-      }
-    }
-
-    switch (dalvik_format) {
-      case Instruction::k11n:  // Add one immediate from vB.
-      case Instruction::k21s:
-      case Instruction::k31i:
-      case Instruction::k21h:
-        str.append(StringPrintf(", #0x%x", insn.vB));
-        break;
-      case Instruction::k51l:  // Add one wide immediate.
-        str.append(StringPrintf(", #%" PRId64, insn.vB_wide));
-        break;
-      case Instruction::k21c:  // One register, one string/type/method index.
-      case Instruction::k31c:
-        str.append(StringPrintf(", index #0x%x", insn.vB));
-        break;
-      case Instruction::k22c:  // Two registers, one string/type/method index.
-        str.append(StringPrintf(", index #0x%x", insn.vC));
-        break;
-      case Instruction::k22s:  // Add one immediate from vC.
-      case Instruction::k22b:
-        str.append(StringPrintf(", #0x%x", insn.vC));
-        break;
-      default:
-        // Nothing left to print.
-        break;
-    }
-
-    if ((flags & Instruction::kBranch) != 0) {
-      // For branches, decode the instructions to print out the branch targets.
-      int offset = 0;
-      switch (dalvik_format) {
-        case Instruction::k21t:
-          offset = insn.vB;
-          break;
-        case Instruction::k22t:
-          offset = insn.vC;
-          break;
-        case Instruction::k10t:
-        case Instruction::k20t:
-        case Instruction::k30t:
-          offset = insn.vA;
-          break;
-        default:
-          LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode;
-          break;
-      }
-      str.append(StringPrintf(", 0x%x (%c%x)", mir->offset + offset,
-                              offset > 0 ? '+' : '-', offset > 0 ? offset : -offset));
-    }
-
-    if (nop) {
-      str.append("]--optimized away");
-    }
-  }
-  int length = str.length() + 1;
-  ret = arena_->AllocArray<char>(length, kArenaAllocDFInfo);
-  strncpy(ret, str.c_str(), length);
-  return ret;
-}
-
-/* Turn method name into a legal Linux file name */
-void MIRGraph::ReplaceSpecialChars(std::string& str) {
-  static const struct { const char before; const char after; } match[] = {
-    {'/', '-'}, {';', '#'}, {' ', '#'}, {'$', '+'},
-    {'(', '@'}, {')', '@'}, {'<', '='}, {'>', '='}
-  };
-  for (unsigned int i = 0; i < sizeof(match)/sizeof(match[0]); i++) {
-    std::replace(str.begin(), str.end(), match[i].before, match[i].after);
-  }
-}
-
-std::string MIRGraph::GetSSAName(int ssa_reg) {
-  // TODO: This value is needed for debugging. Currently, we compute this and then copy to the
-  //       arena. We should be smarter and just place straight into the arena, or compute the
-  //       value more lazily.
-  int vreg = SRegToVReg(ssa_reg);
-  if (vreg >= static_cast<int>(GetFirstTempVR())) {
-    return StringPrintf("t%d_%d", vreg, GetSSASubscript(ssa_reg));
-  } else {
-    return StringPrintf("v%d_%d", vreg, GetSSASubscript(ssa_reg));
-  }
-}
-
-// Similar to GetSSAName, but if the SSA name represents a constant, show its value as well.
-std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) {
-  if (reg_location_ == nullptr) {
-    // Pre-SSA - just use the standard name.
-    return GetSSAName(ssa_reg);
-  }
-  if (IsConst(reg_location_[ssa_reg])) {
-    if (!singles_only && reg_location_[ssa_reg].wide &&
-        !reg_location_[ssa_reg].high_word) {
-      return StringPrintf("v%d_%d#0x%" PRIx64, SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg),
-                          ConstantValueWide(reg_location_[ssa_reg]));
-    } else {
-      return StringPrintf("v%d_%d#0x%x", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg),
-                          ConstantValue(reg_location_[ssa_reg]));
-    }
-  } else {
-    // Not a constant - fall back to the standard name.
-    return GetSSAName(ssa_reg);
-  }
-}
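-
-// Examples of the names produced above (subscripts hypothetical): "v3_2" is the third
-// SSA version of dalvik register v3, "t10_0" is a compiler temp, and "v0_1#0x2a" is
-// v0_1 with its known constant value 0x2a attached.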
-
-void MIRGraph::GetBlockName(BasicBlock* bb, char* name) {
-  switch (bb->block_type) {
-    case kEntryBlock:
-      snprintf(name, BLOCK_NAME_LEN, "entry_%d", bb->id);
-      break;
-    case kExitBlock:
-      snprintf(name, BLOCK_NAME_LEN, "exit_%d", bb->id);
-      break;
-    case kDalvikByteCode:
-      snprintf(name, BLOCK_NAME_LEN, "block%04x_%d", bb->start_offset, bb->id);
-      break;
-    case kExceptionHandling:
-      snprintf(name, BLOCK_NAME_LEN, "exception%04x_%d", bb->start_offset,
-               bb->id);
-      break;
-    default:
-      snprintf(name, BLOCK_NAME_LEN, "_%d", bb->id);
-      break;
-  }
-}
-
-const char* MIRGraph::GetShortyFromMethodReference(const MethodReference& target_method) {
-  const DexFile::MethodId& method_id =
-      target_method.dex_file->GetMethodId(target_method.dex_method_index);
-  return target_method.dex_file->GetShorty(method_id.proto_idx_);
-}
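-
-// A shorty is the compressed signature stored in the dex file: one character for the
-// return type followed by one per argument, with every reference type collapsed to 'L'.
-// For example, a method String foo(int, long) has the shorty "LIJ".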
-
-/* Debug Utility - dump a compilation unit */
-void MIRGraph::DumpMIRGraph() {
-  const char* block_type_names[] = {
-    "Null Block",
-    "Entry Block",
-    "Code Block",
-    "Exit Block",
-    "Exception Handling",
-    "Catch Block"
-  };
-
-  LOG(INFO) << "Compiling " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-  LOG(INFO) << GetInsns(0) << " insns";
-  LOG(INFO) << GetNumBlocks() << " blocks in total";
-
-  for (BasicBlock* bb : block_list_) {
-    LOG(INFO) << StringPrintf("Block %d (%s) (insn %04x - %04x%s)",
-        bb->id,
-        block_type_names[bb->block_type],
-        bb->start_offset,
-        bb->last_mir_insn ? bb->last_mir_insn->offset : bb->start_offset,
-        bb->last_mir_insn ? "" : " empty");
-    if (bb->taken != NullBasicBlockId) {
-      LOG(INFO) << "  Taken branch: block " << bb->taken
-                << "(0x" << std::hex << GetBasicBlock(bb->taken)->start_offset << ")";
-    }
-    if (bb->fall_through != NullBasicBlockId) {
-      LOG(INFO) << "  Fallthrough : block " << bb->fall_through
-                << " (0x" << std::hex << GetBasicBlock(bb->fall_through)->start_offset << ")";
-    }
-  }
-}
-
-/*
- * Build an array of location records for the incoming arguments.
- * Note: one location record per word of arguments, with dummy
- * high-word loc for wide arguments.  Also pull up any following
- * MOVE_RESULT and incorporate it into the invoke.
- */
-CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range) {
-  CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
-                                                        kArenaAllocMisc));
-  MIR* move_result_mir = FindMoveResult(bb, mir);
-  if (move_result_mir == nullptr) {
-    info->result.location = kLocInvalid;
-  } else {
-    info->result = GetRawDest(move_result_mir);
-    move_result_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-  }
-  info->num_arg_words = mir->ssa_rep->num_uses;
-  info->args = (info->num_arg_words == 0) ? nullptr :
-      arena_->AllocArray<RegLocation>(info->num_arg_words, kArenaAllocMisc);
-  for (size_t i = 0; i < info->num_arg_words; i++) {
-    info->args[i] = GetRawSrc(mir, i);
-  }
-  info->opt_flags = mir->optimization_flags;
-  info->type = type;
-  info->is_range = is_range;
-  if (IsInstructionQuickInvoke(mir->dalvikInsn.opcode)) {
-    const auto& method_info = GetMethodLoweringInfo(mir);
-    info->method_ref = method_info.GetTargetMethod();
-  } else {
-    info->method_ref = MethodReference(GetCurrentDexCompilationUnit()->GetDexFile(),
-                                       mir->dalvikInsn.vB);
-  }
-  info->index = mir->dalvikInsn.vB;
-  info->offset = mir->offset;
-  info->mir = mir;
-  return info;
-}
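-
-// Example of the one-record-per-word layout noted above (signature hypothetical): a
-// virtual invoke of foo(long, int) has num_arg_words == 4: one word for the receiver,
-// two for the long (the high word gets a dummy location), and one for the int.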
-
-// Allocate a new MIR.
-MIR* MIRGraph::NewMIR() {
-  MIR* mir = new (arena_) MIR();
-  return mir;
-}
-
-// Allocate a new basic block.
-BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) {
-  BasicBlock* bb = new (arena_) BasicBlock(block_id, block_type, arena_);
-
-  // TUNING: better estimate of the exit block predecessors?
-  bb->predecessors.reserve((block_type == kExitBlock) ? 2048 : 2);
-  block_id_map_.Put(block_id, block_id);
-  return bb;
-}
-
-void MIRGraph::InitializeConstantPropagation() {
-  is_constant_v_ = new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false);
-  constant_values_ = arena_->AllocArray<int>(GetNumSSARegs(), kArenaAllocDFInfo);
-}
-
-void MIRGraph::InitializeMethodUses() {
-  // The gate starts by initializing the use counts.
-  int num_ssa_regs = GetNumSSARegs();
-  use_counts_.clear();
-  use_counts_.reserve(num_ssa_regs + 32);
-  use_counts_.resize(num_ssa_regs, 0u);
-  raw_use_counts_.clear();
-  raw_use_counts_.reserve(num_ssa_regs + 32);
-  raw_use_counts_.resize(num_ssa_regs, 0u);
-}
-
-void MIRGraph::SSATransformationStart() {
-  DCHECK(temp_scoped_alloc_.get() == nullptr);
-  temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
-  temp_.ssa.num_vregs = GetNumOfCodeAndTempVRs();
-  temp_.ssa.work_live_vregs = new (temp_scoped_alloc_.get()) ArenaBitVector(
-      temp_scoped_alloc_.get(), temp_.ssa.num_vregs, false);
-}
-
-void MIRGraph::SSATransformationEnd() {
-  // Verify the dataflow information after the pass.
-  if (cu_->enable_debug & (1 << kDebugVerifyDataflow)) {
-    VerifyDataflow();
-  }
-
-  temp_.ssa.num_vregs = 0u;
-  temp_.ssa.work_live_vregs = nullptr;
-  DCHECK(temp_.ssa.def_block_matrix == nullptr);
-  temp_.ssa.phi_node_blocks = nullptr;
-  DCHECK(temp_scoped_alloc_.get() != nullptr);
-  temp_scoped_alloc_.reset();
-
-  // Update the maximum number of reachable blocks.
-  max_num_reachable_blocks_ = num_reachable_blocks_;
-
-  // Mark MIR SSA representations as up to date.
-  mir_ssa_rep_up_to_date_ = true;
-}
-
-size_t MIRGraph::GetNumDalvikInsns() const {
-  size_t cumulative_size = 0u;
-  bool counted_current_item = false;
-  const uint8_t size_for_null_code_item = 2u;
-
-  for (auto it : m_units_) {
-    const DexFile::CodeItem* code_item = it->GetCodeItem();
-    // Even if the code item is null, we still count a non-zero value so that
-    // each m_unit is counted as having an impact.
-    cumulative_size += (code_item == nullptr ?
-        size_for_null_code_item : code_item->insns_size_in_code_units_);
-    if (code_item == current_code_item_) {
-      counted_current_item = true;
-    }
-  }
-
-  // If the current code item was not counted yet, count it now.
-  // This can happen for example in unit tests where some fields like m_units_
-  // are not initialized.
-  if (counted_current_item == false) {
-    cumulative_size += (current_code_item_ == nullptr ?
-        size_for_null_code_item : current_code_item_->insns_size_in_code_units_);
-  }
-
-  return cumulative_size;
-}
-
-static BasicBlock* SelectTopologicalSortOrderFallBack(
-    MIRGraph* mir_graph, const ArenaBitVector* current_loop,
-    const ScopedArenaVector<size_t>* visited_cnt_values, ScopedArenaAllocator* allocator,
-    ScopedArenaVector<BasicBlockId>* tmp_stack) {
-  // No true loop head has been found but there may be true loop heads after the mess we need
-  // to resolve. To avoid taking one of those, pick the candidate with the highest number of
-  // reachable unvisited nodes. That candidate will surely be a part of a loop.
-  BasicBlock* fall_back = nullptr;
-  size_t fall_back_num_reachable = 0u;
-  // Reuse the same bit vector for each candidate to mark reachable unvisited blocks.
-  ArenaBitVector candidate_reachable(allocator, mir_graph->GetNumBlocks(), false);
-  AllNodesIterator iter(mir_graph);
-  for (BasicBlock* candidate = iter.Next(); candidate != nullptr; candidate = iter.Next()) {
-    if (candidate->hidden ||                            // Hidden, or
-        candidate->visited ||                           // already processed, or
-        (*visited_cnt_values)[candidate->id] == 0u ||   // no processed predecessors, or
-        (current_loop != nullptr &&                     // outside current loop.
-         !current_loop->IsBitSet(candidate->id))) {
-      continue;
-    }
-    DCHECK(tmp_stack->empty());
-    tmp_stack->push_back(candidate->id);
-    candidate_reachable.ClearAllBits();
-    size_t num_reachable = 0u;
-    while (!tmp_stack->empty()) {
-      BasicBlockId current_id = tmp_stack->back();
-      tmp_stack->pop_back();
-      BasicBlock* current_bb = mir_graph->GetBasicBlock(current_id);
-      DCHECK(current_bb != nullptr);
-      ChildBlockIterator child_iter(current_bb, mir_graph);
-      BasicBlock* child_bb = child_iter.Next();
-      for ( ; child_bb != nullptr; child_bb = child_iter.Next()) {
-        DCHECK(!child_bb->hidden);
-        if (child_bb->visited ||                            // Already processed, or
-            (current_loop != nullptr &&                     // outside current loop.
-             !current_loop->IsBitSet(child_bb->id))) {
-          continue;
-        }
-        if (!candidate_reachable.IsBitSet(child_bb->id)) {
-          candidate_reachable.SetBit(child_bb->id);
-          tmp_stack->push_back(child_bb->id);
-          num_reachable += 1u;
-        }
-      }
-    }
-    if (fall_back_num_reachable < num_reachable) {
-      fall_back_num_reachable = num_reachable;
-      fall_back = candidate;
-    }
-  }
-  return fall_back;
-}
-
-// Compute from which unvisited blocks bb_id is reachable through unvisited blocks.
-static void ComputeUnvisitedReachableFrom(MIRGraph* mir_graph, BasicBlockId bb_id,
-                                          ArenaBitVector* reachable,
-                                          ScopedArenaVector<BasicBlockId>* tmp_stack) {
-  // NOTE: Loop heads are indicated by the "visited" flag.
-  DCHECK(tmp_stack->empty());
-  reachable->ClearAllBits();
-  tmp_stack->push_back(bb_id);
-  while (!tmp_stack->empty()) {
-    BasicBlockId current_id = tmp_stack->back();
-    tmp_stack->pop_back();
-    BasicBlock* current_bb = mir_graph->GetBasicBlock(current_id);
-    DCHECK(current_bb != nullptr);
-    for (BasicBlockId pred_id : current_bb->predecessors) {
-      BasicBlock* pred_bb = mir_graph->GetBasicBlock(pred_id);
-      DCHECK(pred_bb != nullptr);
-      if (!pred_bb->visited && !reachable->IsBitSet(pred_bb->id)) {
-        reachable->SetBit(pred_bb->id);
-        tmp_stack->push_back(pred_bb->id);
-      }
-    }
-  }
-}
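-
-// Note that the walk above follows predecessor edges, so "reachable from" is meant in
-// the reverse direction: for a hypothetical chain P1 -> P2 -> bb with P1 and P2
-// unvisited, the resulting bit vector is {P1, P2}.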
-
-void MIRGraph::ComputeTopologicalSortOrder() {
-  ScopedArenaAllocator allocator(&cu_->arena_stack);
-  unsigned int num_blocks = GetNumBlocks();
-
-  ScopedArenaQueue<BasicBlock*> q(allocator.Adapter());
-  ScopedArenaVector<size_t> visited_cnt_values(num_blocks, 0u, allocator.Adapter());
-  ScopedArenaVector<BasicBlockId> loop_head_stack(allocator.Adapter());
-  size_t max_nested_loops = 0u;
-  ArenaBitVector loop_exit_blocks(&allocator, num_blocks, false);
-  loop_exit_blocks.ClearAllBits();
-
-  // Count the number of blocks to process and add the entry block(s).
-  unsigned int num_blocks_to_process = 0u;
-  for (BasicBlock* bb : block_list_) {
-    if (bb->hidden == true) {
-      continue;
-    }
-
-    num_blocks_to_process += 1u;
-
-    if (bb->predecessors.size() == 0u) {
-      // Add entry block to the queue.
-      q.push(bb);
-    }
-  }
-
-  // Clear the topological order arrays.
-  topological_order_.clear();
-  topological_order_.reserve(num_blocks);
-  topological_order_loop_ends_.clear();
-  topological_order_loop_ends_.resize(num_blocks, 0u);
-  topological_order_indexes_.clear();
-  topological_order_indexes_.resize(num_blocks, static_cast<uint16_t>(-1));
-
-  // Mark all blocks as unvisited.
-  ClearAllVisitedFlags();
-
-  // For loop heads, keep track of the blocks from which they are reachable without going
-  // through other loop heads. Other loop heads are excluded to detect the heads of nested
-  // loops. The children in this set go into the loop body; the other children jump over the
-  // loop.
-  ScopedArenaVector<ArenaBitVector*> loop_head_reachable_from(allocator.Adapter());
-  loop_head_reachable_from.resize(num_blocks, nullptr);
-  // Reuse the same temp stack whenever calculating a loop_head_reachable_from[loop_head_id].
-  ScopedArenaVector<BasicBlockId> tmp_stack(allocator.Adapter());
-
-  while (num_blocks_to_process != 0u) {
-    BasicBlock* bb = nullptr;
-    if (!q.empty()) {
-      num_blocks_to_process -= 1u;
-      // Get top.
-      bb = q.front();
-      q.pop();
-      if (bb->visited) {
-        // Loop head: it was already processed, mark end and copy exit blocks to the queue.
-        DCHECK(q.empty()) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-        uint16_t idx = static_cast<uint16_t>(topological_order_.size());
-        topological_order_loop_ends_[topological_order_indexes_[bb->id]] = idx;
-        DCHECK_EQ(loop_head_stack.back(), bb->id);
-        loop_head_stack.pop_back();
-        ArenaBitVector* reachable =
-            loop_head_stack.empty() ? nullptr : loop_head_reachable_from[loop_head_stack.back()];
-        for (BasicBlockId candidate_id : loop_exit_blocks.Indexes()) {
-          if (reachable == nullptr || reachable->IsBitSet(candidate_id)) {
-            q.push(GetBasicBlock(candidate_id));
-            // NOTE: The BitVectorSet::IndexIterator will not check the pointed-to bit again,
-            // so clearing the bit has no effect on the iterator.
-            loop_exit_blocks.ClearBit(candidate_id);
-          }
-        }
-        continue;
-      }
-    } else {
-      // Find the new loop head.
-      AllNodesIterator iter(this);
-      while (true) {
-        BasicBlock* candidate = iter.Next();
-        if (candidate == nullptr) {
-          // We did not find a true loop head, fall back to a reachable block in any loop.
-          ArenaBitVector* current_loop =
-              loop_head_stack.empty() ? nullptr : loop_head_reachable_from[loop_head_stack.back()];
-          bb = SelectTopologicalSortOrderFallBack(this, current_loop, &visited_cnt_values,
-                                                  &allocator, &tmp_stack);
-          DCHECK(bb != nullptr) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-          if (kIsDebugBuild && cu_->dex_file != nullptr) {
-            LOG(INFO) << "Topological sort order: Using fall-back in "
-                << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " BB #" << bb->id
-                << " @0x" << std::hex << bb->start_offset
-                << ", num_blocks = " << std::dec << num_blocks;
-          }
-          break;
-        }
-        if (candidate->hidden ||                            // Hidden, or
-            candidate->visited ||                           // already processed, or
-            visited_cnt_values[candidate->id] == 0u ||      // no processed predecessors, or
-            (!loop_head_stack.empty() &&                    // outside current loop.
-             !loop_head_reachable_from[loop_head_stack.back()]->IsBitSet(candidate->id))) {
-          continue;
-        }
-
-        for (BasicBlockId pred_id : candidate->predecessors) {
-          BasicBlock* pred_bb = GetBasicBlock(pred_id);
-          DCHECK(pred_bb != nullptr);
-          if (pred_bb != candidate && !pred_bb->visited &&
-              !pred_bb->dominators->IsBitSet(candidate->id)) {
-            candidate = nullptr;  // Set candidate to null to indicate failure.
-            break;
-          }
-        }
-        if (candidate != nullptr) {
-          bb = candidate;
-          break;
-        }
-      }
-      // Compute blocks from which the loop head is reachable and process those blocks first.
-      ArenaBitVector* reachable =
-          new (&allocator) ArenaBitVector(&allocator, num_blocks, false);
-      loop_head_reachable_from[bb->id] = reachable;
-      ComputeUnvisitedReachableFrom(this, bb->id, reachable, &tmp_stack);
-      // Now mark as loop head. (Even if it's only a fall back when we don't find a true loop.)
-      loop_head_stack.push_back(bb->id);
-      max_nested_loops = std::max(max_nested_loops, loop_head_stack.size());
-    }
-
-    DCHECK_EQ(bb->hidden, false);
-    DCHECK_EQ(bb->visited, false);
-    bb->visited = true;
-    bb->nesting_depth = loop_head_stack.size();
-
-    // Now add the basic block.
-    uint16_t idx = static_cast<uint16_t>(topological_order_.size());
-    topological_order_indexes_[bb->id] = idx;
-    topological_order_.push_back(bb->id);
-
-    // Update visited_cnt_values for children.
-    ChildBlockIterator succIter(bb, this);
-    BasicBlock* successor = succIter.Next();
-    for ( ; successor != nullptr; successor = succIter.Next()) {
-      if (successor->hidden) {
-        continue;
-      }
-
-      // One more predecessor was visited.
-      visited_cnt_values[successor->id] += 1u;
-      if (visited_cnt_values[successor->id] == successor->predecessors.size()) {
-        if (loop_head_stack.empty() ||
-            loop_head_reachable_from[loop_head_stack.back()]->IsBitSet(successor->id)) {
-          q.push(successor);
-        } else {
-          DCHECK(!loop_exit_blocks.IsBitSet(successor->id));
-          loop_exit_blocks.SetBit(successor->id);
-        }
-      }
-    }
-  }
-
-  // Prepare the loop head stack for iteration.
-  topological_order_loop_head_stack_.clear();
-  topological_order_loop_head_stack_.reserve(max_nested_loops);
-  max_nested_loops_ = max_nested_loops;
-  topological_order_up_to_date_ = true;
-}
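-
-// Rough example of the result for a single natural loop (block names hypothetical)
-// entry -> A -> B -> C -> exit with a back edge C -> B:
-//   topological_order_ = [entry, A, B, C, exit]   (indexes 0..4)
-//   topological_order_loop_ends_[2] = 4, i.e. the loop headed by B ends before exit,
-// so iterating indexes [2, 4) visits exactly the loop body {B, C}.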
-
-bool BasicBlock::IsExceptionBlock() const {
-  return block_type == kExceptionHandling;
-}
-
-ChildBlockIterator::ChildBlockIterator(BasicBlock* bb, MIRGraph* mir_graph)
-    : basic_block_(bb), mir_graph_(mir_graph), visited_fallthrough_(false),
-      visited_taken_(false), have_successors_(false) {
-  // Check if we actually do have successors.
-  if (basic_block_ != nullptr && basic_block_->successor_block_list_type != kNotUsed) {
-    have_successors_ = true;
-    successor_iter_ = basic_block_->successor_blocks.cbegin();
-  }
-}
-
-BasicBlock* ChildBlockIterator::Next() {
-  // We check if we have a basic block. If we don't, we cannot get the next child.
-  if (basic_block_ == nullptr) {
-    return nullptr;
-  }
-
-  // If we haven't visited fallthrough, return that.
-  if (visited_fallthrough_ == false) {
-    visited_fallthrough_ = true;
-
-    BasicBlock* result = mir_graph_->GetBasicBlock(basic_block_->fall_through);
-    if (result != nullptr) {
-      return result;
-    }
-  }
-
-  // If we haven't visited taken, return that.
-  if (visited_taken_ == false) {
-    visited_taken_ = true;
-
-    BasicBlock* result = mir_graph_->GetBasicBlock(basic_block_->taken);
-    if (result != nullptr) {
-      return result;
-    }
-  }
-
-  // We visited both taken and fallthrough. Now check if we have successors we need to visit.
-  if (have_successors_ == true) {
-    // Get information about next successor block.
-    auto end = basic_block_->successor_blocks.cend();
-    while (successor_iter_ != end) {
-      SuccessorBlockInfo* successor_block_info = *successor_iter_;
-      ++successor_iter_;
-      // If the block was replaced by the null block, take the next one.
-      if (successor_block_info->block != NullBasicBlockId) {
-        return mir_graph_->GetBasicBlock(successor_block_info->block);
-      }
-    }
-  }
-
-  // We do not have anything.
-  return nullptr;
-}
-
-BasicBlock* BasicBlock::Copy(CompilationUnit* c_unit) {
-  MIRGraph* mir_graph = c_unit->mir_graph.get();
-  return Copy(mir_graph);
-}
-
-BasicBlock* BasicBlock::Copy(MIRGraph* mir_graph) {
-  BasicBlock* result_bb = mir_graph->CreateNewBB(block_type);
-
-  // We don't do a memcpy-style copy here because it would leave a lot of state to
-  // clean up. Do it by hand instead.
-  // Copy in taken and fallthrough.
-  result_bb->fall_through = fall_through;
-  result_bb->taken = taken;
-
-  // Copy successor links if needed.
-  ArenaAllocator* arena = mir_graph->GetArena();
-
-  result_bb->successor_block_list_type = successor_block_list_type;
-  if (result_bb->successor_block_list_type != kNotUsed) {
-    result_bb->successor_blocks.reserve(successor_blocks.size());
-    for (SuccessorBlockInfo* sbi_old : successor_blocks) {
-      SuccessorBlockInfo* sbi_new = static_cast<SuccessorBlockInfo*>(
-          arena->Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessors));
-      memcpy(sbi_new, sbi_old, sizeof(SuccessorBlockInfo));
-      result_bb->successor_blocks.push_back(sbi_new);
-    }
-  }
-
-  // Copy the start offset.
-  result_bb->start_offset = start_offset;
-
-  // Now copy instructions.
-  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
-    // Get a copy first.
-    MIR* copy = mir->Copy(mir_graph);
-
-    // Append it.
-    result_bb->AppendMIR(copy);
-  }
-
-  return result_bb;
-}
-
-MIR* MIR::Copy(MIRGraph* mir_graph) {
-  MIR* res = mir_graph->NewMIR();
-  *res = *this;
-
-  // Remove links
-  res->next = nullptr;
-  res->bb = NullBasicBlockId;
-  res->ssa_rep = nullptr;
-
-  return res;
-}
-
-MIR* MIR::Copy(CompilationUnit* c_unit) {
-  return Copy(c_unit->mir_graph.get());
-}
-
-uint32_t SSARepresentation::GetStartUseIndex(Instruction::Code opcode) {
-  // Default result.
-  int res = 0;
-
-  // Skip the stored value(s) so that the remaining uses of a put line up with
-  // those of the corresponding get.
-  switch (opcode) {
-    case Instruction::IPUT:
-    case Instruction::IPUT_OBJECT:
-    case Instruction::IPUT_BOOLEAN:
-    case Instruction::IPUT_BYTE:
-    case Instruction::IPUT_CHAR:
-    case Instruction::IPUT_SHORT:
-    case Instruction::IPUT_QUICK:
-    case Instruction::IPUT_OBJECT_QUICK:
-    case Instruction::IPUT_BOOLEAN_QUICK:
-    case Instruction::IPUT_BYTE_QUICK:
-    case Instruction::IPUT_CHAR_QUICK:
-    case Instruction::IPUT_SHORT_QUICK:
-    case Instruction::APUT:
-    case Instruction::APUT_OBJECT:
-    case Instruction::APUT_BOOLEAN:
-    case Instruction::APUT_BYTE:
-    case Instruction::APUT_CHAR:
-    case Instruction::APUT_SHORT:
-    case Instruction::SPUT:
-    case Instruction::SPUT_OBJECT:
-    case Instruction::SPUT_BOOLEAN:
-    case Instruction::SPUT_BYTE:
-    case Instruction::SPUT_CHAR:
-    case Instruction::SPUT_SHORT:
-      // Skip the VR containing what to store.
-      res = 1;
-      break;
-    case Instruction::IPUT_WIDE:
-    case Instruction::IPUT_WIDE_QUICK:
-    case Instruction::APUT_WIDE:
-    case Instruction::SPUT_WIDE:
-      // Skip the two VRs containing what to store.
-      res = 2;
-      break;
-    default:
-      // Do nothing in the general case.
-      break;
-  }
-
-  return res;
-}
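-
-// Example of the alignment this buys (registers hypothetical): for IPUT v0, v1, field@7
-// the uses are [v0 (value), v1 (object)], so the start index 1 makes uses[1] line up
-// with uses[0] of the matching IGET (the object register). For IPUT_WIDE the stored
-// value occupies two use slots, hence the start index 2.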
-
-/**
- * @brief Given a decoded instruction, checks whether the instruction sets a
- * constant and, if it does, provides more information about the constant being set.
- * @param ptr_value Pointer to a 64-bit holder for the constant.
- * @param wide Updated by the function to indicate whether the bytecode sets a wide constant.
- * @return Returns false if the decoded instruction does not represent a constant bytecode.
- */
-bool MIR::DecodedInstruction::GetConstant(int64_t* ptr_value, bool* wide) const {
-  bool sets_const = true;
-  int64_t value = vB;
-
-  DCHECK(ptr_value != nullptr);
-  DCHECK(wide != nullptr);
-
-  switch (opcode) {
-    case Instruction::CONST_4:
-    case Instruction::CONST_16:
-    case Instruction::CONST:
-      *wide = false;
-      value <<= 32;      // Sign-extend the 32-bit payload.
-      value >>= 32;
-      break;
-    case Instruction::CONST_HIGH16:
-      *wide = false;
-      value <<= 48;      // Shift the payload into the high 16 bits of the low word,
-      value >>= 32;      // then sign-extend.
-      break;
-    case Instruction::CONST_WIDE_16:
-    case Instruction::CONST_WIDE_32:
-      *wide = true;
-      value <<= 32;      // Sign-extend the 32-bit payload.
-      value >>= 32;
-      break;
-    case Instruction::CONST_WIDE:
-      *wide = true;
-      value = vB_wide;
-      break;
-    case Instruction::CONST_WIDE_HIGH16:
-      *wide = true;
-      value <<= 48;      // Move the payload into the high 16 bits of the wide constant.
-      break;
-    default:
-      sets_const = false;
-      break;
-  }
-
-  if (sets_const) {
-    *ptr_value = value;
-  }
-
-  return sets_const;
-}
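-
-// Worked example (illustrative): for CONST_HIGH16 with vB == 0x8000 the shifts
-// above compute 0x8000 << 48 == 0x8000000000000000; the arithmetic right shift
-// by 32 then yields 0xFFFFFFFF80000000, i.e. the 32-bit constant 0x80000000
-// sign-extended into the 64-bit holder, with *wide left as false.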
-
-void BasicBlock::ResetOptimizationFlags(uint16_t reset_flags) {
-  // Reset flags for all MIRs in bb.
-  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
-    mir->optimization_flags &= (~reset_flags);
-  }
-}
-
-void BasicBlock::Kill(MIRGraph* mir_graph) {
-  for (BasicBlockId pred_id : predecessors) {
-    BasicBlock* pred_bb = mir_graph->GetBasicBlock(pred_id);
-    DCHECK(pred_bb != nullptr);
-
-    // Sadly we have to go through the children by hand here.
-    pred_bb->ReplaceChild(id, NullBasicBlockId);
-  }
-  predecessors.clear();
-
-  // Mark as dead and hidden.
-  block_type = kDead;
-  hidden = true;
-
-  // Detach the block from its MIRs so we don't generate code for them. Detached
-  // MIRs are also updated so that they no longer reference a parent block.
-  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
-    mir->bb = NullBasicBlockId;
-  }
-  first_mir_insn = nullptr;
-  last_mir_insn = nullptr;
-
-  data_flow_info = nullptr;
-
-  // Erase this bb from all children's predecessors and kill unreachable children.
-  ChildBlockIterator iter(this, mir_graph);
-  for (BasicBlock* succ_bb = iter.Next(); succ_bb != nullptr; succ_bb = iter.Next()) {
-    succ_bb->ErasePredecessor(id);
-  }
-
-  // Remove links to children.
-  fall_through = NullBasicBlockId;
-  taken = NullBasicBlockId;
-  successor_block_list_type = kNotUsed;
-
-  if (kIsDebugBuild) {
-    if (catch_entry) {
-      DCHECK_EQ(mir_graph->catches_.count(start_offset), 1u);
-      mir_graph->catches_.erase(start_offset);
-    }
-  }
-}
-
-bool BasicBlock::IsSSALiveOut(const CompilationUnit* c_unit, int ssa_reg) {
-  // To determine whether the ssa reg is live out, we scan all the MIRs and remember
-  // the last SSA name defined for the same Dalvik register. At the end, if that name
-  // differs from ssa_reg, then ssa_reg is not live out of this BB.
-  int dalvik_reg = c_unit->mir_graph->SRegToVReg(ssa_reg);
-
-  int last_ssa_reg = -1;
-
-  // Walk through the MIRs in order, remembering the most recent def.
-  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
-    // Get ssa rep.
-    SSARepresentation *ssa_rep = mir->ssa_rep;
-
-    // Go through the defines for this MIR.
-    for (int i = 0; i < ssa_rep->num_defs; i++) {
-      DCHECK(ssa_rep->defs != nullptr);
-
-      // Get the ssa reg.
-      int def_ssa_reg = ssa_rep->defs[i];
-
-      // Get dalvik reg.
-      int def_dalvik_reg = c_unit->mir_graph->SRegToVReg(def_ssa_reg);
-
-      // Compare dalvik regs.
-      if (dalvik_reg == def_dalvik_reg) {
-        // We found a def of the register that we are being asked about.
-        // Remember it.
-        last_ssa_reg = def_ssa_reg;
-      }
-    }
-  }
-
-  if (last_ssa_reg == -1) {
-    // We couldn't find any def of the requested Dalvik register in this block,
-    // so be conservative and report it as live out.
-    return true;
-  }
-
-  // We found a def; ssa_reg is live out only if it is the last def in the block.
-  return (ssa_reg == last_ssa_reg);
-}
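-
-// Example (illustrative): if Dalvik register v1 is defined twice in a block,
-// first as SSA name v1_0 and then as v1_1, only the final name survives the
-// block, so IsSSALiveOut() is false for v1_0 and true for v1_1.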
-
-bool BasicBlock::ReplaceChild(BasicBlockId old_bb, BasicBlockId new_bb) {
-  // We need to check taken, fall_through, and successor_blocks to replace.
-  bool found = false;
-  if (taken == old_bb) {
-    taken = new_bb;
-    found = true;
-  }
-
-  if (fall_through == old_bb) {
-    fall_through = new_bb;
-    found = true;
-  }
-
-  if (successor_block_list_type != kNotUsed) {
-    for (SuccessorBlockInfo* successor_block_info : successor_blocks) {
-      if (successor_block_info->block == old_bb) {
-        successor_block_info->block = new_bb;
-        found = true;
-      }
-    }
-  }
-
-  return found;
-}
-
-void BasicBlock::ErasePredecessor(BasicBlockId old_pred) {
-  auto pos = std::find(predecessors.begin(), predecessors.end(), old_pred);
-  DCHECK(pos != predecessors.end());
-  // It's faster to move the back() to *pos than erase(pos).
-  *pos = predecessors.back();
-  predecessors.pop_back();
-  size_t idx = std::distance(predecessors.begin(), pos);
-  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
-    if (static_cast<int>(mir->dalvikInsn.opcode) != kMirOpPhi) {
-      break;
-    }
-    DCHECK_EQ(mir->ssa_rep->num_uses - 1u, predecessors.size());
-    DCHECK_EQ(mir->meta.phi_incoming[idx], old_pred);
-    mir->meta.phi_incoming[idx] = mir->meta.phi_incoming[predecessors.size()];
-    mir->ssa_rep->uses[idx] = mir->ssa_rep->uses[predecessors.size()];
-    mir->ssa_rep->num_uses = predecessors.size();
-  }
-}
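-
-// Illustrative sketch: with predecessors {5, 7, 9} and matching Phi operands,
-// ErasePredecessor(7) swap-removes the entry to give {5, 9}; in each Phi, the
-// incoming edge and use at the erased index are overwritten by the former last
-// entry and num_uses drops from 3 to 2, keeping the operand arrays in lockstep
-// with the predecessor list.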
-
-void BasicBlock::UpdatePredecessor(BasicBlockId old_pred, BasicBlockId new_pred) {
-  DCHECK_NE(new_pred, NullBasicBlockId);
-  auto pos = std::find(predecessors.begin(), predecessors.end(), old_pred);
-  DCHECK(pos != predecessors.end());
-  *pos = new_pred;
-  size_t idx = std::distance(predecessors.begin(), pos);
-  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
-    if (static_cast<int>(mir->dalvikInsn.opcode) != kMirOpPhi) {
-      break;
-    }
-    DCHECK_EQ(mir->meta.phi_incoming[idx], old_pred);
-    mir->meta.phi_incoming[idx] = new_pred;
-  }
-}
-
-// Create a new basic block whose id is the current number of blocks, i.e. it is
-// appended at the end of block_list_.
-BasicBlock* MIRGraph::CreateNewBB(BBType block_type) {
-  BasicBlockId id = static_cast<BasicBlockId>(block_list_.size());
-  BasicBlock* res = NewMemBB(block_type, id);
-  block_list_.push_back(res);
-  return res;
-}
-
-void MIRGraph::CalculateBasicBlockInformation(const PassManager* const post_opt_pass_manager) {
-  /* Create the pass driver and launch it */
-  PassDriverMEPostOpt driver(post_opt_pass_manager, cu_);
-  driver.Launch();
-}
-
-int MIR::DecodedInstruction::FlagsOf() const {
-  // Calculate new index.
-  int idx = static_cast<int>(opcode) - kNumPackedOpcodes;
-
-  // Check whether it is an extended opcode.
-  if (idx < 0) {
-    return Instruction::FlagsOf(opcode);
-  }
-
-  // For extended opcodes, use a switch.
-  switch (static_cast<int>(opcode)) {
-    case kMirOpPhi:
-      return Instruction::kContinue;
-    case kMirOpCopy:
-      return Instruction::kContinue;
-    case kMirOpFusedCmplFloat:
-      return Instruction::kContinue | Instruction::kBranch;
-    case kMirOpFusedCmpgFloat:
-      return Instruction::kContinue | Instruction::kBranch;
-    case kMirOpFusedCmplDouble:
-      return Instruction::kContinue | Instruction::kBranch;
-    case kMirOpFusedCmpgDouble:
-      return Instruction::kContinue | Instruction::kBranch;
-    case kMirOpFusedCmpLong:
-      return Instruction::kContinue | Instruction::kBranch;
-    case kMirOpNop:
-      return Instruction::kContinue;
-    case kMirOpNullCheck:
-      return Instruction::kContinue | Instruction::kThrow;
-    case kMirOpRangeCheck:
-      return Instruction::kContinue | Instruction::kThrow;
-    case kMirOpDivZeroCheck:
-      return Instruction::kContinue | Instruction::kThrow;
-    case kMirOpCheck:
-      return Instruction::kContinue | Instruction::kThrow;
-    case kMirOpSelect:
-      return Instruction::kContinue;
-    case kMirOpConstVector:
-      return Instruction::kContinue;
-    case kMirOpMoveVector:
-      return Instruction::kContinue;
-    case kMirOpPackedMultiply:
-      return Instruction::kContinue;
-    case kMirOpPackedAddition:
-      return Instruction::kContinue;
-    case kMirOpPackedSubtract:
-      return Instruction::kContinue;
-    case kMirOpPackedShiftLeft:
-      return Instruction::kContinue;
-    case kMirOpPackedSignedShiftRight:
-      return Instruction::kContinue;
-    case kMirOpPackedUnsignedShiftRight:
-      return Instruction::kContinue;
-    case kMirOpPackedAnd:
-      return Instruction::kContinue;
-    case kMirOpPackedOr:
-      return Instruction::kContinue;
-    case kMirOpPackedXor:
-      return Instruction::kContinue;
-    case kMirOpPackedAddReduce:
-      return Instruction::kContinue;
-    case kMirOpPackedReduce:
-      return Instruction::kContinue;
-    case kMirOpPackedSet:
-      return Instruction::kContinue;
-    case kMirOpReserveVectorRegisters:
-      return Instruction::kContinue;
-    case kMirOpReturnVectorRegisters:
-      return Instruction::kContinue;
-    case kMirOpMemBarrier:
-      return Instruction::kContinue;
-    case kMirOpPackedArrayGet:
-      return Instruction::kContinue | Instruction::kThrow;
-    case kMirOpPackedArrayPut:
-      return Instruction::kContinue | Instruction::kThrow;
-    case kMirOpMaddInt:
-    case kMirOpMsubInt:
-    case kMirOpMaddLong:
-    case kMirOpMsubLong:
-      return Instruction::kContinue;
-    default:
-      LOG(WARNING) << "ExtendedFlagsOf: Unhandled case: " << static_cast<int> (opcode);
-      return 0;
-  }
-}
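-
-// Example (illustrative): FlagsOf() on a kMirOpFusedCmplFloat MIR reports
-// kContinue | kBranch, which is exactly the combination IsConditionalBranch()
-// tests for, so the fused compare-and-branch is treated as a conditional branch.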
-
-const uint16_t* MIRGraph::GetInsns(int m_unit_index) const {
-  return m_units_[m_unit_index]->GetCodeItem()->insns_;
-}
-
-void MIRGraph::SetPuntToInterpreter(bool val) {
-  punt_to_interpreter_ = val;
-  if (val) {
-    // Disable all subsequent optimizations. They may not be safe to run. (For example,
-    // LVN/GVN assumes there are no conflicts found by the type inference pass.)
-    cu_->disable_opt = ~static_cast<decltype(cu_->disable_opt)>(0);
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
deleted file mode 100644
index 3191fe9..0000000
--- a/compiler/dex/mir_graph.h
+++ /dev/null
@@ -1,1488 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_MIR_GRAPH_H_
-#define ART_COMPILER_DEX_MIR_GRAPH_H_
-
-#include <stdint.h>
-
-#include "base/arena_bit_vector.h"
-#include "base/arena_containers.h"
-#include "base/bit_utils.h"
-#include "base/scoped_arena_containers.h"
-#include "dex_file.h"
-#include "dex_instruction.h"
-#include "dex_types.h"
-#include "invoke_type.h"
-#include "mir_field_info.h"
-#include "mir_method_info.h"
-#include "reg_location.h"
-#include "reg_storage.h"
-
-namespace art {
-
-struct CompilationUnit;
-class DexCompilationUnit;
-class DexFileMethodInliner;
-class GlobalValueNumbering;
-class GvnDeadCodeElimination;
-class PassManager;
-class TypeInference;
-
-// Forward declaration.
-class MIRGraph;
-
-enum DataFlowAttributePos {
-  kUA = 0,
-  kUB,
-  kUC,
-  kAWide,
-  kBWide,
-  kCWide,
-  kDA,
-  kIsMove,
-  kSetsConst,
-  kFormat35c,
-  kFormat3rc,
-  kFormatExtended,       // Extended format for extended MIRs.
-  kNullCheckA,           // Null check of A.
-  kNullCheckB,           // Null check of B.
-  kNullCheckOut0,        // Null check of outgoing arg0.
-  kDstNonNull,           // May assume dst is non-null.
-  kRetNonNull,           // May assume retval is non-null.
-  kNullTransferSrc0,     // Object copy src[0] -> dst.
-  kNullTransferSrcN,     // Phi null check state transfer.
-  kRangeCheckC,          // Range check of C.
-  kCheckCastA,           // Check cast of A.
-  kFPA,
-  kFPB,
-  kFPC,
-  kCoreA,
-  kCoreB,
-  kCoreC,
-  kRefA,
-  kRefB,
-  kRefC,
-  kSameTypeAB,           // A and B have the same type but it can be core/ref/fp (IF_cc).
-  kUsesMethodStar,       // Implicit use of Method*.
-  kUsesIField,           // Accesses an instance field (IGET/IPUT).
-  kUsesSField,           // Accesses a static field (SGET/SPUT).
-  kCanInitializeClass,   // Can trigger class initialization (SGET/SPUT/INVOKE_STATIC).
-  kDoLVN,                // Worth computing local value numbers.
-};
-
-#define DF_NOP                  UINT64_C(0)
-#define DF_UA                   (UINT64_C(1) << kUA)
-#define DF_UB                   (UINT64_C(1) << kUB)
-#define DF_UC                   (UINT64_C(1) << kUC)
-#define DF_A_WIDE               (UINT64_C(1) << kAWide)
-#define DF_B_WIDE               (UINT64_C(1) << kBWide)
-#define DF_C_WIDE               (UINT64_C(1) << kCWide)
-#define DF_DA                   (UINT64_C(1) << kDA)
-#define DF_IS_MOVE              (UINT64_C(1) << kIsMove)
-#define DF_SETS_CONST           (UINT64_C(1) << kSetsConst)
-#define DF_FORMAT_35C           (UINT64_C(1) << kFormat35c)
-#define DF_FORMAT_3RC           (UINT64_C(1) << kFormat3rc)
-#define DF_FORMAT_EXTENDED      (UINT64_C(1) << kFormatExtended)
-#define DF_NULL_CHK_A           (UINT64_C(1) << kNullCheckA)
-#define DF_NULL_CHK_B           (UINT64_C(1) << kNullCheckB)
-#define DF_NULL_CHK_OUT0        (UINT64_C(1) << kNullCheckOut0)
-#define DF_NON_NULL_DST         (UINT64_C(1) << kDstNonNull)
-#define DF_NON_NULL_RET         (UINT64_C(1) << kRetNonNull)
-#define DF_NULL_TRANSFER_0      (UINT64_C(1) << kNullTransferSrc0)
-#define DF_NULL_TRANSFER_N      (UINT64_C(1) << kNullTransferSrcN)
-#define DF_RANGE_CHK_C          (UINT64_C(1) << kRangeCheckC)
-#define DF_CHK_CAST             (UINT64_C(1) << kCheckCastA)
-#define DF_FP_A                 (UINT64_C(1) << kFPA)
-#define DF_FP_B                 (UINT64_C(1) << kFPB)
-#define DF_FP_C                 (UINT64_C(1) << kFPC)
-#define DF_CORE_A               (UINT64_C(1) << kCoreA)
-#define DF_CORE_B               (UINT64_C(1) << kCoreB)
-#define DF_CORE_C               (UINT64_C(1) << kCoreC)
-#define DF_REF_A                (UINT64_C(1) << kRefA)
-#define DF_REF_B                (UINT64_C(1) << kRefB)
-#define DF_REF_C                (UINT64_C(1) << kRefC)
-#define DF_SAME_TYPE_AB         (UINT64_C(1) << kSameTypeAB)
-#define DF_UMS                  (UINT64_C(1) << kUsesMethodStar)
-#define DF_IFIELD               (UINT64_C(1) << kUsesIField)
-#define DF_SFIELD               (UINT64_C(1) << kUsesSField)
-#define DF_CLINIT               (UINT64_C(1) << kCanInitializeClass)
-#define DF_LVN                  (UINT64_C(1) << kDoLVN)
-
-#define DF_HAS_USES             (DF_UA | DF_UB | DF_UC)
-
-#define DF_HAS_DEFS             (DF_DA)
-
-#define DF_HAS_NULL_CHKS        (DF_NULL_CHK_A | \
-                                 DF_NULL_CHK_B | \
-                                 DF_NULL_CHK_OUT0)
-
-#define DF_HAS_RANGE_CHKS       (DF_RANGE_CHK_C)
-
-#define DF_HAS_NR_CHKS          (DF_HAS_NULL_CHKS | \
-                                 DF_HAS_RANGE_CHKS)
-
-#define DF_A_IS_REG             (DF_UA | DF_DA)
-#define DF_B_IS_REG             (DF_UB)
-#define DF_C_IS_REG             (DF_UC)
-#define DF_USES_FP              (DF_FP_A | DF_FP_B | DF_FP_C)
-#define DF_NULL_TRANSFER        (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)
-#define DF_IS_INVOKE            (DF_FORMAT_35C | DF_FORMAT_3RC)
-
-enum OatMethodAttributes {
-  kIsLeaf,            // Method is leaf.
-};
-
-#define METHOD_IS_LEAF          (1 << kIsLeaf)
-
-// Minimum field size to contain Dalvik v_reg number.
-#define VREG_NUM_WIDTH 16
-
-#define INVALID_VREG (0xFFFFU)
-#define INVALID_OFFSET (0xDEADF00FU)
-
-#define MIR_IGNORE_NULL_CHECK           (1 << kMIRIgnoreNullCheck)
-#define MIR_IGNORE_RANGE_CHECK          (1 << kMIRIgnoreRangeCheck)
-#define MIR_IGNORE_CHECK_CAST           (1 << kMIRIgnoreCheckCast)
-#define MIR_STORE_NON_NULL_VALUE        (1 << kMIRStoreNonNullValue)
-#define MIR_CLASS_IS_INITIALIZED        (1 << kMIRClassIsInitialized)
-#define MIR_CLASS_IS_IN_DEX_CACHE       (1 << kMIRClassIsInDexCache)
-#define MIR_IGNORE_DIV_ZERO_CHECK       (1 << kMirIgnoreDivZeroCheck)
-#define MIR_INLINED                     (1 << kMIRInlined)
-#define MIR_INLINED_PRED                (1 << kMIRInlinedPred)
-#define MIR_CALLEE                      (1 << kMIRCallee)
-#define MIR_IGNORE_SUSPEND_CHECK        (1 << kMIRIgnoreSuspendCheck)
-#define MIR_DUP                         (1 << kMIRDup)
-#define MIR_MARK                        (1 << kMIRMark)
-#define MIR_STORE_NON_TEMPORAL          (1 << kMIRStoreNonTemporal)
-
-#define BLOCK_NAME_LEN 80
-
-typedef uint16_t BasicBlockId;
-static const BasicBlockId NullBasicBlockId = 0;
-
-// Leaf optimization is basically the removal of suspend checks from leaf methods.
-// This is incompatible with SuspendCheckElimination (SCE) which eliminates suspend
-// checks from loops that call any non-intrinsic method, since a loop that calls
-// only a leaf method would end up without any suspend checks at all. So turning
-// this on automatically disables the SCE in MIRGraph::EliminateSuspendChecksGate().
-//
-// Since the Optimizing compiler is actually applying the same optimization, Quick
-// must not run SCE anyway, so we enable this optimization as a way to disable SCE
-// while keeping a consistent behavior across the backends, b/22657404.
-static constexpr bool kLeafOptimization = true;
-
-/*
- * In general, vreg/sreg describe Dalvik registers that originated with dx.  However,
- * it is useful to have compiler-generated temporary registers and have them treated
- * in the same manner as dx-generated virtual registers.  This struct records the SSA
- * name of compiler-introduced temporaries.
- */
-struct CompilerTemp {
-  int32_t v_reg;      // Virtual register number for temporary.
-  int32_t s_reg_low;  // SSA name for low Dalvik word.
-};
-
-enum CompilerTempType {
-  kCompilerTempVR,                // A virtual register temporary.
-  kCompilerTempSpecialMethodPtr,  // Temporary that keeps track of current method pointer.
-  kCompilerTempBackend,           // Temporary that is used by backend.
-};
-
-// When the debug option is enabled, records the effectiveness of null and range check elimination.
-struct Checkstats {
-  int32_t null_checks;
-  int32_t null_checks_eliminated;
-  int32_t range_checks;
-  int32_t range_checks_eliminated;
-};
-
-// Dataflow attributes of a basic block.
-struct BasicBlockDataFlow {
-  ArenaBitVector* use_v;
-  ArenaBitVector* def_v;
-  ArenaBitVector* live_in_v;
-  int32_t* vreg_to_ssa_map_exit;
-};
-
-/*
- * Normalized use/def for a MIR operation using SSA names rather than vregs.  Note that
- * uses/defs retain the Dalvik convention that long operations operate on a pair of 32-bit
- * vregs.  For example, "ADD_LONG v0, v2, v3" would have 2 defs (v0/v1) and 4 uses (v2/v3, v4/v5).
- * Following SSA renaming, this is the primary struct used by code generators to locate
- * operand and result registers.  This is a somewhat confusing and unhelpful convention that
- * we may want to revisit in the future.
- *
- * TODO:
- *  1. Add accessors for uses/defs and make data private
- *  2. Change fp_use/fp_def to a bit array (could help memory usage)
- *  3. Combine array storage into internal array and handled via accessors from 1.
- */
-struct SSARepresentation {
-  int32_t* uses;
-  int32_t* defs;
-  uint16_t num_uses_allocated;
-  uint16_t num_defs_allocated;
-  uint16_t num_uses;
-  uint16_t num_defs;
-
-  static uint32_t GetStartUseIndex(Instruction::Code opcode);
-};
-
-/*
- * The Midlevel Intermediate Representation node, which may be largely considered a
- * wrapper around a Dalvik byte code.
- */
-class MIR : public ArenaObject<kArenaAllocMIR> {
- public:
-  /*
-   * TODO: remove embedded DecodedInstruction to save space, keeping only opcode.  Recover
-   * additional fields on as-needed basis.  Question: how to support MIR Pseudo-ops; probably
-   * need to carry aux data pointer.
-   */
-  struct DecodedInstruction {
-    uint32_t vA;
-    uint32_t vB;
-    uint64_t vB_wide;        /* for k51l */
-    uint32_t vC;
-    uint32_t arg[5];         /* vC/D/E/F/G in invoke or filled-new-array */
-    Instruction::Code opcode;
-
-    DecodedInstruction() : vA(0), vB(0), vB_wide(0), vC(0), opcode(Instruction::NOP) {
-    }
-
-    /*
-     * Given a decoded instruction representing a const bytecode, updates the out
-     * arguments with the values dictated by that bytecode.
-     */
-    bool GetConstant(int64_t* ptr_value, bool* wide) const;
-
-    static bool IsPseudoMirOp(Instruction::Code opcode) {
-      return static_cast<int>(opcode) >= static_cast<int>(kMirOpFirst);
-    }
-
-    static bool IsPseudoMirOp(int opcode) {
-      return opcode >= static_cast<int>(kMirOpFirst);
-    }
-
-    bool IsInvoke() const {
-      return ((FlagsOf() & Instruction::kInvoke) == Instruction::kInvoke);
-    }
-
-    bool IsStore() const {
-      return ((FlagsOf() & Instruction::kStore) == Instruction::kStore);
-    }
-
-    bool IsLoad() const {
-      return ((FlagsOf() & Instruction::kLoad) == Instruction::kLoad);
-    }
-
-    bool IsConditionalBranch() const {
-      return (FlagsOf() == (Instruction::kContinue | Instruction::kBranch));
-    }
-
-    /**
-     * @brief Is the register C component of the decoded instruction a constant?
-     */
-    bool IsCFieldOrConstant() const {
-      return ((FlagsOf() & Instruction::kRegCFieldOrConstant) == Instruction::kRegCFieldOrConstant);
-    }
-
-    /**
-     * @brief Is the register B component of the decoded instruction a constant?
-     */
-    bool IsBFieldOrConstant() const {
-      return ((FlagsOf() & Instruction::kRegBFieldOrConstant) == Instruction::kRegBFieldOrConstant);
-    }
-
-    bool IsCast() const {
-      return ((FlagsOf() & Instruction::kCast) == Instruction::kCast);
-    }
-
-    /**
-     * @brief Does the instruction clobber memory?
-     * @details Clobber means that the instruction may change memory in a non-local way.
-     *          Therefore any assumption about memory aliasing or memory contents should
-     *          be disregarded when crossing such an instruction.
-     */
-    bool Clobbers() const {
-      return ((FlagsOf() & Instruction::kClobber) == Instruction::kClobber);
-    }
-
-    bool IsLinear() const {
-      return (FlagsOf() & (Instruction::kAdd | Instruction::kSubtract)) != 0;
-    }
-
-    int FlagsOf() const;
-  } dalvikInsn;
-
-  NarrowDexOffset offset;         // Offset of the instruction in code units.
-  uint16_t optimization_flags;
-  int16_t m_unit_index;           // The method from which this MIR was included.
-  BasicBlockId bb;
-  MIR* next;
-  SSARepresentation* ssa_rep;
-  union {
-    // Incoming edges for phi node.
-    BasicBlockId* phi_incoming;
-    // Establish link from check instruction (kMirOpCheck) to the actual throwing instruction.
-    MIR* throw_insn;
-    // Branch condition for fused cmp or select.
-    ConditionCode ccode;
-    // IGET/IPUT lowering info index, points to MIRGraph::ifield_lowering_infos_. Due to limit on
-    // the number of code points (64K) and size of IGET/IPUT insn (2), this will never exceed 32K.
-    uint32_t ifield_lowering_info;
-    // SGET/SPUT lowering info index, points to MIRGraph::sfield_lowering_infos_. Due to limit on
-    // the number of code points (64K) and size of SGET/SPUT insn (2), this will never exceed 32K.
-    uint32_t sfield_lowering_info;
-    // INVOKE data index, points to MIRGraph::method_lowering_infos_. Also used for inlined
-    // CONST and MOVE insn (with MIR_CALLEE) to remember the invoke for type inference.
-    uint32_t method_lowering_info;
-  } meta;
-
-  MIR() : offset(0), optimization_flags(0), m_unit_index(0), bb(NullBasicBlockId),
-                 next(nullptr), ssa_rep(nullptr) {
-    memset(&meta, 0, sizeof(meta));
-  }
-
-  uint32_t GetStartUseIndex() const {
-    return SSARepresentation::GetStartUseIndex(dalvikInsn.opcode);
-  }
-
-  MIR* Copy(CompilationUnit *c_unit);
-  MIR* Copy(MIRGraph* mir_graph);
-};
-
-struct SuccessorBlockInfo;
-
-class BasicBlock : public DeletableArenaObject<kArenaAllocBasicBlock> {
- public:
-  BasicBlock(BasicBlockId block_id, BBType type, ArenaAllocator* allocator)
-      : id(block_id),
-        dfs_id(), start_offset(), fall_through(), taken(), i_dom(), nesting_depth(),
-        block_type(type),
-        successor_block_list_type(kNotUsed),
-        visited(), hidden(), catch_entry(), explicit_throw(), conditional_branch(),
-        terminated_by_return(), dominates_return(), use_lvn(), first_mir_insn(),
-        last_mir_insn(), data_flow_info(), dominators(), i_dominated(), dom_frontier(),
-        predecessors(allocator->Adapter(kArenaAllocBBPredecessors)),
-        successor_blocks(allocator->Adapter(kArenaAllocSuccessors)) {
-  }
-  BasicBlockId id;
-  BasicBlockId dfs_id;
-  NarrowDexOffset start_offset;     // Offset in code units.
-  BasicBlockId fall_through;
-  BasicBlockId taken;
-  BasicBlockId i_dom;               // Immediate dominator.
-  uint16_t nesting_depth;
-  BBType block_type:4;
-  BlockListType successor_block_list_type:4;
-  bool visited:1;
-  bool hidden:1;
-  bool catch_entry:1;
-  bool explicit_throw:1;
-  bool conditional_branch:1;
-  bool terminated_by_return:1;  // Block ends with a Dalvik return opcode.
-  bool dominates_return:1;      // Is a member of return extended basic block.
-  bool use_lvn:1;               // Run local value numbering on this block.
-  MIR* first_mir_insn;
-  MIR* last_mir_insn;
-  BasicBlockDataFlow* data_flow_info;
-  ArenaBitVector* dominators;
-  ArenaBitVector* i_dominated;      // Set nodes being immediately dominated.
-  ArenaBitVector* dom_frontier;     // Dominance frontier.
-  ArenaVector<BasicBlockId> predecessors;
-  ArenaVector<SuccessorBlockInfo*> successor_blocks;
-
-  void AppendMIR(MIR* mir);
-  void AppendMIRList(MIR* first_list_mir, MIR* last_list_mir);
-  void AppendMIRList(const std::vector<MIR*>& insns);
-  void PrependMIR(MIR* mir);
-  void PrependMIRList(MIR* first_list_mir, MIR* last_list_mir);
-  void PrependMIRList(const std::vector<MIR*>& to_add);
-  void InsertMIRAfter(MIR* current_mir, MIR* new_mir);
-  void InsertMIRListAfter(MIR* insert_after, MIR* first_list_mir, MIR* last_list_mir);
-  MIR* FindPreviousMIR(MIR* mir);
-  void InsertMIRBefore(MIR* insert_before, MIR* list);
-  void InsertMIRListBefore(MIR* insert_before, MIR* first_list_mir, MIR* last_list_mir);
-  bool RemoveMIR(MIR* mir);
-  bool RemoveMIRList(MIR* first_list_mir, MIR* last_list_mir);
-
-  BasicBlock* Copy(CompilationUnit* c_unit);
-  BasicBlock* Copy(MIRGraph* mir_graph);
-
-  /**
-   * @brief Reset the optimization_flags field of each MIR.
-   */
-  void ResetOptimizationFlags(uint16_t reset_flags);
-
-  /**
-   * @brief Kill the BasicBlock.
-   * @details Unlink predecessors and successors, remove all MIRs, set the block type to kDead
-   *          and set hidden to true.
-   */
-  void Kill(MIRGraph* mir_graph);
-
-  /**
-   * @brief Is ssa_reg the last SSA definition of that VR in the block?
-   */
-  bool IsSSALiveOut(const CompilationUnit* c_unit, int ssa_reg);
-
-  /**
-   * @brief Replace the edge going to old_bb to now go towards new_bb.
-   */
-  bool ReplaceChild(BasicBlockId old_bb, BasicBlockId new_bb);
-
-  /**
-   * @brief Erase the predecessor old_pred.
-   */
-  void ErasePredecessor(BasicBlockId old_pred);
-
-  /**
-   * @brief Update the predecessor array from old_pred to new_pred.
-   */
-  void UpdatePredecessor(BasicBlockId old_pred, BasicBlockId new_pred);
-
-  /**
-   * @brief Return first non-Phi insn.
-   */
-  MIR* GetFirstNonPhiInsn();
-
-  /**
-   * @brief Checks whether the block ends with if-nez or if-eqz that branches to
-   *        the given successor only if the register is not zero.
-   */
-  bool BranchesToSuccessorOnlyIfNotZero(BasicBlockId succ_id) const {
-    if (last_mir_insn == nullptr) {
-      return false;
-    }
-    Instruction::Code last_opcode = last_mir_insn->dalvikInsn.opcode;
-    return ((last_opcode == Instruction::IF_EQZ && fall_through == succ_id) ||
-        (last_opcode == Instruction::IF_NEZ && taken == succ_id)) &&
-        // Make sure the other successor isn't the same (empty if), b/21614284.
-        (fall_through != taken);
-  }
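-
-  // Example (illustrative): a block ending in "if-eqz vA, :zero_target" falls
-  // through only when vA != 0, so this returns true exactly for its fall-through
-  // successor (and symmetrically for if-nez and the taken edge), provided the
-  // two successors differ.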
-
-  /**
-   * @brief Used to obtain the next MIR that follows unconditionally.
-   * @details A nullptr return does not guarantee that no MIR follows; the
-   * implementation may simply be unable to find it.
-   * @param mir_graph the MIRGraph.
-   * @param current The MIR for which to find an unconditional follower.
-   * @return Returns the following MIR if one can be found.
-   */
-  MIR* GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current);
-  bool IsExceptionBlock() const;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(BasicBlock);
-};
-
-/*
- * The "blocks" field in "successor_block_list" points to an array of elements with the type
- * "SuccessorBlockInfo".  For catch blocks, key is type index for the exception.  For switch
- * blocks, key is the case value.
- */
-struct SuccessorBlockInfo {
-  BasicBlockId block;
-  int key;
-};
-
-/**
- * @class ChildBlockIterator
- * @brief Enables easy iteration over a block's children.
- */
-class ChildBlockIterator {
- public:
-  /**
-   * @brief Constructs a child iterator.
-   * @param bb The basic block whose children we need to iterate through.
-   * @param mir_graph The MIRGraph used to get the basic block during iteration.
-   */
-  ChildBlockIterator(BasicBlock* bb, MIRGraph* mir_graph);
-  BasicBlock* Next();
-
- private:
-  BasicBlock* basic_block_;
-  MIRGraph* mir_graph_;
-  bool visited_fallthrough_;
-  bool visited_taken_;
-  bool have_successors_;
-  ArenaVector<SuccessorBlockInfo*>::const_iterator successor_iter_;
-};
-
-/*
- * Collection of information describing an invoke, and the destination of
- * the subsequent MOVE_RESULT (if applicable).  Collected as a unit to enable
- * more efficient invoke code generation.
- */
-struct CallInfo {
-  size_t num_arg_words;   // Note: word count, not arg count.
-  RegLocation* args;      // One for each word of arguments.
-  RegLocation result;     // Eventual target of MOVE_RESULT.
-  int opt_flags;
-  InvokeType type;
-  uint32_t dex_idx;
-  MethodReference method_ref;
-  uint32_t index;         // Method idx for invokes, type idx for FilledNewArray.
-  uintptr_t direct_code;
-  uintptr_t direct_method;
-  RegLocation target;     // Target of following move_result.
-  bool skip_this;
-  bool is_range;
-  DexOffset offset;       // Offset in code units.
-  MIR* mir;
-  int32_t string_init_offset;
-};
-
-
-const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, RegStorage(), INVALID_SREG,
-                             INVALID_SREG};
-
-class MIRGraph {
- public:
-  MIRGraph(CompilationUnit* cu, ArenaAllocator* arena);
-  virtual ~MIRGraph();
-
-  /*
-   * Examine the graph to determine whether it's worthwhile to spend the time compiling
-   * this method.
-   */
-  bool SkipCompilation(std::string* skip_message);
-
-  /*
-   * Parse dex method and add MIR at current insert point. The method is assigned
-   * an id, which is its index in the m_units_ array.
-   */
-  void InlineMethod(const DexFile::CodeItem* code_item,
-                    uint32_t access_flags,
-                    InvokeType invoke_type,
-                    uint16_t class_def_idx,
-                    uint32_t method_idx,
-                    jobject class_loader,
-                    const DexFile& dex_file,
-                    Handle<mirror::DexCache> dex_cache);
-
-  /* Find existing block */
-  BasicBlock* FindBlock(DexOffset code_offset,
-                        ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
-    return FindBlock(code_offset, false, nullptr, dex_pc_to_block_map);
-  }
-
-  const uint16_t* GetCurrentInsns() const {
-    return current_code_item_->insns_;
-  }
-
-  /**
-   * @brief Used to obtain the raw dex bytecode instruction pointer.
-   * @param m_unit_index The method index in MIRGraph (MIRGraph can contain multiple methods).
-   * This is guaranteed to contain index 0 which is the base method being compiled.
-   * @return Returns the raw instruction pointer.
-   */
-  const uint16_t* GetInsns(int m_unit_index) const;
-
-  /**
-   * @brief Used to obtain the raw data table.
-   * @param mir sparse switch, packed switch, or fill-array-data.
-   * @param table_offset The table offset from start of method.
-   * @return Returns the raw table pointer.
-   */
-  const uint16_t* GetTable(MIR* mir, uint32_t table_offset) const {
-    return GetInsns(mir->m_unit_index) + mir->offset + static_cast<int32_t>(table_offset);
-  }
-
-  unsigned int GetNumBlocks() const {
-    return block_list_.size();
-  }
-
-  /**
-   * @brief Provides the total size in code units of all instructions in MIRGraph.
-   * @details Includes the sizes of all methods in compilation unit.
-   * @return Returns the cumulative sum of all insn sizes (in code units).
-   */
-  size_t GetNumDalvikInsns() const;
-
-  ArenaBitVector* GetTryBlockAddr() const {
-    return try_block_addr_;
-  }
-
-  BasicBlock* GetEntryBlock() const {
-    return entry_block_;
-  }
-
-  BasicBlock* GetExitBlock() const {
-    return exit_block_;
-  }
-
-  BasicBlock* GetBasicBlock(unsigned int block_id) const {
-    DCHECK_LT(block_id, block_list_.size());  // NOTE: NullBasicBlockId is 0.
-    return (block_id == NullBasicBlockId) ? nullptr : block_list_[block_id];
-  }
-
-  size_t GetBasicBlockListCount() const {
-    return block_list_.size();
-  }
-
-  const ArenaVector<BasicBlock*>& GetBlockList() {
-    return block_list_;
-  }
-
-  const ArenaVector<BasicBlockId>& GetDfsOrder() {
-    return dfs_order_;
-  }
-
-  const ArenaVector<BasicBlockId>& GetDfsPostOrder() {
-    return dfs_post_order_;
-  }
-
-  const ArenaVector<BasicBlockId>& GetDomPostOrder() {
-    return dom_post_order_traversal_;
-  }
-
-  int GetDefCount() const {
-    return def_count_;
-  }
-
-  ArenaAllocator* GetArena() const {
-    return arena_;
-  }
-
-  void EnableOpcodeCounting() {
-    opcode_count_ = arena_->AllocArray<int>(kNumPackedOpcodes, kArenaAllocMisc);
-  }
-
-  void ShowOpcodeStats();
-
-  DexCompilationUnit* GetCurrentDexCompilationUnit() const {
-    return m_units_[current_method_];
-  }
-
-  /**
-   * @brief Dump a CFG into a dot file format.
-   * @param dir_prefix the directory the file will be created in.
-   * @param all_blocks whether to dump all basic blocks or only the reachable ones.
-   * @param suffix optional suffix for the filename (default = nullptr).
-   */
-  void DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix = nullptr);
-
-  bool HasCheckCast() const {
-    return (merged_df_flags_ & DF_CHK_CAST) != 0u;
-  }
-
-  bool HasFieldAccess() const {
-    return (merged_df_flags_ & (DF_IFIELD | DF_SFIELD)) != 0u;
-  }
-
-  bool HasStaticFieldAccess() const {
-    return (merged_df_flags_ & DF_SFIELD) != 0u;
-  }
-
-  bool HasInvokes() const {
-    // NOTE: These formats include the rare filled-new-array/range.
-    return (merged_df_flags_ & (DF_FORMAT_35C | DF_FORMAT_3RC)) != 0u;
-  }
-
-  void DoCacheFieldLoweringInfo();
-
-  const MirIFieldLoweringInfo& GetIFieldLoweringInfo(MIR* mir) const {
-    return GetIFieldLoweringInfo(mir->meta.ifield_lowering_info);
-  }
-
-  const MirIFieldLoweringInfo& GetIFieldLoweringInfo(uint32_t lowering_info) const {
-    DCHECK_LT(lowering_info, ifield_lowering_infos_.size());
-    return ifield_lowering_infos_[lowering_info];
-  }
-
-  size_t GetIFieldLoweringInfoCount() const {
-    return ifield_lowering_infos_.size();
-  }
-
-  const MirSFieldLoweringInfo& GetSFieldLoweringInfo(MIR* mir) const {
-    return GetSFieldLoweringInfo(mir->meta.sfield_lowering_info);
-  }
-
-  const MirSFieldLoweringInfo& GetSFieldLoweringInfo(uint32_t lowering_info) const {
-    DCHECK_LT(lowering_info, sfield_lowering_infos_.size());
-    return sfield_lowering_infos_[lowering_info];
-  }
-
-  size_t GetSFieldLoweringInfoCount() const {
-    return sfield_lowering_infos_.size();
-  }
-
-  void DoCacheMethodLoweringInfo();
-
-  const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) const {
-    return GetMethodLoweringInfo(mir->meta.method_lowering_info);
-  }
-
-  const MirMethodLoweringInfo& GetMethodLoweringInfo(uint32_t lowering_info) const {
-    DCHECK_LT(lowering_info, method_lowering_infos_.size());
-    return method_lowering_infos_[lowering_info];
-  }
-
-  size_t GetMethodLoweringInfoCount() const {
-    return method_lowering_infos_.size();
-  }
-
-  void ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput);
-
-  void InitRegLocations();
-
-  void RemapRegLocations();
-
-  void DumpRegLocTable(RegLocation* table, int count);
-
-  void BasicBlockOptimizationStart();
-  void BasicBlockOptimization();
-  void BasicBlockOptimizationEnd();
-
-  void StringChange();
-
-  const ArenaVector<BasicBlockId>& GetTopologicalSortOrder() {
-    DCHECK(!topological_order_.empty());
-    return topological_order_;
-  }
-
-  const ArenaVector<BasicBlockId>& GetTopologicalSortOrderLoopEnds() {
-    DCHECK(!topological_order_loop_ends_.empty());
-    return topological_order_loop_ends_;
-  }
-
-  const ArenaVector<BasicBlockId>& GetTopologicalSortOrderIndexes() {
-    DCHECK(!topological_order_indexes_.empty());
-    return topological_order_indexes_;
-  }
-
-  ArenaVector<std::pair<uint16_t, bool>>* GetTopologicalSortOrderLoopHeadStack() {
-    DCHECK(!topological_order_.empty());  // Checking the main array, not the stack.
-    return &topological_order_loop_head_stack_;
-  }
-
-  size_t GetMaxNestedLoops() const {
-    return max_nested_loops_;
-  }
-
-  bool IsLoopHead(BasicBlockId bb_id) {
-    return topological_order_loop_ends_[topological_order_indexes_[bb_id]] != 0u;
-  }
-
-  bool IsConst(int32_t s_reg) const {
-    return is_constant_v_->IsBitSet(s_reg);
-  }
-
-  bool IsConst(RegLocation loc) const {
-    return loc.orig_sreg < 0 ? false : IsConst(loc.orig_sreg);
-  }
-
-  int32_t ConstantValue(RegLocation loc) const {
-    DCHECK(IsConst(loc));
-    return constant_values_[loc.orig_sreg];
-  }
-
-  int32_t ConstantValue(int32_t s_reg) const {
-    DCHECK(IsConst(s_reg));
-    return constant_values_[s_reg];
-  }
-
-  /**
-   * @brief Used to obtain the 64-bit value of a pair of ssa registers.
-   * @param s_reg_low The ssa register representing the low bits.
-   * @param s_reg_high The ssa register representing the high bits.
-   * @return Returns the 64-bit constant value.
-   */
-  int64_t ConstantValueWide(int32_t s_reg_low, int32_t s_reg_high) const {
-    DCHECK(IsConst(s_reg_low));
-    DCHECK(IsConst(s_reg_high));
-    return (static_cast<int64_t>(constant_values_[s_reg_high]) << 32) |
-        Low32Bits(static_cast<int64_t>(constant_values_[s_reg_low]));
-  }
-
-  int64_t ConstantValueWide(RegLocation loc) const {
-    DCHECK(IsConst(loc));
-    DCHECK(!loc.high_word);  // Do not allow asking for the high partner.
-    DCHECK_LT(loc.orig_sreg + 1, GetNumSSARegs());
-    return (static_cast<int64_t>(constant_values_[loc.orig_sreg + 1]) << 32) |
-        Low32Bits(static_cast<int64_t>(constant_values_[loc.orig_sreg]));
-  }
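-
-  // Worked example (illustrative): if constant_values_ holds 0x00000001 for the
-  // high sreg and 0x80000000 for the low sreg, the result is
-  // (0x1LL << 32) | Low32Bits(0xFFFFFFFF80000000LL) == 0x0000000180000000.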
-
-  /**
-   * @brief Used to mark ssa register as being constant.
-   * @param ssa_reg The ssa register.
-   * @param value The constant value of ssa register.
-   */
-  void SetConstant(int32_t ssa_reg, int32_t value);
-
-  /**
-   * @brief Used to mark an ssa register and its wide counterpart as being constant.
-   * @param ssa_reg The ssa register.
-   * @param value The 64-bit constant value of ssa register and its pair.
-   */
-  void SetConstantWide(int32_t ssa_reg, int64_t value);
-
-  bool IsConstantNullRef(RegLocation loc) const {
-    return loc.ref && loc.is_const && (ConstantValue(loc) == 0);
-  }
-
-  int GetNumSSARegs() const {
-    return num_ssa_regs_;
-  }
-
-  void SetNumSSARegs(int new_num) {
-     /*
-      * TODO: It's theoretically possible to exceed 32767, though any cases which did
-      * would be filtered out with current settings.  When orig_sreg field is removed
-      * from RegLocation, expand s_reg_low to handle all possible cases and remove DCHECK().
-      */
-    CHECK_EQ(new_num, static_cast<int16_t>(new_num));
-    num_ssa_regs_ = new_num;
-  }
-
-  unsigned int GetNumReachableBlocks() const {
-    return num_reachable_blocks_;
-  }
-
-  uint32_t GetUseCount(int sreg) const {
-    DCHECK_LT(static_cast<size_t>(sreg), use_counts_.size());
-    return use_counts_[sreg];
-  }
-
-  uint32_t GetRawUseCount(int sreg) const {
-    DCHECK_LT(static_cast<size_t>(sreg), raw_use_counts_.size());
-    return raw_use_counts_[sreg];
-  }
-
-  int GetSSASubscript(int ssa_reg) const {
-    DCHECK_LT(static_cast<size_t>(ssa_reg), ssa_subscripts_.size());
-    return ssa_subscripts_[ssa_reg];
-  }
-
-  RegLocation GetRawSrc(MIR* mir, int num) {
-    DCHECK(num < mir->ssa_rep->num_uses);
-    RegLocation res = reg_location_[mir->ssa_rep->uses[num]];
-    return res;
-  }
-
-  RegLocation GetRawDest(MIR* mir) {
-    DCHECK_GT(mir->ssa_rep->num_defs, 0);
-    RegLocation res = reg_location_[mir->ssa_rep->defs[0]];
-    return res;
-  }
-
-  RegLocation GetDest(MIR* mir) {
-    RegLocation res = GetRawDest(mir);
-    DCHECK(!res.wide);
-    return res;
-  }
-
-  RegLocation GetSrc(MIR* mir, int num) {
-    RegLocation res = GetRawSrc(mir, num);
-    DCHECK(!res.wide);
-    return res;
-  }
-
-  RegLocation GetDestWide(MIR* mir) {
-    RegLocation res = GetRawDest(mir);
-    DCHECK(res.wide);
-    return res;
-  }
-
-  RegLocation GetSrcWide(MIR* mir, int low) {
-    RegLocation res = GetRawSrc(mir, low);
-    DCHECK(res.wide);
-    return res;
-  }
-
-  RegLocation GetBadLoc() {
-    return bad_loc;
-  }
-
-  int GetMethodSReg() const {
-    return method_sreg_;
-  }
-
-  /**
-   * @brief Used to obtain the number of compiler temporaries being used.
-   * @return Returns the number of compiler temporaries.
-   */
-  size_t GetNumUsedCompilerTemps() const {
-    // Assume that the special temps will always be used.
-    return GetNumNonSpecialCompilerTemps() + max_available_special_compiler_temps_;
-  }
-
-  /**
-   * @brief Used to obtain the number of bytes needed for the special temps.
-   * @details This space is always needed because these temps have a special location on the stack.
-   * @return Returns number of bytes for the special temps.
-   */
-  size_t GetNumBytesForSpecialTemps() const;
-
-  /**
-   * @brief Used by backend as a hint for maximum number of bytes for non-special temps.
-   * @details Returns 4 bytes for each temp because that is the maximum amount needed
-   * for storing each temp. The BE could be smarter though and allocate a smaller
-   * spill region.
-   * @return Returns the maximum number of bytes needed for non-special temps.
-   */
-  size_t GetMaximumBytesForNonSpecialTemps() const {
-    return GetNumNonSpecialCompilerTemps() * sizeof(uint32_t);
-  }
-
-  /**
-   * @brief Used to obtain the number of non-special compiler temporaries being used.
-   * @return Returns the number of non-special compiler temporaries.
-   */
-  size_t GetNumNonSpecialCompilerTemps() const {
-    return num_non_special_compiler_temps_;
-  }
-
-  /**
-   * @brief Used to set the total number of available non-special compiler temporaries.
-   * @details Can fail to set the new max if more temps are already in use than new_max allows.
-   * @param new_max The new maximum number of non-special compiler temporaries.
-   * @return Returns true if the max was set and false if failed to set.
-   */
-  bool SetMaxAvailableNonSpecialCompilerTemps(size_t new_max) {
-    // Make sure that enough temps still exist for backend and also that the
-    // new max can still keep around all of the already requested temps.
-    if (new_max < (GetNumNonSpecialCompilerTemps() + reserved_temps_for_backend_)) {
-      return false;
-    } else {
-      max_available_non_special_compiler_temps_ = new_max;
-      return true;
-    }
-  }
-
-  /**
-   * @brief Provides the number of non-special compiler temps available for use by ME.
-   * @details Even if this returns zero, special compiler temps are guaranteed to be available.
-   * Additionally, this makes sure to not use any temps reserved for BE only.
-   * @return Returns the number of available temps.
-   */
-  size_t GetNumAvailableVRTemps();
-
-  /**
-   * @brief Used to obtain the maximum number of compiler temporaries that can be requested.
-   * @return Returns the maximum number of compiler temporaries, whether used or not.
-   */
-  size_t GetMaxPossibleCompilerTemps() const {
-    return max_available_special_compiler_temps_ + max_available_non_special_compiler_temps_;
-  }
-
-  /**
-   * @brief Used to signal that the compiler temps have been committed.
-   * @details This should be used once the number of temps can no longer change,
-   * such as after frame size is committed and cannot be changed.
-   */
-  void CommitCompilerTemps() {
-    compiler_temps_committed_ = true;
-  }
-
-  /**
-   * @brief Used to obtain a new unique compiler temporary.
-   * @details Two things are done for convenience when allocating a new compiler
-   * temporary. The ssa register is automatically requested and the information
-   * about reg location is filled. This helps when the temp is requested post
-   * ssa initialization, such as when temps are requested by the backend.
-   * @warning If the temp requested will be used for ME and have multiple versions,
-   * the sreg provided by the temp will be invalidated on next ssa recalculation.
-   * @param ct_type Type of compiler temporary requested.
-   * @param wide Whether we should allocate a wide temporary.
-   * @return Returns the newly created compiler temporary.
-   */
-  CompilerTemp* GetNewCompilerTemp(CompilerTempType ct_type, bool wide);
-
-  /**
-   * @brief Used to remove last created compiler temporary when it's not needed.
-   * @param temp the temporary to remove.
-   */
-  void RemoveLastCompilerTemp(CompilerTempType ct_type, bool wide, CompilerTemp* temp);
-
-  bool MethodIsLeaf() {
-    return attributes_ & METHOD_IS_LEAF;
-  }
-
-  RegLocation GetRegLocation(int index) {
-    DCHECK((index >= 0) && (index < num_ssa_regs_));
-    return reg_location_[index];
-  }
-
-  RegLocation GetMethodLoc() {
-    return reg_location_[method_sreg_];
-  }
-
-  bool IsBackEdge(BasicBlock* branch_bb, BasicBlockId target_bb_id) {
-    DCHECK_NE(target_bb_id, NullBasicBlockId);
-    DCHECK_LT(target_bb_id, topological_order_indexes_.size());
-    DCHECK_LT(branch_bb->id, topological_order_indexes_.size());
-    return topological_order_indexes_[target_bb_id] <= topological_order_indexes_[branch_bb->id];
-  }
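-
-  // Example (illustrative): in the topological order a loop head precedes its
-  // body, so a branch from the body back to the head satisfies
-  // index[head] <= index[body] and is classified as a back edge.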
-
-  bool IsSuspendCheckEdge(BasicBlock* branch_bb, BasicBlockId target_bb_id) {
-    if (!IsBackEdge(branch_bb, target_bb_id)) {
-      return false;
-    }
-    if (suspend_checks_in_loops_ == nullptr) {
-      // We didn't run suspend check elimination.
-      return true;
-    }
-    uint16_t target_depth = GetBasicBlock(target_bb_id)->nesting_depth;
-    return (suspend_checks_in_loops_[branch_bb->id] & (1u << (target_depth - 1u))) == 0;
-  }
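-
-  // Illustrative reading of the bit test above: bit (nesting depth - 1) of
-  // suspend_checks_in_loops_[bb] records that the enclosing loop at that depth
-  // already contains a suspend check, so the back edge needs its own check only
-  // when that bit is clear.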
-
-  void CountBranch(DexOffset target_offset) {
-    if (target_offset <= current_offset_) {
-      backward_branches_++;
-    } else {
-      forward_branches_++;
-    }
-  }
-
-  int GetBranchCount() {
-    return backward_branches_ + forward_branches_;
-  }
-
-  // Is this vreg in the in set?
-  bool IsInVReg(uint32_t vreg) {
-    return (vreg >= GetFirstInVR()) && (vreg < GetFirstTempVR());
-  }
-
-  uint32_t GetNumOfCodeVRs() const {
-    return current_code_item_->registers_size_;
-  }
-
-  uint32_t GetNumOfCodeAndTempVRs() const {
-    // Include all of the possible temps so that no structures overflow when initialized.
-    return GetNumOfCodeVRs() + GetMaxPossibleCompilerTemps();
-  }
-
-  uint32_t GetNumOfLocalCodeVRs() const {
-    // This also refers to the first "in" VR.
-    return GetNumOfCodeVRs() - current_code_item_->ins_size_;
-  }
-
-  uint32_t GetNumOfInVRs() const {
-    return current_code_item_->ins_size_;
-  }
-
-  uint32_t GetNumOfOutVRs() const {
-    return current_code_item_->outs_size_;
-  }
-
-  uint32_t GetFirstInVR() const {
-    return GetNumOfLocalCodeVRs();
-  }
-
-  uint32_t GetFirstTempVR() const {
-    // Temp VRs immediately follow code VRs.
-    return GetNumOfCodeVRs();
-  }
-
-  uint32_t GetFirstSpecialTempVR() const {
-    // Special temps appear first in the ordering, before non-special temps.
-    return GetFirstTempVR();
-  }
-
-  uint32_t GetFirstNonSpecialTempVR() const {
-    // We always leave space for all the special temps before the non-special ones.
-    return GetFirstSpecialTempVR() + max_available_special_compiler_temps_;
-  }
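-
-  // Illustrative VR layout implied by the accessors above (a sketch):
-  //   [0, GetFirstInVR())                                       locals
-  //   [GetFirstInVR(), GetFirstTempVR())                        ins
-  //   [GetFirstSpecialTempVR(), GetFirstNonSpecialTempVR())     special temps
-  //   [GetFirstNonSpecialTempVR(), ...)                         non-special temps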
-
-  bool HasTryCatchBlocks() const {
-    return current_code_item_->tries_size_ != 0;
-  }
-
-  void DumpCheckStats();
-  MIR* FindMoveResult(BasicBlock* bb, MIR* mir);
-
-  /* Return the base virtual register for a SSA name */
-  int SRegToVReg(int ssa_reg) const {
-    return ssa_base_vregs_[ssa_reg];
-  }
-
-  void VerifyDataflow();
-  void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
-  bool EliminateNullChecksGate();
-  bool EliminateNullChecks(BasicBlock* bb);
-  void EliminateNullChecksEnd();
-  void InferTypesStart();
-  bool InferTypes(BasicBlock* bb);
-  void InferTypesEnd();
-  bool EliminateClassInitChecksGate();
-  bool EliminateClassInitChecks(BasicBlock* bb);
-  void EliminateClassInitChecksEnd();
-  bool ApplyGlobalValueNumberingGate();
-  bool ApplyGlobalValueNumbering(BasicBlock* bb);
-  void ApplyGlobalValueNumberingEnd();
-  bool EliminateDeadCodeGate();
-  bool EliminateDeadCode(BasicBlock* bb);
-  void EliminateDeadCodeEnd();
-  void GlobalValueNumberingCleanup();
-  bool EliminateSuspendChecksGate();
-  bool EliminateSuspendChecks(BasicBlock* bb);
-
-  uint16_t GetGvnIFieldId(MIR* mir) const {
-    DCHECK(IsInstructionIGetOrIPut(mir->dalvikInsn.opcode));
-    DCHECK_LT(mir->meta.ifield_lowering_info, ifield_lowering_infos_.size());
-    DCHECK(temp_.gvn.ifield_ids != nullptr);
-    return temp_.gvn.ifield_ids[mir->meta.ifield_lowering_info];
-  }
-
-  uint16_t GetGvnSFieldId(MIR* mir) const {
-    DCHECK(IsInstructionSGetOrSPut(mir->dalvikInsn.opcode));
-    DCHECK_LT(mir->meta.sfield_lowering_info, sfield_lowering_infos_.size());
-    DCHECK(temp_.gvn.sfield_ids != nullptr);
-    return temp_.gvn.sfield_ids[mir->meta.sfield_lowering_info];
-  }
-
-  bool PuntToInterpreter() {
-    return punt_to_interpreter_;
-  }
-
-  void SetPuntToInterpreter(bool val);
-
-  void DisassembleExtendedInstr(const MIR* mir, std::string* decoded_mir);
-  char* GetDalvikDisassembly(const MIR* mir);
-  void ReplaceSpecialChars(std::string& str);
-  std::string GetSSAName(int ssa_reg);
-  std::string GetSSANameWithConst(int ssa_reg, bool singles_only);
-  void GetBlockName(BasicBlock* bb, char* name);
-  const char* GetShortyFromMethodReference(const MethodReference& target_method);
-  void DumpMIRGraph();
-  CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
-  BasicBlock* NewMemBB(BBType block_type, int block_id);
-  MIR* NewMIR();
-  MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir);
-  BasicBlock* NextDominatedBlock(BasicBlock* bb);
-  bool LayoutBlocks(BasicBlock* bb);
-  void ComputeTopologicalSortOrder();
-  BasicBlock* CreateNewBB(BBType block_type);
-
-  bool InlineSpecialMethodsGate();
-  void InlineSpecialMethodsStart();
-  void InlineSpecialMethods(BasicBlock* bb);
-  void InlineSpecialMethodsEnd();
-
-  /**
-   * @brief Perform the initial preparation for the Method Uses.
-   */
-  void InitializeMethodUses();
-
-  /**
-   * @brief Perform the initial preparation for the Constant Propagation.
-   */
-  void InitializeConstantPropagation();
-
-  /**
-   * @brief Perform the initial preparation for the SSA Transformation.
-   */
-  void SSATransformationStart();
-
-  /**
-   * @brief Insert the operands for the Phi nodes.
-   * @param bb the considered BasicBlock.
-   * @return true
-   */
-  bool InsertPhiNodeOperands(BasicBlock* bb);
-
-  /**
-   * @brief Perform the cleanup after the SSA Transformation.
-   */
-  void SSATransformationEnd();
-
-  /**
-   * @brief Perform constant propagation on a BasicBlock.
-   * @param bb the considered BasicBlock.
-   */
-  void DoConstantPropagation(BasicBlock* bb);
-
-  /**
-   * @brief Get use count weight for a given block.
-   * @param bb the BasicBlock.
-   */
-  uint32_t GetUseCountWeight(BasicBlock* bb) const;
-
-  /**
-   * @brief Count the uses in the BasicBlock
-   * @param bb the BasicBlock
-   */
-  void CountUses(BasicBlock* bb);
-
-  static uint64_t GetDataFlowAttributes(Instruction::Code opcode);
-  static uint64_t GetDataFlowAttributes(MIR* mir);
-
-  /**
-   * @brief Combine BasicBlocks
-   * @param bb the BasicBlock we are considering.
-   */
-  void CombineBlocks(BasicBlock* bb);
-
-  void ClearAllVisitedFlags();
-
-  void AllocateSSAUseData(MIR *mir, int num_uses);
-  void AllocateSSADefData(MIR *mir, int num_defs);
-  void CalculateBasicBlockInformation(const PassManager* const post_opt);
-  void ComputeDFSOrders();
-  void ComputeDefBlockMatrix();
-  void ComputeDominators();
-  void CompilerInitializeSSAConversion();
-  virtual void InitializeBasicBlockDataFlow();
-  void FindPhiNodeBlocks();
-  void DoDFSPreOrderSSARename(BasicBlock* block);
-
-  bool DfsOrdersUpToDate() const {
-    return dfs_orders_up_to_date_;
-  }
-
-  bool DominationUpToDate() const {
-    return domination_up_to_date_;
-  }
-
-  bool MirSsaRepUpToDate() const {
-    return mir_ssa_rep_up_to_date_;
-  }
-
-  bool TopologicalOrderUpToDate() const {
-    return topological_order_up_to_date_;
-  }
-
-  /*
-   * IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
-   * we can verify that all catch entries have native PC entries.
-   */
-  std::set<uint32_t> catches_;
-
-  // TODO: make these private.
-  RegLocation* reg_location_;                               // Map SSA names to location.
-  ArenaSafeMap<unsigned int, unsigned int> block_id_map_;   // Block collapse lookup cache.
-
-  static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
-
-  void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
-
- protected:
-  int FindCommonParent(int block1, int block2);
-  void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
-                         const ArenaBitVector* src2);
-  void HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
-                       ArenaBitVector* live_in_v, int dalvik_reg_id);
-  void HandleDef(ArenaBitVector* def_v, int dalvik_reg_id);
-  void HandleExtended(ArenaBitVector* use_v, ArenaBitVector* def_v,
-                      ArenaBitVector* live_in_v,
-                      const MIR::DecodedInstruction& d_insn);
-  bool DoSSAConversion(BasicBlock* bb);
-  int ParseInsn(const uint16_t* code_ptr, MIR::DecodedInstruction* decoded_instruction);
-  bool ContentIsInsn(const uint16_t* code_ptr);
-  BasicBlock* SplitBlock(DexOffset code_offset, BasicBlock* orig_block,
-                         BasicBlock** immed_pred_block_p);
-  BasicBlock* FindBlock(DexOffset code_offset, bool create, BasicBlock** immed_pred_block_p,
-                        ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
-  void ProcessTryCatchBlocks(ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
-  bool IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset, NarrowDexOffset catch_offset);
-  BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
-                               int flags, const uint16_t* code_ptr, const uint16_t* code_end,
-                               ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
-  BasicBlock* ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
-                               int flags,
-                               ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
-  BasicBlock* ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
-                              int flags, ArenaBitVector* try_block_addr, const uint16_t* code_ptr,
-                              const uint16_t* code_end,
-                              ScopedArenaVector<uint16_t>* dex_pc_to_block_map);
-  int AddNewSReg(int v_reg);
-  void HandleSSAUse(int* uses, int dalvik_reg, int reg_index);
-  void DataFlowSSAFormat35C(MIR* mir);
-  void DataFlowSSAFormat3RC(MIR* mir);
-  void DataFlowSSAFormatExtended(MIR* mir);
-  bool FindLocalLiveIn(BasicBlock* bb);
-  bool VerifyPredInfo(BasicBlock* bb);
-  BasicBlock* NeedsVisit(BasicBlock* bb);
-  BasicBlock* NextUnvisitedSuccessor(BasicBlock* bb);
-  void MarkPreOrder(BasicBlock* bb);
-  void RecordDFSOrders(BasicBlock* bb);
-  void ComputeDomPostOrderTraversal(BasicBlock* bb);
-  int GetSSAUseCount(int s_reg);
-  bool BasicBlockOpt(BasicBlock* bb);
-  void MultiplyAddOpt(BasicBlock* bb);
-
-  /**
-   * @brief Check whether the given MIR can throw an exception.
-   * @param mir The MIR to check.
-   * @return 'true' if the given MIR might throw an exception.
-   */
-  bool CanThrow(MIR* mir) const;
-
-  /**
-   * @brief Combine multiply and add/sub MIRs into corresponding extended MAC MIR.
-   * @param mul_mir The multiply MIR to be combined.
-   * @param add_mir The add/sub MIR to be combined.
-   * @param mul_is_first_addend 'true' if the multiply product is the first addend of the add operation.
-   * @param is_wide 'true' if the operations are of long type.
-   * @param is_sub 'true' if it is a multiply-subtract operation.
-   */
-  void CombineMultiplyAdd(MIR* mul_mir, MIR* add_mir, bool mul_is_first_addend,
-                          bool is_wide, bool is_sub);
-  /**
-   * @brief Check whether the first MIR anti-depends on the second MIR.
-   * @details Checks whether a vreg used by the first MIR is redefined by the second MIR,
-   * i.e. whether there is a write-after-read dependency.
-   * @param first The first MIR.
-   * @param second The second MIR.
-   * @return true if there is a write-after-read dependency.
-   */
-  bool HasAntiDependency(MIR* first, MIR* second);
-
-  bool BuildExtendedBBList(class BasicBlock* bb);
-  bool FillDefBlockMatrix(BasicBlock* bb);
-  void InitializeDominationInfo(BasicBlock* bb);
-  bool ComputeblockIDom(BasicBlock* bb);
-  bool ComputeBlockDominators(BasicBlock* bb);
-  bool SetDominators(BasicBlock* bb);
-  bool ComputeBlockLiveIns(BasicBlock* bb);
-  bool ComputeDominanceFrontier(BasicBlock* bb);
-
-  void CountChecks(BasicBlock* bb);
-  void AnalyzeBlock(BasicBlock* bb, struct MethodStats* stats);
-  bool ComputeSkipCompilation(struct MethodStats* stats, bool skip_default,
-                              std::string* skip_message);
-
-  CompilationUnit* const cu_;
-  ArenaVector<int> ssa_base_vregs_;
-  ArenaVector<int> ssa_subscripts_;
-  // Map original Dalvik virtual reg i to the current SSA name.
-  int32_t* vreg_to_ssa_map_;        // length == method->registers_size
-  int* ssa_last_defs_;              // length == method->registers_size
-  ArenaBitVector* is_constant_v_;   // length == num_ssa_reg
-  int* constant_values_;            // length == num_ssa_reg
-  // Use counts of ssa names.
-  ArenaVector<uint32_t> use_counts_;      // Weighted by nesting depth
-  ArenaVector<uint32_t> raw_use_counts_;  // Not weighted
-  unsigned int num_reachable_blocks_;
-  unsigned int max_num_reachable_blocks_;
-  bool dfs_orders_up_to_date_;
-  bool domination_up_to_date_;
-  bool mir_ssa_rep_up_to_date_;
-  bool topological_order_up_to_date_;
-  ArenaVector<BasicBlockId> dfs_order_;
-  ArenaVector<BasicBlockId> dfs_post_order_;
-  ArenaVector<BasicBlockId> dom_post_order_traversal_;
-  ArenaVector<BasicBlockId> topological_order_;
-  // Indexes in topological_order_ need to be only as big as the BasicBlockId.
-  static_assert(sizeof(BasicBlockId) == sizeof(uint16_t), "Assuming 16 bit BasicBlockId");
-  // For each loop head, remember the past-the-end index of the loop. 0 if not a loop head.
-  ArenaVector<uint16_t> topological_order_loop_ends_;
-  // Map BB ids to topological_order_ indexes. 0xffff if not included (hidden or null block).
-  ArenaVector<uint16_t> topological_order_indexes_;
-  // Stack of the loop head indexes and recalculation flags for RepeatingTopologicalSortIterator.
-  ArenaVector<std::pair<uint16_t, bool>> topological_order_loop_head_stack_;
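
A minimal sketch of how the three topological-order vectors above fit together, written as
if inside a MIRGraph method; `head_idx` (a loop head's index in topological_order_) is an
assumed local, not part of the original code:

    // topological_order_loop_ends_[i] is non-zero only when index i is a loop head.
    uint16_t end = topological_order_loop_ends_[head_idx];
    if (end != 0u) {
      for (uint16_t i = head_idx; i != end; ++i) {
        BasicBlockId id = topological_order_[i];
        DCHECK_EQ(i, topological_order_indexes_[id]);  // The two vectors are inverses.
        // ... process block `id`; it belongs to the loop headed at `head_idx` ...
      }
    }
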
-  size_t max_nested_loops_;
-  int* i_dom_list_;
-  std::unique_ptr<ScopedArenaAllocator> temp_scoped_alloc_;
-  // Union of temporaries used by different passes.
-  union {
-    // Class init check elimination.
-    struct {
-      size_t num_class_bits;  // 2 bits per class: class initialized and class in dex cache.
-      ArenaBitVector* work_classes_to_check;
-      ArenaBitVector** ending_classes_to_check_matrix;  // num_blocks_ x num_class_bits.
-      uint16_t* indexes;
-    } cice;
-    // Null check elimination.
-    struct {
-      size_t num_vregs;
-      ArenaBitVector* work_vregs_to_check;
-      ArenaBitVector** ending_vregs_to_check_matrix;  // num_blocks_ x num_vregs.
-    } nce;
-    // Special method inlining.
-    struct {
-      size_t num_indexes;
-      ArenaBitVector* processed_indexes;
-      uint16_t* lowering_infos;
-    } smi;
-    // SSA transformation.
-    struct {
-      size_t num_vregs;
-      ArenaBitVector* work_live_vregs;
-      ArenaBitVector** def_block_matrix;  // num_vregs x num_blocks_.
-      ArenaBitVector** phi_node_blocks;  // num_vregs x num_blocks_.
-      TypeInference* ti;
-    } ssa;
-    // Global value numbering.
-    struct {
-      GlobalValueNumbering* gvn;
-      uint16_t* ifield_ids;  // Part of GVN/LVN but cached here for LVN to avoid recalculation.
-      uint16_t* sfield_ids;  // Ditto.
-      GvnDeadCodeElimination* dce;
-    } gvn;
-  } temp_;
-  static const int kInvalidEntry = -1;
-  ArenaVector<BasicBlock*> block_list_;
-  ArenaBitVector* try_block_addr_;
-  BasicBlock* entry_block_;
-  BasicBlock* exit_block_;
-  const DexFile::CodeItem* current_code_item_;
-  ArenaVector<DexCompilationUnit*> m_units_;     // List of methods included in this graph
-  typedef std::pair<int, int> MIRLocation;       // Insert point, (m_unit_ index, offset)
-  ArenaVector<MIRLocation> method_stack_;        // Include stack
-  int current_method_;
-  DexOffset current_offset_;                     // Offset in code units
-  int def_count_;                                // Used to estimate size of ssa name storage.
-  int* opcode_count_;                            // Dex opcode coverage stats.
-  int num_ssa_regs_;                             // Number of names following SSA transformation.
-  ArenaVector<BasicBlockId> extended_basic_blocks_;  // Heads of block "traces".
-  int method_sreg_;
-  unsigned int attributes_;
-  Checkstats* checkstats_;
-  ArenaAllocator* const arena_;
-  int backward_branches_;
-  int forward_branches_;
-  size_t num_non_special_compiler_temps_;  // Keeps track of allocated non-special compiler temps. These are VRs in the compiler temp region on the stack.
-  size_t max_available_non_special_compiler_temps_;  // Keeps track of the maximum number of available non-special temps.
-  size_t max_available_special_compiler_temps_;      // Keeps track of the maximum number of available special temps.
-  bool requested_backend_temp_;            // Keeps track of whether BE temps have been requested.
-  size_t reserved_temps_for_backend_;      // Keeps track of the remaining temps that are reserved for the BE.
-  bool compiler_temps_committed_;          // Keeps track of whether the number of temps has been frozen (e.g. post frame size calculation).
-  bool punt_to_interpreter_;               // Difficult or not worthwhile - just interpret.
-  uint64_t merged_df_flags_;
-  ArenaVector<MirIFieldLoweringInfo> ifield_lowering_infos_;
-  ArenaVector<MirSFieldLoweringInfo> sfield_lowering_infos_;
-  ArenaVector<MirMethodLoweringInfo> method_lowering_infos_;
-
-  // In the suspend check elimination pass we determine for each basic block and enclosing
-  // loop whether there's guaranteed to be a suspend check on the path from the loop head
-  // to this block. If so, we can eliminate the back-edge suspend check.
-  // The bb->id is the index into suspend_checks_in_loops_ and the loop head's depth is the
-  // bit index in suspend_checks_in_loops_[bb->id].
-  uint32_t* suspend_checks_in_loops_;
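
A hedged sketch of the encoding just described, again as if inside a MIRGraph method;
`bb` and `depth` (the enclosing loop head's nesting depth) are assumed locals:

    // Bit `depth` of the word for this block records whether a suspend check is
    // guaranteed on every path from that loop head to `bb`.
    bool suspend_check_guaranteed =
        (suspend_checks_in_loops_[bb->id] & (1u << depth)) != 0u;
    // If it is guaranteed, the back-edge suspend check of that loop can be eliminated.
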
-
-  static const uint64_t oat_data_flow_attributes_[kMirOpLast];
-
-  friend class MirOptimizationTest;
-  friend class ClassInitCheckEliminationTest;
-  friend class SuspendCheckEliminationTest;
-  friend class NullCheckEliminationTest;
-  friend class GlobalValueNumberingTest;
-  friend class GvnDeadCodeEliminationTest;
-  friend class LocalValueNumberingTest;
-  friend class TopologicalSortOrderTest;
-  friend class TypeInferenceTest;
-  friend class QuickCFITest;
-  friend class QuickAssembleX86TestBase;
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_MIR_GRAPH_H_
diff --git a/compiler/dex/mir_graph_test.cc b/compiler/dex/mir_graph_test.cc
deleted file mode 100644
index 7858681..0000000
--- a/compiler/dex/mir_graph_test.cc
+++ /dev/null
@@ -1,446 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "compiler_ir.h"
-#include "dataflow_iterator-inl.h"
-#include "mir_graph.h"
-#include "gtest/gtest.h"
-
-namespace art {
-
-class TopologicalSortOrderTest : public testing::Test {
- protected:
-  struct BBDef {
-    static constexpr size_t kMaxSuccessors = 4;
-    static constexpr size_t kMaxPredecessors = 4;
-
-    BBType type;
-    size_t num_successors;
-    BasicBlockId successors[kMaxPredecessors];
-    size_t num_predecessors;
-    BasicBlockId predecessors[kMaxPredecessors];
-  };
-
-#define DEF_SUCC0() \
-    0u, { }
-#define DEF_SUCC1(s1) \
-    1u, { s1 }
-#define DEF_SUCC2(s1, s2) \
-    2u, { s1, s2 }
-#define DEF_SUCC3(s1, s2, s3) \
-    3u, { s1, s2, s3 }
-#define DEF_SUCC4(s1, s2, s3, s4) \
-    4u, { s1, s2, s3, s4 }
-#define DEF_PRED0() \
-    0u, { }
-#define DEF_PRED1(p1) \
-    1u, { p1 }
-#define DEF_PRED2(p1, p2) \
-    2u, { p1, p2 }
-#define DEF_PRED3(p1, p2, p3) \
-    3u, { p1, p2, p3 }
-#define DEF_PRED4(p1, p2, p3, p4) \
-    4u, { p1, p2, p3, p4 }
-#define DEF_BB(type, succ, pred) \
-    { type, succ, pred }
-
-  void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
-    cu_.mir_graph->block_id_map_.clear();
-    cu_.mir_graph->block_list_.clear();
-    ASSERT_LT(3u, count);  // null, entry, exit and at least one bytecode block.
-    ASSERT_EQ(kNullBlock, defs[0].type);
-    ASSERT_EQ(kEntryBlock, defs[1].type);
-    ASSERT_EQ(kExitBlock, defs[2].type);
-    for (size_t i = 0u; i != count; ++i) {
-      const BBDef* def = &defs[i];
-      BasicBlock* bb = cu_.mir_graph->CreateNewBB(def->type);
-      if (def->num_successors <= 2) {
-        bb->successor_block_list_type = kNotUsed;
-        bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
-        bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
-      } else {
-        bb->successor_block_list_type = kPackedSwitch;
-        bb->fall_through = 0u;
-        bb->taken = 0u;
-        bb->successor_blocks.reserve(def->num_successors);
-        for (size_t j = 0u; j != def->num_successors; ++j) {
-          SuccessorBlockInfo* successor_block_info =
-              static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
-                                                               kArenaAllocSuccessors));
-          successor_block_info->block = j;
-          successor_block_info->key = 0u;  // Not used by class init check elimination.
-          bb->successor_blocks.push_back(successor_block_info);
-        }
-      }
-      bb->predecessors.assign(def->predecessors, def->predecessors + def->num_predecessors);
-      if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
-        bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
-            cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
-      }
-    }
-    ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
-    cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
-    ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
-    cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_[2];
-    ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
-
-    DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(cu_.arena.Alloc(sizeof(DexFile::CodeItem),
-                                                                                   kArenaAllocMisc));
-    cu_.mir_graph->current_code_item_ = code_item;
-  }
-
-  template <size_t count>
-  void PrepareBasicBlocks(const BBDef (&defs)[count]) {
-    DoPrepareBasicBlocks(defs, count);
-  }
-
-  void ComputeTopologicalSortOrder() {
-    cu_.mir_graph->SSATransformationStart();
-    cu_.mir_graph->ComputeDFSOrders();
-    cu_.mir_graph->ComputeDominators();
-    cu_.mir_graph->ComputeTopologicalSortOrder();
-    cu_.mir_graph->SSATransformationEnd();
-    ASSERT_FALSE(cu_.mir_graph->topological_order_.empty());
-    ASSERT_FALSE(cu_.mir_graph->topological_order_loop_ends_.empty());
-    ASSERT_FALSE(cu_.mir_graph->topological_order_indexes_.empty());
-    ASSERT_EQ(cu_.mir_graph->GetNumBlocks(), cu_.mir_graph->topological_order_indexes_.size());
-    for (size_t i = 0, size = cu_.mir_graph->GetTopologicalSortOrder().size(); i != size; ++i) {
-      ASSERT_LT(cu_.mir_graph->topological_order_[i], cu_.mir_graph->GetNumBlocks());
-      BasicBlockId id = cu_.mir_graph->topological_order_[i];
-      EXPECT_EQ(i, cu_.mir_graph->topological_order_indexes_[id]);
-    }
-  }
-
-  void DoCheckOrder(const BasicBlockId* ids, size_t count) {
-    ASSERT_EQ(count, cu_.mir_graph->GetTopologicalSortOrder().size());
-    for (size_t i = 0; i != count; ++i) {
-      EXPECT_EQ(ids[i], cu_.mir_graph->GetTopologicalSortOrder()[i]) << i;
-    }
-  }
-
-  template <size_t count>
-  void CheckOrder(const BasicBlockId (&ids)[count]) {
-    DoCheckOrder(ids, count);
-  }
-
-  void DoCheckLoopEnds(const uint16_t* ends, size_t count) {
-    for (size_t i = 0; i != count; ++i) {
-      ASSERT_LT(i, cu_.mir_graph->GetTopologicalSortOrderLoopEnds().size());
-      EXPECT_EQ(ends[i], cu_.mir_graph->GetTopologicalSortOrderLoopEnds()[i]) << i;
-    }
-  }
-
-  template <size_t count>
-  void CheckLoopEnds(const uint16_t (&ends)[count]) {
-    DoCheckLoopEnds(ends, count);
-  }
-
-  TopologicalSortOrderTest()
-      : pool_(),
-        cu_(&pool_, kRuntimeISA, nullptr, nullptr) {
-    cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
-  }
-
-  ArenaPool pool_;
-  CompilationUnit cu_;
-};
-
-TEST_F(TopologicalSortOrderTest, DoWhile) {
-  const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)),  // "taken" loops to self.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
-  };
-  const BasicBlockId expected_order[] = {
-      1, 3, 4, 5, 2
-  };
-  const uint16_t loop_ends[] = {
-      0, 0, 3, 0, 0
-  };
-
-  PrepareBasicBlocks(bbs);
-  ComputeTopologicalSortOrder();
-  CheckOrder(expected_order);
-  CheckLoopEnds(loop_ends);
-}
-
-TEST_F(TopologicalSortOrderTest, While) {
-  const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED2(1, 4)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(3), DEF_PRED1(3)),     // Loops to 3.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(3)),
-  };
-  const BasicBlockId expected_order[] = {
-      1, 3, 4, 5, 2
-  };
-  const uint16_t loop_ends[] = {
-      0, 3, 0, 0, 0
-  };
-
-  PrepareBasicBlocks(bbs);
-  ComputeTopologicalSortOrder();
-  CheckOrder(expected_order);
-  CheckLoopEnds(loop_ends);
-}
-
-TEST_F(TopologicalSortOrderTest, WhileWithTwoBackEdges) {
-  const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 6), DEF_PRED3(1, 4, 5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 3), DEF_PRED1(3)),     // Loops to 3.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(3), DEF_PRED1(4)),        // Loops to 3.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(3)),
-  };
-  const BasicBlockId expected_order[] = {
-      1, 3, 4, 5, 6, 2
-  };
-  const uint16_t loop_ends[] = {
-      0, 4, 0, 0, 0, 0
-  };
-
-  PrepareBasicBlocks(bbs);
-  ComputeTopologicalSortOrder();
-  CheckOrder(expected_order);
-  CheckLoopEnds(loop_ends);
-}
-
-TEST_F(TopologicalSortOrderTest, NestedLoop) {
-  const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(7)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 7), DEF_PRED2(1, 6)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 6), DEF_PRED2(3, 5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(4)),            // Loops to 4.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(3), DEF_PRED1(4)),            // Loops to 3.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(3)),
-  };
-  const BasicBlockId expected_order[] = {
-      1, 3, 4, 5, 6, 7, 2
-  };
-  const uint16_t loop_ends[] = {
-      0, 5, 4, 0, 0, 0, 0
-  };
-
-  PrepareBasicBlocks(bbs);
-  ComputeTopologicalSortOrder();
-  CheckOrder(expected_order);
-  CheckLoopEnds(loop_ends);
-}
-
-TEST_F(TopologicalSortOrderTest, NestedLoopHeadLoops) {
-  const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 6), DEF_PRED2(1, 4)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 3), DEF_PRED2(3, 5)),      // Nested head, loops to 3.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(4)),            // Loops to 4.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(3)),
-  };
-  const BasicBlockId expected_order[] = {
-      1, 3, 4, 5, 6, 2
-  };
-  const uint16_t loop_ends[] = {
-      0, 4, 4, 0, 0, 0
-  };
-
-  PrepareBasicBlocks(bbs);
-  ComputeTopologicalSortOrder();
-  CheckOrder(expected_order);
-  CheckLoopEnds(loop_ends);
-}
-
-TEST_F(TopologicalSortOrderTest, NestedLoopSameBackBranchBlock) {
-  const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 6), DEF_PRED2(1, 5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED2(3, 5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 3), DEF_PRED1(4)),         // Loops to 4 and 3.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(3)),
-  };
-  const BasicBlockId expected_order[] = {
-      1, 3, 4, 5, 6, 2
-  };
-  const uint16_t loop_ends[] = {
-      0, 4, 4, 0, 0, 0
-  };
-
-  PrepareBasicBlocks(bbs);
-  ComputeTopologicalSortOrder();
-  CheckOrder(expected_order);
-  CheckLoopEnds(loop_ends);
-}
-
-TEST_F(TopologicalSortOrderTest, TwoReorderedInnerLoops) {
-  // This is a simplified version of a real code graph where the branch from 8 to 5 must prevent
-  // block 5 from being considered a loop head before processing the loop 7-8.
-  const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(9)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 9), DEF_PRED2(1, 5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 7), DEF_PRED1(3)),         // Branch over loop in 5.
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(6, 3), DEF_PRED3(4, 6, 8)),   // Loops to 4; inner loop.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(5)),            // Loops to 5.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(8), DEF_PRED2(4, 8)),         // Loop head.
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(7, 5), DEF_PRED1(7)),         // Loops to 7; branches to 5.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(3)),
-  };
-  const BasicBlockId expected_order[] = {
-      1, 3, 4, 7, 8, 5, 6, 9, 2
-  };
-  const uint16_t loop_ends[] = {
-      0, 7, 0, 5, 0, 7, 0, 0, 0
-  };
-
-  PrepareBasicBlocks(bbs);
-  ComputeTopologicalSortOrder();
-  CheckOrder(expected_order);
-  CheckLoopEnds(loop_ends);
-}
-
-TEST_F(TopologicalSortOrderTest, NestedLoopWithBackEdgeAfterOuterLoopBackEdge) {
-  // This is a simplified version of a real code graph. The back-edge from 7 to the inner
-  // loop head 4 comes after the back-edge from 6 to the outer loop head 3. To make this
-  // appear a bit more complex, there's also a back-edge from 5 to 4.
-  const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(7)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED2(1, 6)),         // Outer loop head.
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 6), DEF_PRED3(3, 5, 7)),   // Inner loop head.
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(4)),            // Loops to inner loop head 4.
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(7, 3), DEF_PRED1(4)),         // Loops to outer loop head 3.
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(2, 4), DEF_PRED1(6)),         // Loops to inner loop head 4.
-  };
-  const BasicBlockId expected_order[] = {
-      // NOTE: The 5 goes before 6 only because 5 is a "fall-through" from 4 while 6 is "taken".
-      1, 3, 4, 5, 6, 7, 2
-  };
-  const uint16_t loop_ends[] = {
-      0, 6, 6, 0, 0, 0, 0
-  };
-
-  PrepareBasicBlocks(bbs);
-  ComputeTopologicalSortOrder();
-  CheckOrder(expected_order);
-  CheckLoopEnds(loop_ends);
-}
-
-TEST_F(TopologicalSortOrderTest, LoopWithTwoEntryPoints) {
-  const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(7)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED1(1)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED2(3, 6)),  // Fall-back block is chosen as
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED2(3, 4)),  // the earlier of these two.
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 7), DEF_PRED1(5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(6)),
-  };
-  const BasicBlockId expected_order[] = {
-      1, 3, 4, 5, 6, 7, 2
-  };
-  const uint16_t loop_ends[] = {
-      0, 0, 5, 0, 0, 0, 0
-  };
-
-  PrepareBasicBlocks(bbs);
-  ComputeTopologicalSortOrder();
-  CheckOrder(expected_order);
-  CheckLoopEnds(loop_ends);
-}
-
-TEST_F(TopologicalSortOrderTest, UnnaturalLoops) {
-  const BBDef bbs[] = {
-      DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-      DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-      DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(10)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED1(1)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED2(11, 3)),  // Unnatural loop head (top-level).
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED2(3, 4)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(9, 7), DEF_PRED1(5)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(8), DEF_PRED1(6)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(9), DEF_PRED2(10, 7)),  // Unnatural loop head (nested).
-      DEF_BB(kDalvikByteCode, DEF_SUCC1(10), DEF_PRED2(6, 8)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(8, 11), DEF_PRED1(9)),
-      DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 2), DEF_PRED1(10)),
-  };
-  const BasicBlockId expected_order[] = {
-      1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 2
-  };
-  const uint16_t loop_ends[] = {
-      0, 0, 10, 0, 0, 0, 9, 0, 0, 0, 0,
-  };
-
-  PrepareBasicBlocks(bbs);
-  ComputeTopologicalSortOrder();
-  CheckOrder(expected_order);
-  CheckLoopEnds(loop_ends);
-
-  const std::pair<BasicBlockId, bool> expected_and_change[] = {
-      { 1, false },
-      { 3, false },
-      { 4, true },    // Initial run of the outer loop.
-      { 5, true },
-      { 6, true },
-      { 7, true },
-      { 8, true },    // Initial run of the inner loop.
-      { 9, true },
-      { 10, true },
-      { 8, true },    // Recalculation of the inner loop - changed.
-      { 9, true },
-      { 10, true },
-      { 8, false },   // Recalculation of the inner loop - unchanged.
-      { 11, true },
-      { 4, true },    // Recalculation of the outer loop - changed.
-      { 5, true },
-      { 6, true },
-      { 7, false },   // No change: skip inner loop head because inputs are unchanged.
-      { 9, true },
-      { 10, true },
-      { 8, true },    // Recalculation of the inner loop - changed.
-      { 9, true },
-      { 10, true },
-      { 8, false },   // Recalculation of the inner loop - unchanged.
-      { 11, true },
-      { 4, false },   // Recalculation of the outer loop - unchanged.
-      { 2, false },
-  };
-  size_t pos = 0;
-  LoopRepeatingTopologicalSortIterator iter(cu_.mir_graph.get());
-  bool change = false;
-  for (BasicBlock* bb = iter.Next(change); bb != nullptr; bb = iter.Next(change)) {
-    ASSERT_NE(arraysize(expected_and_change), pos);
-    ASSERT_EQ(expected_and_change[pos].first, bb->id) << pos;
-    change = expected_and_change[pos].second;
-    ++pos;
-  }
-  ASSERT_EQ(arraysize(expected_and_change), pos);
-}
-
-}  // namespace art
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
deleted file mode 100644
index c250bd9..0000000
--- a/compiler/dex/mir_method_info.cc
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "mir_method_info.h"
-
-#include "dex/compiler_ir.h"
-#include "dex/quick/dex_file_method_inliner.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
-#include "dex/verified_method.h"
-#include "driver/compiler_driver.h"
-#include "driver/dex_compilation_unit.h"
-#include "driver/compiler_driver-inl.h"
-#include "driver/compiler_options.h"
-#include "mirror/class_loader.h"  // Only to allow casts in Handle<ClassLoader>.
-#include "mirror/dex_cache.h"     // Only to allow casts in Handle<DexCache>.
-#include "scoped_thread_state_change.h"
-#include "handle_scope-inl.h"
-
-namespace art {
-
-void MirMethodLoweringInfo::Resolve(CompilerDriver* compiler_driver,
-                                    const DexCompilationUnit* mUnit,
-                                    MirMethodLoweringInfo* method_infos, size_t count) {
-  if (kIsDebugBuild) {
-    DCHECK(method_infos != nullptr);
-    DCHECK_NE(count, 0u);
-    for (auto it = method_infos, end = method_infos + count; it != end; ++it) {
-      MirMethodLoweringInfo unresolved(it->MethodIndex(), it->GetInvokeType(), it->IsQuickened());
-      unresolved.declaring_dex_file_ = it->declaring_dex_file_;
-      unresolved.vtable_idx_ = it->vtable_idx_;
-      if (it->target_dex_file_ != nullptr) {
-        unresolved.target_dex_file_ = it->target_dex_file_;
-        unresolved.target_method_idx_ = it->target_method_idx_;
-      }
-      if (kIsDebugBuild) {
-        unresolved.CheckEquals(*it);
-      }
-    }
-  }
-
-  // We're going to resolve methods and check access in a tight loop. It's better to hold
-  // the lock and needed references once than to re-acquire them again and again.
-  ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<4> hs(soa.Self());
-  Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
-  Handle<mirror::ClassLoader> class_loader(
-      hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
-  Handle<mirror::Class> referrer_class(hs.NewHandle(
-      compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
-  auto current_dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
-  // Even if the referrer class is unresolved (i.e. we're compiling a method without class
-  // definition) we still want to resolve methods and record all available info.
-  Runtime* const runtime = Runtime::Current();
-  const DexFile* const dex_file = mUnit->GetDexFile();
-  const bool use_jit = runtime->UseJit();
-  const VerifiedMethod* const verified_method = mUnit->GetVerifiedMethod();
-  DexFileToMethodInlinerMap* inliner_map = compiler_driver->GetMethodInlinerMap();
-  DexFileMethodInliner* default_inliner =
-      (inliner_map != nullptr) ? inliner_map->GetMethodInliner(dex_file) : nullptr;
-
-  for (auto it = method_infos, end = method_infos + count; it != end; ++it) {
-    // For quickened invokes, the dex method idx is actually the mir offset.
-    if (it->IsQuickened()) {
-      const auto* dequicken_ref = verified_method->GetDequickenIndex(it->method_idx_);
-      CHECK(dequicken_ref != nullptr);
-      it->target_dex_file_ = dequicken_ref->dex_file;
-      it->target_method_idx_ = dequicken_ref->index;
-    }
-    // Remember devirtualized invoke target and set the called method to the default.
-    MethodReference devirt_ref(it->target_dex_file_, it->target_method_idx_);
-    MethodReference* devirt_target = (it->target_dex_file_ != nullptr) ? &devirt_ref : nullptr;
-    InvokeType invoke_type = it->GetInvokeType();
-    ArtMethod* resolved_method = nullptr;
-
-    bool string_init = false;
-    if (default_inliner->IsStringInitMethodIndex(it->MethodIndex())) {
-      string_init = true;
-      invoke_type = kDirect;
-    }
-
-    if (!it->IsQuickened()) {
-      it->target_dex_file_ = dex_file;
-      it->target_method_idx_ = it->MethodIndex();
-      current_dex_cache.Assign(dex_cache.Get());
-      resolved_method = compiler_driver->ResolveMethod(soa, dex_cache, class_loader, mUnit,
-                                                       it->target_method_idx_, invoke_type, true);
-    } else {
-      // The method index is actually the dex PC in this case.
-      // Calculate the proper dex file and target method idx.
-
-      // We must be in JIT mode if we get here.
-      CHECK(use_jit);
-
-      // The invoke type better be virtual, except for the string init special case above.
-      CHECK_EQ(invoke_type, string_init ? kDirect : kVirtual);
-      // Don't devirt if we are in a different dex file since we can't have direct invokes in
-      // another dex file unless we always put a direct / patch pointer.
-      devirt_target = nullptr;
-      current_dex_cache.Assign(runtime->GetClassLinker()->FindDexCache(
-          soa.Self(), *it->target_dex_file_));
-      CHECK(current_dex_cache.Get() != nullptr);
-      DexCompilationUnit cu(
-          mUnit->GetCompilationUnit(), mUnit->GetClassLoader(), mUnit->GetClassLinker(),
-          *it->target_dex_file_, nullptr /* code_item not used */, 0u /* class_def_idx not used */,
-          it->target_method_idx_, 0u /* access_flags not used */,
-          nullptr /* verified_method not used */,
-          current_dex_cache);
-      resolved_method = compiler_driver->ResolveMethod(soa, current_dex_cache, class_loader, &cu,
-                                                       it->target_method_idx_, invoke_type, false);
-      if (resolved_method == nullptr) {
-        // If the method is null then it should be a miranda method; in this case, try
-        // re-loading it, this time as an interface method. The actual miranda method is in the
-        // vtable, but it will resolve to an interface method.
-        resolved_method = compiler_driver->ResolveMethod(
-            soa, current_dex_cache, class_loader, &cu, it->target_method_idx_, kInterface, false);
-        CHECK(resolved_method != nullptr);
-      }
-      if (resolved_method != nullptr) {
-        // Since this was a dequickened virtual, it is guaranteed to be resolved. However, it may be
-        // resolved to an interface method. If this is the case then change the invoke type to
-        // interface with the assumption that sharp_type will be kVirtual.
-        if (resolved_method->GetInvokeType() == kInterface) {
-          it->flags_ = (it->flags_ & ~(kInvokeTypeMask << kBitInvokeTypeBegin)) |
-              (static_cast<uint16_t>(kInterface) << kBitInvokeTypeBegin);
-        }
-      }
-    }
-    if (UNLIKELY(resolved_method == nullptr)) {
-      continue;
-    }
-
-    compiler_driver->GetResolvedMethodDexFileLocation(resolved_method,
-        &it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_method_idx_);
-    if (!it->IsQuickened()) {
-      // For quickened invoke virtuals we may have desharpened to an interface method which
-      // won't give us the right method index; in this case blindly dispatch, or else we can't
-      // compile the method. Converting the invoke to interface dispatch doesn't work since we
-      // have no way to get the dex method index for quickened invoke virtuals in the interface
-      // trampolines.
-      it->vtable_idx_ =
-          compiler_driver->GetResolvedMethodVTableIndex(resolved_method, invoke_type);
-    }
-
-    MethodReference target_method(it->target_dex_file_, it->target_method_idx_);
-    int fast_path_flags = compiler_driver->IsFastInvoke(
-        soa, current_dex_cache, class_loader, mUnit, referrer_class.Get(), resolved_method,
-        &invoke_type, &target_method, devirt_target, &it->direct_code_, &it->direct_method_);
-    const bool is_referrers_class = referrer_class.Get() == resolved_method->GetDeclaringClass();
-    const bool is_class_initialized =
-        compiler_driver->IsMethodsClassInitialized(referrer_class.Get(), resolved_method);
-
-    // Check if the target method is intrinsic or special.
-    InlineMethodFlags is_intrinsic_or_special = kNoInlineMethodFlags;
-    if (inliner_map != nullptr) {
-      auto* inliner = (target_method.dex_file == dex_file)
-          ? default_inliner
-          : inliner_map->GetMethodInliner(target_method.dex_file);
-      is_intrinsic_or_special = inliner->IsIntrinsicOrSpecial(target_method.dex_method_index);
-    }
-
-    uint16_t other_flags = it->flags_ &
-        ~(kFlagFastPath | kFlagIsIntrinsic | kFlagIsSpecial | kFlagClassIsInitialized |
-            (kInvokeTypeMask << kBitSharpTypeBegin));
-    it->flags_ = other_flags |
-        // String init path is a special always-fast path.
-        (fast_path_flags != 0 || string_init ? kFlagFastPath : 0u) |
-        ((is_intrinsic_or_special & kInlineIntrinsic) != 0 ? kFlagIsIntrinsic : 0u) |
-        ((is_intrinsic_or_special & kInlineSpecial) != 0 ? kFlagIsSpecial : 0u) |
-        (static_cast<uint16_t>(invoke_type) << kBitSharpTypeBegin) |
-        (is_referrers_class ? kFlagIsReferrersClass : 0u) |
-        (is_class_initialized ? kFlagClassIsInitialized : 0u);
-    it->target_dex_file_ = target_method.dex_file;
-    it->target_method_idx_ = target_method.dex_method_index;
-    it->stats_flags_ = fast_path_flags;
-    if (string_init) {
-      it->direct_code_ = 0;
-    }
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
deleted file mode 100644
index 4512f35..0000000
--- a/compiler/dex/mir_method_info.h
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_MIR_METHOD_INFO_H_
-#define ART_COMPILER_DEX_MIR_METHOD_INFO_H_
-
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "invoke_type.h"
-#include "method_reference.h"
-
-namespace art {
-
-class CompilerDriver;
-class DexCompilationUnit;
-class DexFile;
-
-class MirMethodInfo {
- public:
-  uint16_t MethodIndex() const {
-    return method_idx_;
-  }
-
-  bool IsStatic() const {
-    return (flags_ & kFlagIsStatic) != 0u;
-  }
-
-  bool IsResolved() const {
-    return declaring_dex_file_ != nullptr;
-  }
-
-  const DexFile* DeclaringDexFile() const {
-    return declaring_dex_file_;
-  }
-  void SetDeclaringDexFile(const DexFile* dex_file) {
-    declaring_dex_file_ = dex_file;
-  }
-
-  uint16_t DeclaringClassIndex() const {
-    return declaring_class_idx_;
-  }
-
-  uint16_t DeclaringMethodIndex() const {
-    return declaring_method_idx_;
-  }
-
- protected:
-  enum {
-    kBitIsStatic = 0,
-    kMethodInfoBitEnd
-  };
-  static_assert(kMethodInfoBitEnd <= 16, "Too many flags");
-  static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
-
-  MirMethodInfo(uint16_t method_idx, uint16_t flags)
-      : method_idx_(method_idx),
-        flags_(flags),
-        declaring_method_idx_(0u),
-        declaring_class_idx_(0u),
-        declaring_dex_file_(nullptr) {
-  }
-
-  // Make copy-ctor/assign/dtor protected to avoid slicing.
-  MirMethodInfo(const MirMethodInfo& other) = default;
-  MirMethodInfo& operator=(const MirMethodInfo& other) = default;
-  ~MirMethodInfo() = default;
-
-  // The method index in the compiling method's dex file.
-  uint16_t method_idx_;
-  // Flags, for volatility and derived class data.
-  uint16_t flags_;
-  // The method index in the dex file that defines the method, 0 if unresolved.
-  uint16_t declaring_method_idx_;
-  // The type index of the class declaring the method, 0 if unresolved.
-  uint16_t declaring_class_idx_;
-  // The dex file that defines the class containing the method and the method,
-  // null if unresolved.
-  const DexFile* declaring_dex_file_;
-};
-
-class MirMethodLoweringInfo : public MirMethodInfo {
- public:
-  // For each requested method retrieve the method's declaring location (dex file, class
-  // index and method index) and compute whether we can fast path the method call. For fast
-  // path methods, retrieve the method's vtable index and direct code and method when applicable.
-  static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
-                      MirMethodLoweringInfo* method_infos, size_t count)
-      REQUIRES(!Locks::mutator_lock_);
-
-  MirMethodLoweringInfo(uint16_t method_idx, InvokeType type, bool is_quickened)
-      : MirMethodInfo(method_idx,
-                      ((type == kStatic) ? kFlagIsStatic : 0u) |
-                      (static_cast<uint16_t>(type) << kBitInvokeTypeBegin) |
-                      (static_cast<uint16_t>(type) << kBitSharpTypeBegin) |
-                      (is_quickened ? kFlagQuickened : 0u)),
-        direct_code_(0u),
-        direct_method_(0u),
-        target_dex_file_(nullptr),
-        target_method_idx_(0u),
-        vtable_idx_(0u),
-        stats_flags_(0) {
-  }
-
-  void SetDevirtualizationTarget(const MethodReference& ref) {
-    DCHECK(target_dex_file_ == nullptr);
-    DCHECK_EQ(target_method_idx_, 0u);
-    DCHECK_LE(ref.dex_method_index, 0xffffu);
-    target_dex_file_ = ref.dex_file;
-    target_method_idx_ = ref.dex_method_index;
-  }
-
-  bool FastPath() const {
-    return (flags_ & kFlagFastPath) != 0u;
-  }
-
-  bool IsIntrinsic() const {
-    return (flags_ & kFlagIsIntrinsic) != 0u;
-  }
-
-  bool IsSpecial() const {
-    return (flags_ & kFlagIsSpecial) != 0u;
-  }
-
-  bool IsReferrersClass() const {
-    return (flags_ & kFlagIsReferrersClass) != 0;
-  }
-
-  bool IsClassInitialized() const {
-    return (flags_ & kFlagClassIsInitialized) != 0u;
-  }
-
-  // Returns true iff the method invoke is INVOKE_VIRTUAL_QUICK or INVOKE_VIRTUAL_RANGE_QUICK.
-  bool IsQuickened() const {
-    return (flags_ & kFlagQuickened) != 0u;
-  }
-
-  InvokeType GetInvokeType() const {
-    return static_cast<InvokeType>((flags_ >> kBitInvokeTypeBegin) & kInvokeTypeMask);
-  }
-
-  art::InvokeType GetSharpType() const {
-    return static_cast<InvokeType>((flags_ >> kBitSharpTypeBegin) & kInvokeTypeMask);
-  }
-
-  MethodReference GetTargetMethod() const {
-    return MethodReference(target_dex_file_, target_method_idx_);
-  }
-
-  uint16_t VTableIndex() const {
-    return vtable_idx_;
-  }
-  void SetVTableIndex(uint16_t index) {
-    vtable_idx_ = index;
-  }
-
-  uintptr_t DirectCode() const {
-    return direct_code_;
-  }
-
-  uintptr_t DirectMethod() const {
-    return direct_method_;
-  }
-
-  int StatsFlags() const {
-    return stats_flags_;
-  }
-
-  void CheckEquals(const MirMethodLoweringInfo& info) const {
-    CHECK_EQ(method_idx_, info.method_idx_);
-    CHECK_EQ(flags_, info.flags_);
-    CHECK_EQ(declaring_method_idx_, info.declaring_method_idx_);
-    CHECK_EQ(declaring_class_idx_, info.declaring_class_idx_);
-    CHECK_EQ(declaring_dex_file_, info.declaring_dex_file_);
-    CHECK_EQ(direct_code_, info.direct_code_);
-    CHECK_EQ(direct_method_, info.direct_method_);
-    CHECK_EQ(target_dex_file_, info.target_dex_file_);
-    CHECK_EQ(target_method_idx_, info.target_method_idx_);
-    CHECK_EQ(vtable_idx_, info.vtable_idx_);
-    CHECK_EQ(stats_flags_, info.stats_flags_);
-  }
-
- private:
-  enum {
-    kBitFastPath = kMethodInfoBitEnd,
-    kBitIsIntrinsic,
-    kBitIsSpecial,
-    kBitInvokeTypeBegin,
-    kBitInvokeTypeEnd = kBitInvokeTypeBegin + 3,  // 3 bits for invoke type.
-    kBitSharpTypeBegin = kBitInvokeTypeEnd,
-    kBitSharpTypeEnd = kBitSharpTypeBegin + 3,  // 3 bits for sharp type.
-    kBitIsReferrersClass = kBitSharpTypeEnd,
-    kBitClassIsInitialized,
-    kBitQuickened,
-    kMethodLoweringInfoBitEnd
-  };
-  static_assert(kMethodLoweringInfoBitEnd <= 16, "Too many flags");
-  static constexpr uint16_t kFlagFastPath = 1u << kBitFastPath;
-  static constexpr uint16_t kFlagIsIntrinsic = 1u << kBitIsIntrinsic;
-  static constexpr uint16_t kFlagIsSpecial = 1u << kBitIsSpecial;
-  static constexpr uint16_t kFlagIsReferrersClass = 1u << kBitIsReferrersClass;
-  static constexpr uint16_t kFlagClassIsInitialized = 1u << kBitClassIsInitialized;
-  static constexpr uint16_t kFlagQuickened = 1u << kBitQuickened;
-  static constexpr uint16_t kInvokeTypeMask = 7u;
-  static_assert((1u << (kBitInvokeTypeEnd - kBitInvokeTypeBegin)) - 1u == kInvokeTypeMask,
-                "assert invoke type bits failed");
-  static_assert((1u << (kBitSharpTypeEnd - kBitSharpTypeBegin)) - 1u == kInvokeTypeMask,
-                "assert sharp type bits failed");
-
-  uintptr_t direct_code_;
-  uintptr_t direct_method_;
-  // Before Resolve(), target_dex_file_ and target_method_idx_ hold the verification-based
-  // devirtualized invoke target if available, null and 0u otherwise.
-  // After Resolve() they hold the actual target method that will be called; it will be either
-  // a devirtualized target method or the compilation unit's dex file and MethodIndex().
-  const DexFile* target_dex_file_;
-  uint16_t target_method_idx_;
-  uint16_t vtable_idx_;
-  int stats_flags_;
-
-  friend class MirOptimizationTest;
-  friend class TypeInferenceTest;
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_MIR_METHOD_INFO_H_
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
deleted file mode 100644
index 0e74a48..0000000
--- a/compiler/dex/mir_optimization.cc
+++ /dev/null
@@ -1,1997 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/bit_vector-inl.h"
-#include "base/logging.h"
-#include "base/scoped_arena_containers.h"
-#include "class_linker-inl.h"
-#include "dataflow_iterator-inl.h"
-#include "dex/verified_method.h"
-#include "dex_flags.h"
-#include "driver/compiler_driver.h"
-#include "driver/dex_compilation_unit.h"
-#include "global_value_numbering.h"
-#include "gvn_dead_code_elimination.h"
-#include "local_value_numbering.h"
-#include "mir_field_info.h"
-#include "mirror/string.h"
-#include "quick/dex_file_method_inliner.h"
-#include "quick/dex_file_to_method_inliner_map.h"
-#include "stack.h"
-#include "thread-inl.h"
-#include "type_inference.h"
-#include "utils.h"
-
-namespace art {
-
-static unsigned int Predecessors(BasicBlock* bb) {
-  return bb->predecessors.size();
-}
-
-/* Set up a constant value for opcodes that have the DF_SETS_CONST attribute */
-void MIRGraph::SetConstant(int32_t ssa_reg, int32_t value) {
-  is_constant_v_->SetBit(ssa_reg);
-  constant_values_[ssa_reg] = value;
-  reg_location_[ssa_reg].is_const = true;
-}
-
-void MIRGraph::SetConstantWide(int32_t ssa_reg, int64_t value) {
-  is_constant_v_->SetBit(ssa_reg);
-  is_constant_v_->SetBit(ssa_reg + 1);
-  constant_values_[ssa_reg] = Low32Bits(value);
-  constant_values_[ssa_reg + 1] = High32Bits(value);
-  reg_location_[ssa_reg].is_const = true;
-  reg_location_[ssa_reg + 1].is_const = true;
-}
-
-void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
-  MIR* mir;
-
-  for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-    // Skip pass if BB has MIR without SSA representation.
-    if (mir->ssa_rep == nullptr) {
-       return;
-    }
-
-    uint64_t df_attributes = GetDataFlowAttributes(mir);
-
-    MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
-
-    if (!(df_attributes & DF_HAS_DEFS)) continue;
-
-    /* Handle instructions that set up constants directly */
-    if (df_attributes & DF_SETS_CONST) {
-      if (df_attributes & DF_DA) {
-        int32_t vB = static_cast<int32_t>(d_insn->vB);
-        switch (d_insn->opcode) {
-          case Instruction::CONST_4:
-          case Instruction::CONST_16:
-          case Instruction::CONST:
-            SetConstant(mir->ssa_rep->defs[0], vB);
-            break;
-          case Instruction::CONST_HIGH16:
-            SetConstant(mir->ssa_rep->defs[0], vB << 16);
-            break;
-          case Instruction::CONST_WIDE_16:
-          case Instruction::CONST_WIDE_32:
-            SetConstantWide(mir->ssa_rep->defs[0], static_cast<int64_t>(vB));
-            break;
-          case Instruction::CONST_WIDE:
-            SetConstantWide(mir->ssa_rep->defs[0], d_insn->vB_wide);
-            break;
-          case Instruction::CONST_WIDE_HIGH16:
-            SetConstantWide(mir->ssa_rep->defs[0], static_cast<int64_t>(vB) << 48);
-            break;
-          default:
-            break;
-        }
-      }
-    } else if (df_attributes & DF_IS_MOVE) {
-      int i;
-
-      for (i = 0; i < mir->ssa_rep->num_uses; i++) {
-        if (!is_constant_v_->IsBitSet(mir->ssa_rep->uses[i])) break;
-      }
-      /* Move a register holding a constant to another register */
-      if (i == mir->ssa_rep->num_uses) {
-        SetConstant(mir->ssa_rep->defs[0], constant_values_[mir->ssa_rep->uses[0]]);
-        if (df_attributes & DF_A_WIDE) {
-          SetConstant(mir->ssa_rep->defs[1], constant_values_[mir->ssa_rep->uses[1]]);
-        }
-      }
-    }
-  }
-  /* TODO: implement code to handle arithmetic operations */
-}
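
For concreteness, a worked instance of the two HIGH16 cases above (operand values are
illustrative): CONST_HIGH16 with vB = 0x2041 defines the constant 0x2041 << 16 = 0x20410000,
while CONST_WIDE_HIGH16 with vB = 0x4000 defines int64_t(0x4000) << 48 = 0x4000000000000000,
the bit pattern of the double 2.0.
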
-
-/* Advance to the next strictly dominated MIR node in an extended basic block */
-MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
-  BasicBlock* bb = *p_bb;
-  if (mir != nullptr) {
-    mir = mir->next;
-    while (mir == nullptr) {
-      bb = GetBasicBlock(bb->fall_through);
-      if ((bb == nullptr) || Predecessors(bb) != 1) {
-        // mir is null and we cannot proceed further.
-        break;
-      } else {
-        *p_bb = bb;
-        mir = bb->first_mir_insn;
-      }
-    }
-  }
-  return mir;
-}
-
-/*
- * To be used at an invoke mir.  If the logically next mir node represents
- * a move-result, return it.  Else, return nullptr.  If a move-result exists,
- * it is required to immediately follow the invoke with no intervening
- * opcodes or incoming arcs.  However, if the result of the invoke is not
- * used, a move-result may not be present.
- */
-MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
-  BasicBlock* tbb = bb;
-  mir = AdvanceMIR(&tbb, mir);
-  while (mir != nullptr) {
-    if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
-        (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) ||
-        (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
-      break;
-    }
-    // Keep going if it is a pseudo op; otherwise terminate.
-    if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
-      mir = AdvanceMIR(&tbb, mir);
-    } else {
-      mir = nullptr;
-    }
-  }
-  return mir;
-}
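
A usage sketch under stated assumptions: a pass positioned at an invoke `invoke_mir` in
block `bb` (both names hypothetical) would look up the consuming move-result like so:

    MIR* move_result_mir = FindMoveResult(bb, invoke_mir);
    if (move_result_mir != nullptr) {
      // The result is consumed; its SSA name is the move-result's only def.
      int result_s_reg = move_result_mir->ssa_rep->defs[0];
      // ... rewrite or propagate `result_s_reg` ...
    }  // A null return means the invoke's result is unused.
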
-
-BasicBlock* MIRGraph::NextDominatedBlock(BasicBlock* bb) {
-  if (bb->block_type == kDead) {
-    return nullptr;
-  }
-  DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
-      || (bb->block_type == kExitBlock));
-  BasicBlock* bb_taken = GetBasicBlock(bb->taken);
-  BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
-  if (((bb_fall_through == nullptr) && (bb_taken != nullptr)) &&
-      ((bb_taken->block_type == kDalvikByteCode) || (bb_taken->block_type == kExitBlock))) {
-    // Follow simple unconditional branches.
-    bb = bb_taken;
-  } else {
-    // Follow simple fallthrough
-    bb = (bb_taken != nullptr) ? nullptr : bb_fall_through;
-  }
-  if (bb == nullptr || (Predecessors(bb) != 1)) {
-    return nullptr;
-  }
-  DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
-  return bb;
-}
-
-static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
-  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-    if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
-      for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
-        if (mir->ssa_rep->uses[i] == ssa_name) {
-          return mir;
-        }
-      }
-    }
-  }
-  return nullptr;
-}
-
-static SelectInstructionKind SelectKind(MIR* mir) {
-  // Handle the case when mir is null.
-  if (mir == nullptr) {
-    return kSelectNone;
-  }
-  switch (mir->dalvikInsn.opcode) {
-    case Instruction::MOVE:
-    case Instruction::MOVE_OBJECT:
-    case Instruction::MOVE_16:
-    case Instruction::MOVE_OBJECT_16:
-    case Instruction::MOVE_FROM16:
-    case Instruction::MOVE_OBJECT_FROM16:
-      return kSelectMove;
-    case Instruction::CONST:
-    case Instruction::CONST_4:
-    case Instruction::CONST_16:
-      return kSelectConst;
-    case Instruction::GOTO:
-    case Instruction::GOTO_16:
-    case Instruction::GOTO_32:
-      return kSelectGoto;
-    default:
-      return kSelectNone;
-  }
-}
-
-static constexpr ConditionCode kIfCcZConditionCodes[] = {
-    kCondEq, kCondNe, kCondLt, kCondGe, kCondGt, kCondLe
-};
-
-static_assert(arraysize(kIfCcZConditionCodes) == Instruction::IF_LEZ - Instruction::IF_EQZ + 1,
-              "if_ccz_ccodes_size1");
-
-static constexpr ConditionCode ConditionCodeForIfCcZ(Instruction::Code opcode) {
-  return kIfCcZConditionCodes[opcode - Instruction::IF_EQZ];
-}
-
-static_assert(ConditionCodeForIfCcZ(Instruction::IF_EQZ) == kCondEq, "if_eqz ccode");
-static_assert(ConditionCodeForIfCcZ(Instruction::IF_NEZ) == kCondNe, "if_nez ccode");
-static_assert(ConditionCodeForIfCcZ(Instruction::IF_LTZ) == kCondLt, "if_ltz ccode");
-static_assert(ConditionCodeForIfCcZ(Instruction::IF_GEZ) == kCondGe, "if_gez ccode");
-static_assert(ConditionCodeForIfCcZ(Instruction::IF_GTZ) == kCondGt, "if_gtz ccode");
-static_assert(ConditionCodeForIfCcZ(Instruction::IF_LEZ) == kCondLe, "if_lez ccode");
-
-int MIRGraph::GetSSAUseCount(int s_reg) {
-  DCHECK_LT(static_cast<size_t>(s_reg), ssa_subscripts_.size());
-  return raw_use_counts_[s_reg];
-}
-
-size_t MIRGraph::GetNumBytesForSpecialTemps() const {
-  // This logic is written with the assumption that Method* is the only special temp.
-  DCHECK_EQ(max_available_special_compiler_temps_, 1u);
-  return InstructionSetPointerSize(cu_->instruction_set);
-}
-
-size_t MIRGraph::GetNumAvailableVRTemps() {
-  // First take into account all temps reserved for backend.
-  if (max_available_non_special_compiler_temps_ < reserved_temps_for_backend_) {
-    return 0;
-  }
-
-  // Calculate remaining ME temps available.
-  size_t remaining_me_temps = max_available_non_special_compiler_temps_ -
-      reserved_temps_for_backend_;
-
-  if (num_non_special_compiler_temps_ >= remaining_me_temps) {
-    return 0;
-  } else {
-    return remaining_me_temps - num_non_special_compiler_temps_;
-  }
-}
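
As a worked example of the arithmetic above (the numbers are illustrative): with
max_available_non_special_compiler_temps_ = 7, reserved_temps_for_backend_ = 2 and
num_non_special_compiler_temps_ = 3, the remaining ME budget is 7 - 2 = 5, of which 3 are
already used, so the function returns 2.
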
-
-// FIXME - will probably need to revisit all uses of this, as type not defined.
-static const RegLocation temp_loc = {kLocCompilerTemp,
-                                     0, 1 /*defined*/, 0, 0, 0, 0, 0, 1 /*home*/,
-                                     RegStorage(), INVALID_SREG, INVALID_SREG};
-
-CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide) {
-  // Once the compiler temps have been committed, new ones cannot be requested anymore.
-  DCHECK_EQ(compiler_temps_committed_, false);
-  // Make sure that the set of temps reserved for the BE is sane.
-  DCHECK_LE(reserved_temps_for_backend_, max_available_non_special_compiler_temps_);
-
-  bool verbose = cu_->verbose;
-  const char* ct_type_str = nullptr;
-
-  if (verbose) {
-    switch (ct_type) {
-      case kCompilerTempBackend:
-        ct_type_str = "backend";
-        break;
-      case kCompilerTempSpecialMethodPtr:
-        ct_type_str = "method*";
-        break;
-      case kCompilerTempVR:
-        ct_type_str = "VR";
-        break;
-      default:
-        ct_type_str = "unknown";
-        break;
-    }
-    LOG(INFO) << "CompilerTemps: A compiler temp of type " << ct_type_str << " that is "
-        << (wide ? "wide is being requested." : "not wide is being requested.");
-  }
-
-  CompilerTemp *compiler_temp = static_cast<CompilerTemp *>(arena_->Alloc(sizeof(CompilerTemp),
-                                                            kArenaAllocRegAlloc));
-
-  // Create the type of temp requested. Special temps need special handling because
-  // they have a specific virtual register assignment.
-  if (ct_type == kCompilerTempSpecialMethodPtr) {
-    // This has a special location on stack which is 32-bit or 64-bit depending
-    // on mode. However, we don't want to overlap with non-special section
-    // and thus even for 64-bit, we allow only a non-wide temp to be requested.
-    DCHECK_EQ(wide, false);
-
-    // The vreg is always the first special temp for method ptr.
-    compiler_temp->v_reg = GetFirstSpecialTempVR();
-
-    CHECK(reg_location_ == nullptr);
-  } else if (ct_type == kCompilerTempBackend) {
-    requested_backend_temp_ = true;
-
-    // Make sure that we are not exceeding temps reserved for BE.
-    // Since VR temps cannot be requested once the BE temps are requested, we
-    // allow reservation of VR temps as well for BE.
-    size_t available_temps = reserved_temps_for_backend_ + GetNumAvailableVRTemps();
-    size_t needed_temps = wide ? 2u : 1u;
-    if (available_temps < needed_temps) {
-      if (verbose) {
-        LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str
-            << " are available.";
-      }
-      return nullptr;
-    }
-
-    // Update the remaining reserved temps since we have now used them.
-    // Note that the code below is actually subtracting to remove them from the
-    // reserve once they have been claimed. It is careful not to go below zero.
-    reserved_temps_for_backend_ =
-        std::max(reserved_temps_for_backend_, needed_temps) - needed_temps;
-
-    // The new non-special compiler temp must receive a unique v_reg.
-    compiler_temp->v_reg = GetFirstNonSpecialTempVR() + num_non_special_compiler_temps_;
-    num_non_special_compiler_temps_++;
-  } else if (ct_type == kCompilerTempVR) {
-    // Once we start giving out BE temps, we no longer allow ME temps to be requested.
-    // This prevents problems with SSA, since those structures are allocated and
-    // managed by the ME.
-    DCHECK_EQ(requested_backend_temp_, false);
-
-    // There is a limit to the number of non-special temps so check to make sure it wasn't exceeded.
-    size_t available_temps = GetNumAvailableVRTemps();
-    if (available_temps == 0u || (available_temps == 1u && wide)) {
-      if (verbose) {
-        LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str
-            << " are available.";
-      }
-      return nullptr;
-    }
-
-    // The new non-special compiler temp must receive a unique v_reg.
-    compiler_temp->v_reg = GetFirstNonSpecialTempVR() + num_non_special_compiler_temps_;
-    num_non_special_compiler_temps_++;
-  } else {
-    UNIMPLEMENTED(FATAL) << "No handling for compiler temp type "
-                         << static_cast<int>(ct_type) << ".";
-  }
-
-  // We allocate an sreg as well to make developers' lives easier.
-  // However, if this is requested from an ME pass that will recalculate SSA afterwards,
-  // this sreg is no longer valid. The caller should be aware of this.
-  compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
-
-  if (verbose) {
-    LOG(INFO) << "CompilerTemps: New temp of type " << ct_type_str << " with v"
-        << compiler_temp->v_reg << " and s" << compiler_temp->s_reg_low << " has been created.";
-  }
-
-  if (wide) {
-    // Only non-special temps are handled as wide for now.
-    // Note that the number of non-special temps is incremented below.
-    DCHECK(ct_type == kCompilerTempBackend || ct_type == kCompilerTempVR);
-
-    // Ensure that the two registers are consecutive.
-    int ssa_reg_low = compiler_temp->s_reg_low;
-    int ssa_reg_high = AddNewSReg(compiler_temp->v_reg + 1);
-    num_non_special_compiler_temps_++;
-
-    if (verbose) {
-      LOG(INFO) << "CompilerTemps: The wide part of temp of type " << ct_type_str << " is v"
-          << compiler_temp->v_reg + 1 << " and s" << ssa_reg_high << ".";
-    }
-
-    if (reg_location_ != nullptr) {
-      reg_location_[ssa_reg_high] = temp_loc;
-      reg_location_[ssa_reg_high].high_word = true;
-      reg_location_[ssa_reg_high].s_reg_low = ssa_reg_low;
-      reg_location_[ssa_reg_high].wide = true;
-    }
-  }
-
-  // If the register locations have already been allocated, add the information
-  // about the temp. We will not overflow because they have been initialized
-  // to support the maximum number of temps. For ME temps that have multiple
-  // SSA versions, the structures below will be expanded during the post-pass cleanup.
-  if (reg_location_ != nullptr) {
-    int ssa_reg_low = compiler_temp->s_reg_low;
-    reg_location_[ssa_reg_low] = temp_loc;
-    reg_location_[ssa_reg_low].s_reg_low = ssa_reg_low;
-    reg_location_[ssa_reg_low].wide = wide;
-  }
-
-  return compiler_temp;
-}
-
-void MIRGraph::RemoveLastCompilerTemp(CompilerTempType ct_type, bool wide, CompilerTemp* temp) {
-  // Once the compiler temps have been committed, it's too late for any modifications.
-  DCHECK_EQ(compiler_temps_committed_, false);
-
-  size_t used_temps = wide ? 2u : 1u;
-
-  if (ct_type == kCompilerTempBackend) {
-    DCHECK(requested_backend_temp_);
-
-    // Make the temps available to backend again.
-    reserved_temps_for_backend_ += used_temps;
-  } else if (ct_type == kCompilerTempVR) {
-    DCHECK(!requested_backend_temp_);
-  } else {
-    UNIMPLEMENTED(FATAL) << "No handling for compiler temp type " << static_cast<int>(ct_type);
-  }
-
-  // Reduce the number of non-special compiler temps.
-  DCHECK_LE(used_temps, num_non_special_compiler_temps_);
-  num_non_special_compiler_temps_ -= used_temps;
-
-  // Check that this was really the last temp.
-  DCHECK_EQ(static_cast<size_t>(temp->v_reg),
-            GetFirstNonSpecialTempVR() + num_non_special_compiler_temps_);
-
-  if (cu_->verbose) {
-    LOG(INFO) << "Last temporary has been removed.";
-  }
-}
-
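-// Statically evaluates a conditional branch over compile-time constant operands;
-// e.g. EvaluateBranch(Instruction::IF_LT, 3, 5) yields true. The one-operand
-// IF_xxZ forms receive 0 as src2 and compare src1 against zero.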
-static bool EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) {
-  bool is_taken;
-  switch (opcode) {
-    case Instruction::IF_EQ: is_taken = (src1 == src2); break;
-    case Instruction::IF_NE: is_taken = (src1 != src2); break;
-    case Instruction::IF_LT: is_taken = (src1 < src2); break;
-    case Instruction::IF_GE: is_taken = (src1 >= src2); break;
-    case Instruction::IF_GT: is_taken = (src1 > src2); break;
-    case Instruction::IF_LE: is_taken = (src1 <= src2); break;
-    case Instruction::IF_EQZ: is_taken = (src1 == 0); break;
-    case Instruction::IF_NEZ: is_taken = (src1 != 0); break;
-    case Instruction::IF_LTZ: is_taken = (src1 < 0); break;
-    case Instruction::IF_GEZ: is_taken = (src1 >= 0); break;
-    case Instruction::IF_GTZ: is_taken = (src1 > 0); break;
-    case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
-    default:
-      LOG(FATAL) << "Unexpected opcode " << opcode;
-      UNREACHABLE();
-  }
-  return is_taken;
-}
-
-/* Do some MIR-level extended basic block optimizations */
-bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
-  if (bb->block_type == kDead) {
-    return true;
-  }
-  // Currently, multiply-accumulate backend support is only available on arm32 and arm64.
-  if (cu_->instruction_set == kArm64 || cu_->instruction_set == kThumb2) {
-    MultiplyAddOpt(bb);
-  }
-  bool use_lvn = bb->use_lvn && (cu_->disable_opt & (1u << kLocalValueNumbering)) == 0u;
-  std::unique_ptr<ScopedArenaAllocator> allocator;
-  std::unique_ptr<GlobalValueNumbering> global_valnum;
-  std::unique_ptr<LocalValueNumbering> local_valnum;
-  if (use_lvn) {
-    allocator.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
-    global_valnum.reset(new (allocator.get()) GlobalValueNumbering(cu_, allocator.get(),
-                                                                   GlobalValueNumbering::kModeLvn));
-    local_valnum.reset(new (allocator.get()) LocalValueNumbering(global_valnum.get(), bb->id,
-                                                                 allocator.get()));
-  }
-  while (bb != nullptr) {
-    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      // TUNING: use the returned value number for CSE.
-      if (use_lvn) {
-        local_valnum->GetValueNumber(mir);
-      }
-      // Look for interesting opcodes; skip otherwise.
-      Instruction::Code opcode = mir->dalvikInsn.opcode;
-      switch (opcode) {
-        case Instruction::IF_EQ:
-        case Instruction::IF_NE:
-        case Instruction::IF_LT:
-        case Instruction::IF_GE:
-        case Instruction::IF_GT:
-        case Instruction::IF_LE:
-          if (!IsConst(mir->ssa_rep->uses[1])) {
-            break;
-          }
-          FALLTHROUGH_INTENDED;
-        case Instruction::IF_EQZ:
-        case Instruction::IF_NEZ:
-        case Instruction::IF_LTZ:
-        case Instruction::IF_GEZ:
-        case Instruction::IF_GTZ:
-        case Instruction::IF_LEZ:
-          // Result known at compile time?
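-          // E.g. "if-ge vA, vB" where the operands are the known constants 7 and 5
-          // always takes the branch and is rewritten below as a GOTO.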
-          if (IsConst(mir->ssa_rep->uses[0])) {
-            int32_t rhs = (mir->ssa_rep->num_uses == 2) ? ConstantValue(mir->ssa_rep->uses[1]) : 0;
-            bool is_taken = EvaluateBranch(opcode, ConstantValue(mir->ssa_rep->uses[0]), rhs);
-            BasicBlockId edge_to_kill = is_taken ? bb->fall_through : bb->taken;
-            if (is_taken) {
-              // Replace with GOTO.
-              bb->fall_through = NullBasicBlockId;
-              mir->dalvikInsn.opcode = Instruction::GOTO;
-              mir->dalvikInsn.vA =
-                  IsInstructionIfCc(opcode) ? mir->dalvikInsn.vC : mir->dalvikInsn.vB;
-            } else {
-              // Make NOP.
-              bb->taken = NullBasicBlockId;
-              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-            }
-            mir->ssa_rep->num_uses = 0;
-            BasicBlock* successor_to_unlink = GetBasicBlock(edge_to_kill);
-            successor_to_unlink->ErasePredecessor(bb->id);
-            // We have changed the graph structure.
-            dfs_orders_up_to_date_ = false;
-            domination_up_to_date_ = false;
-            topological_order_up_to_date_ = false;
-            // Keep the MIR SSA rep; the worst that can happen is a Phi with just 1 input.
-          }
-          break;
-        case Instruction::CMPL_FLOAT:
-        case Instruction::CMPL_DOUBLE:
-        case Instruction::CMPG_FLOAT:
-        case Instruction::CMPG_DOUBLE:
-        case Instruction::CMP_LONG:
-          if ((cu_->disable_opt & (1 << kBranchFusing)) != 0) {
-            // Bitcode doesn't allow this optimization.
-            break;
-          }
-          if (mir->next != nullptr) {
-            MIR* mir_next = mir->next;
-            // Make sure the result of the cmp is used by the next insn and nowhere else.
-            if (IsInstructionIfCcZ(mir_next->dalvikInsn.opcode) &&
-                (mir->ssa_rep->defs[0] == mir_next->ssa_rep->uses[0]) &&
-                (GetSSAUseCount(mir->ssa_rep->defs[0]) == 1)) {
-              mir_next->meta.ccode = ConditionCodeForIfCcZ(mir_next->dalvikInsn.opcode);
-              switch (opcode) {
-                case Instruction::CMPL_FLOAT:
-                  mir_next->dalvikInsn.opcode =
-                      static_cast<Instruction::Code>(kMirOpFusedCmplFloat);
-                  break;
-                case Instruction::CMPL_DOUBLE:
-                  mir_next->dalvikInsn.opcode =
-                      static_cast<Instruction::Code>(kMirOpFusedCmplDouble);
-                  break;
-                case Instruction::CMPG_FLOAT:
-                  mir_next->dalvikInsn.opcode =
-                      static_cast<Instruction::Code>(kMirOpFusedCmpgFloat);
-                  break;
-                case Instruction::CMPG_DOUBLE:
-                  mir_next->dalvikInsn.opcode =
-                      static_cast<Instruction::Code>(kMirOpFusedCmpgDouble);
-                  break;
-                case Instruction::CMP_LONG:
-                  mir_next->dalvikInsn.opcode =
-                      static_cast<Instruction::Code>(kMirOpFusedCmpLong);
-                  break;
-                default: LOG(ERROR) << "Unexpected opcode: " << opcode;
-              }
-              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-              // Clear use count of temp VR.
-              use_counts_[mir->ssa_rep->defs[0]] = 0;
-              raw_use_counts_[mir->ssa_rep->defs[0]] = 0;
-              // Copy the SSA information that is relevant.
-              mir_next->ssa_rep->num_uses = mir->ssa_rep->num_uses;
-              mir_next->ssa_rep->uses = mir->ssa_rep->uses;
-              mir_next->ssa_rep->num_defs = 0;
-              mir->ssa_rep->num_uses = 0;
-              mir->ssa_rep->num_defs = 0;
-              // Copy in the decoded instruction information for potential SSA re-creation.
-              mir_next->dalvikInsn.vA = mir->dalvikInsn.vB;
-              mir_next->dalvikInsn.vB = mir->dalvikInsn.vC;
-            }
-          }
-          break;
-        default:
-          break;
-      }
-      // Is this the select pattern?
-      // TODO: flesh out support for Mips.  NOTE: llvm's select op doesn't quite work here.
-      // TUNING: expand to support IF_xx compare & branches
-      if ((cu_->instruction_set == kArm64 || cu_->instruction_set == kThumb2 ||
-           cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) &&
-          IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
-        BasicBlock* ft = GetBasicBlock(bb->fall_through);
-        DCHECK(ft != nullptr);
-        BasicBlock* ft_ft = GetBasicBlock(ft->fall_through);
-        BasicBlock* ft_tk = GetBasicBlock(ft->taken);
-
-        BasicBlock* tk = GetBasicBlock(bb->taken);
-        DCHECK(tk != nullptr);
-        BasicBlock* tk_ft = GetBasicBlock(tk->fall_through);
-        BasicBlock* tk_tk = GetBasicBlock(tk->taken);
-
-        /*
-         * In the select pattern, the taken edge goes to a block that unconditionally
-         * transfers to the rejoin block and the fall_through edge goes to a block that
-         * unconditionally falls through to the rejoin block.
-         */
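-        /*
-         * Schematically, the diamond being matched:
-         *
-         *                 bb
-         *          taken /  \ fall_through
-         *              tk    ft
-         *                \  /
-         *           tk_tk == ft_ft  (rejoin)
-         */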
-        if ((tk_ft == nullptr) && (ft_tk == nullptr) && (tk_tk == ft_ft) &&
-            (Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
-          /*
-           * Okay - we have the basic diamond shape.
-           */
-
-          // TODO: Add logic for LONG.
-          // Are the block bodies something we can handle?
-          if ((ft->first_mir_insn == ft->last_mir_insn) &&
-              (tk->first_mir_insn != tk->last_mir_insn) &&
-              (tk->first_mir_insn->next == tk->last_mir_insn) &&
-              ((SelectKind(ft->first_mir_insn) == kSelectMove) ||
-              (SelectKind(ft->first_mir_insn) == kSelectConst)) &&
-              (SelectKind(ft->first_mir_insn) == SelectKind(tk->first_mir_insn)) &&
-              (SelectKind(tk->last_mir_insn) == kSelectGoto)) {
-            // Almost there.  Are the instructions targeting the same vreg?
-            MIR* if_true = tk->first_mir_insn;
-            MIR* if_false = ft->first_mir_insn;
-            // It's possible that the target of the select isn't used - skip those (rare) cases.
-            MIR* phi = FindPhi(tk_tk, if_true->ssa_rep->defs[0]);
-            if ((phi != nullptr) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
-              /*
-               * We'll convert the IF_EQZ/IF_NEZ to a SELECT.  We need to find the
-               * Phi node in the merge block and delete it (while using the SSA name
-               * of the merge as the target of the SELECT).  Delete both taken and
-               * fallthrough blocks, and set fallthrough to merge block.
-               * NOTE: not updating other dataflow info (no longer used at this point).
-               * If this changes, need to update i_dom, etc. here (and in CombineBlocks).
-               */
-              mir->meta.ccode = ConditionCodeForIfCcZ(mir->dalvikInsn.opcode);
-              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpSelect);
-              bool const_form = (SelectKind(if_true) == kSelectConst);
-              if ((SelectKind(if_true) == kSelectMove)) {
-                if (IsConst(if_true->ssa_rep->uses[0]) &&
-                    IsConst(if_false->ssa_rep->uses[0])) {
-                    const_form = true;
-                    if_true->dalvikInsn.vB = ConstantValue(if_true->ssa_rep->uses[0]);
-                    if_false->dalvikInsn.vB = ConstantValue(if_false->ssa_rep->uses[0]);
-                }
-              }
-              if (const_form) {
-                /*
-                 * TODO: If both constants are the same value, then instead of generating
-                 * a select, we should simply generate a const bytecode. This should be
-                 * considered after inlining which can lead to CFG of this form.
-                 */
-                // "true" set val in vB
-                mir->dalvikInsn.vB = if_true->dalvikInsn.vB;
-                // "false" set val in vC
-                mir->dalvikInsn.vC = if_false->dalvikInsn.vB;
-              } else {
-                DCHECK_EQ(SelectKind(if_true), kSelectMove);
-                DCHECK_EQ(SelectKind(if_false), kSelectMove);
-                int32_t* src_ssa = arena_->AllocArray<int32_t>(3, kArenaAllocDFInfo);
-                src_ssa[0] = mir->ssa_rep->uses[0];
-                src_ssa[1] = if_true->ssa_rep->uses[0];
-                src_ssa[2] = if_false->ssa_rep->uses[0];
-                mir->ssa_rep->uses = src_ssa;
-                mir->ssa_rep->num_uses = 3;
-              }
-              AllocateSSADefData(mir, 1);
-              /*
-               * There is usually a Phi node in the join block for our two cases.  If the
-               * Phi node only contains our two cases as input, we will use the result
-               * SSA name of the Phi node as our select result and delete the Phi.  If
-               * the Phi node has more than two operands, we will arbitrarily use the SSA
-               * name of the "false" path, delete the SSA name of the "true" path from the
-               * Phi node (and fix up the incoming arc list).
-               */
-              if (phi->ssa_rep->num_uses == 2) {
-                mir->ssa_rep->defs[0] = phi->ssa_rep->defs[0];
-                // Rather than changing the Phi to kMirOpNop, remove it completely.
-                // This avoids leaving other Phis after kMirOpNop (i.e. a non-Phi) insn.
-                tk_tk->RemoveMIR(phi);
-                int dead_false_def = if_false->ssa_rep->defs[0];
-                raw_use_counts_[dead_false_def] = use_counts_[dead_false_def] = 0;
-              } else {
-                int live_def = if_false->ssa_rep->defs[0];
-                mir->ssa_rep->defs[0] = live_def;
-              }
-              int dead_true_def = if_true->ssa_rep->defs[0];
-              raw_use_counts_[dead_true_def] = use_counts_[dead_true_def] = 0;
-              // Update ending vreg->sreg map for GC maps generation.
-              int def_vreg = SRegToVReg(mir->ssa_rep->defs[0]);
-              bb->data_flow_info->vreg_to_ssa_map_exit[def_vreg] = mir->ssa_rep->defs[0];
-              // We want to remove ft and tk and link bb directly to ft_ft. First, we need
-              // to update all Phi inputs correctly with UpdatePredecessor(ft->id, bb->id)
-              // since the live_def above comes from ft->first_mir_insn (if_false).
-              DCHECK(if_false == ft->first_mir_insn);
-              ft_ft->UpdatePredecessor(ft->id, bb->id);
-              // Correct the rest of the links between bb, ft and ft_ft.
-              ft->ErasePredecessor(bb->id);
-              ft->fall_through = NullBasicBlockId;
-              bb->fall_through = ft_ft->id;
-              // Now we can kill tk and ft.
-              tk->Kill(this);
-              ft->Kill(this);
-              // NOTE: DFS order, domination info and topological order are still usable
-              // despite the newly dead blocks.
-            }
-          }
-        }
-      }
-    }
-    bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) :
-        nullptr;
-  }
-  if (use_lvn && UNLIKELY(!global_valnum->Good())) {
-    LOG(WARNING) << "LVN overflow in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-  }
-
-  return true;
-}
-
-/* Collect stats on number of checks removed */
-void MIRGraph::CountChecks(class BasicBlock* bb) {
-  if (bb->data_flow_info != nullptr) {
-    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      if (mir->ssa_rep == nullptr) {
-        continue;
-      }
-      uint64_t df_attributes = GetDataFlowAttributes(mir);
-      if (df_attributes & DF_HAS_NULL_CHKS) {
-        checkstats_->null_checks++;
-        if (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) {
-          checkstats_->null_checks_eliminated++;
-        }
-      }
-      if (df_attributes & DF_HAS_RANGE_CHKS) {
-        checkstats_->range_checks++;
-        if (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) {
-          checkstats_->range_checks_eliminated++;
-        }
-      }
-    }
-  }
-}
-
-/* Try to make the common case the fallthrough path. */
-bool MIRGraph::LayoutBlocks(BasicBlock* bb) {
-  // TODO: For now, just looking for direct throws.  Consider generalizing for profile feedback.
-  if (!bb->explicit_throw) {
-    return false;
-  }
-
-  // If we visited it, we are done.
-  if (bb->visited) {
-    return false;
-  }
-  bb->visited = true;
-
-  BasicBlock* walker = bb;
-  while (true) {
-    // Check termination conditions.
-    if ((walker->block_type == kEntryBlock) || (Predecessors(walker) != 1)) {
-      break;
-    }
-    DCHECK(!walker->predecessors.empty());
-    BasicBlock* prev = GetBasicBlock(walker->predecessors[0]);
-
-    // If we visited the predecessor, we are done.
-    if (prev->visited) {
-      return false;
-    }
-    prev->visited = true;
-
-    if (prev->conditional_branch) {
-      if (GetBasicBlock(prev->fall_through) == walker) {
-        // Already done - return.
-        break;
-      }
-      DCHECK_EQ(walker, GetBasicBlock(prev->taken));
-      // Got one.  Flip it and exit.
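-      // The condition is inverted and the edges swapped below, moving the path
-      // that leads to the throw onto the fall_through edge.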
-      Instruction::Code opcode = prev->last_mir_insn->dalvikInsn.opcode;
-      switch (opcode) {
-        case Instruction::IF_EQ: opcode = Instruction::IF_NE; break;
-        case Instruction::IF_NE: opcode = Instruction::IF_EQ; break;
-        case Instruction::IF_LT: opcode = Instruction::IF_GE; break;
-        case Instruction::IF_GE: opcode = Instruction::IF_LT; break;
-        case Instruction::IF_GT: opcode = Instruction::IF_LE; break;
-        case Instruction::IF_LE: opcode = Instruction::IF_GT; break;
-        case Instruction::IF_EQZ: opcode = Instruction::IF_NEZ; break;
-        case Instruction::IF_NEZ: opcode = Instruction::IF_EQZ; break;
-        case Instruction::IF_LTZ: opcode = Instruction::IF_GEZ; break;
-        case Instruction::IF_GEZ: opcode = Instruction::IF_LTZ; break;
-        case Instruction::IF_GTZ: opcode = Instruction::IF_LEZ; break;
-        case Instruction::IF_LEZ: opcode = Instruction::IF_GTZ; break;
-        default: LOG(FATAL) << "Unexpected opcode " << opcode;
-      }
-      prev->last_mir_insn->dalvikInsn.opcode = opcode;
-      BasicBlockId t_bb = prev->taken;
-      prev->taken = prev->fall_through;
-      prev->fall_through = t_bb;
-      break;
-    }
-    walker = prev;
-  }
-  return false;
-}
-
-/* Combine any basic blocks terminated by instructions that we now know can't throw */
-void MIRGraph::CombineBlocks(class BasicBlock* bb) {
-  // Loop here to allow combining a sequence of blocks
-  while ((bb->block_type == kDalvikByteCode) &&
-      (bb->last_mir_insn != nullptr) &&
-      (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) == kMirOpCheck)) {
-    MIR* mir = bb->last_mir_insn;
-    DCHECK(bb->first_mir_insn != nullptr);
-
-    // Get the paired insn and check if it can still throw.
-    MIR* throw_insn = mir->meta.throw_insn;
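-    // kMirOpCheck was split off from this potentially throwing instruction; if
-    // the instruction can no longer throw, the split blocks can be re-merged.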
-    if (CanThrow(throw_insn)) {
-      break;
-    }
-
-    // OK - got one.  Combine
-    BasicBlock* bb_next = GetBasicBlock(bb->fall_through);
-    DCHECK(!bb_next->catch_entry);
-    DCHECK_EQ(bb_next->predecessors.size(), 1u);
-
-    // Now move instructions from bb_next to bb. Start off with a sanity check
-    // that kMirOpCheck's throw instruction is the first one in bb_next.
-    DCHECK_EQ(bb_next->first_mir_insn, throw_insn);
-    // Now move all instructions (throw instruction to last one) from bb_next to bb.
-    MIR* last_to_move = bb_next->last_mir_insn;
-    bb_next->RemoveMIRList(throw_insn, last_to_move);
-    bb->InsertMIRListAfter(bb->last_mir_insn, throw_insn, last_to_move);
-    // The kMirOpCheck instruction is not needed anymore.
-    mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-    bb->RemoveMIR(mir);
-
-    // Before we overwrite successors, remove their predecessor links to bb.
-    bb_next->ErasePredecessor(bb->id);
-    if (bb->taken != NullBasicBlockId) {
-      DCHECK_EQ(bb->successor_block_list_type, kNotUsed);
-      BasicBlock* bb_taken = GetBasicBlock(bb->taken);
-      // bb->taken will be overwritten below.
-      DCHECK_EQ(bb_taken->block_type, kExceptionHandling);
-      DCHECK_EQ(bb_taken->predecessors.size(), 1u);
-      DCHECK_EQ(bb_taken->predecessors[0], bb->id);
-      bb_taken->predecessors.clear();
-      bb_taken->block_type = kDead;
-      DCHECK(bb_taken->data_flow_info == nullptr);
-    } else {
-      DCHECK_EQ(bb->successor_block_list_type, kCatch);
-      for (SuccessorBlockInfo* succ_info : bb->successor_blocks) {
-        if (succ_info->block != NullBasicBlockId) {
-          BasicBlock* succ_bb = GetBasicBlock(succ_info->block);
-          DCHECK(succ_bb->catch_entry);
-          succ_bb->ErasePredecessor(bb->id);
-        }
-      }
-    }
-    // Use the successor info from the next block
-    bb->successor_block_list_type = bb_next->successor_block_list_type;
-    bb->successor_blocks.swap(bb_next->successor_blocks);  // Swap instead of copying.
-    bb_next->successor_block_list_type = kNotUsed;
-    // Use the ending block linkage from the next block
-    bb->fall_through = bb_next->fall_through;
-    bb_next->fall_through = NullBasicBlockId;
-    bb->taken = bb_next->taken;
-    bb_next->taken = NullBasicBlockId;
-    /*
-     * If the lower half of the pair of blocks being combined contained
-     * a return, a conditional branch, or an explicit throw,
-     * move the flag to the newly combined block.
-     */
-    bb->terminated_by_return = bb_next->terminated_by_return;
-    bb->conditional_branch = bb_next->conditional_branch;
-    bb->explicit_throw = bb_next->explicit_throw;
-    // Merge the use_lvn flag.
-    bb->use_lvn |= bb_next->use_lvn;
-
-    /*
-     * NOTE: we aren't updating all dataflow info here.  Should either make sure this pass
-     * happens after uses of i_dominated, dom_frontier or update the dataflow info here.
-     * NOTE: GVN uses bb->data_flow_info->live_in_v which is unaffected by the block merge.
-     */
-
-    // Kill bb_next and remap now-dead id to parent.
-    bb_next->block_type = kDead;
-    bb_next->data_flow_info = nullptr;  // Must be null for dead blocks. (Relied on by the GVN.)
-    block_id_map_.Overwrite(bb_next->id, bb->id);
-    // Update predecessors in children.
-    ChildBlockIterator iter(bb, this);
-    for (BasicBlock* child = iter.Next(); child != nullptr; child = iter.Next()) {
-      child->UpdatePredecessor(bb_next->id, bb->id);
-    }
-
-    // DFS orders, domination and topological order are not up to date anymore.
-    dfs_orders_up_to_date_ = false;
-    domination_up_to_date_ = false;
-    topological_order_up_to_date_ = false;
-
-    // Now, loop back and see if we can keep going
-  }
-}
-
-bool MIRGraph::EliminateNullChecksGate() {
-  if ((cu_->disable_opt & (1 << kNullCheckElimination)) != 0 ||
-      (merged_df_flags_ & DF_HAS_NULL_CHKS) == 0) {
-    return false;
-  }
-
-  DCHECK(temp_scoped_alloc_.get() == nullptr);
-  temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
-  temp_.nce.num_vregs = GetNumOfCodeAndTempVRs();
-  temp_.nce.work_vregs_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
-      temp_scoped_alloc_.get(), temp_.nce.num_vregs, false);
-  temp_.nce.ending_vregs_to_check_matrix =
-      temp_scoped_alloc_->AllocArray<ArenaBitVector*>(GetNumBlocks(), kArenaAllocMisc);
-  std::fill_n(temp_.nce.ending_vregs_to_check_matrix, GetNumBlocks(), nullptr);
-
-  // Reset MIR_MARK.
-  AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      mir->optimization_flags &= ~MIR_MARK;
-    }
-  }
-
-  return true;
-}
-
-/*
- * Eliminate unnecessary null checks for a basic block.
- */
-bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
-  if (bb->block_type != kDalvikByteCode && bb->block_type != kEntryBlock) {
-    // Ignore the kExitBlock as well.
-    DCHECK(bb->first_mir_insn == nullptr);
-    return false;
-  }
-
-  ArenaBitVector* vregs_to_check = temp_.nce.work_vregs_to_check;
-  /*
-   * Set initial state. Catch blocks don't need any special treatment.
-   */
-  if (bb->block_type == kEntryBlock) {
-    vregs_to_check->ClearAllBits();
-    // Assume all ins are objects.
-    for (uint16_t in_reg = GetFirstInVR();
-         in_reg < GetNumOfCodeVRs(); in_reg++) {
-      vregs_to_check->SetBit(in_reg);
-    }
-    if ((cu_->access_flags & kAccStatic) == 0) {
-      // If non-static method, mark "this" as non-null.
-      int this_reg = GetFirstInVR();
-      vregs_to_check->ClearBit(this_reg);
-    }
-  } else {
-    DCHECK_EQ(bb->block_type, kDalvikByteCode);
-    // Starting state is union of all incoming arcs.
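-    // Union semantics: a vreg still needs a null check here if any incoming
-    // path leaves it unchecked.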
-    bool copied_first = false;
-    for (BasicBlockId pred_id : bb->predecessors) {
-      if (temp_.nce.ending_vregs_to_check_matrix[pred_id] == nullptr) {
-        continue;
-      }
-      BasicBlock* pred_bb = GetBasicBlock(pred_id);
-      DCHECK(pred_bb != nullptr);
-      MIR* null_check_insn = nullptr;
-      // Check to see if predecessor had an explicit null-check.
-      if (pred_bb->BranchesToSuccessorOnlyIfNotZero(bb->id)) {
-        // Remember the null check insn if there's no other predecessor requiring null check.
-        if (!copied_first || !vregs_to_check->IsBitSet(pred_bb->last_mir_insn->dalvikInsn.vA)) {
-          null_check_insn = pred_bb->last_mir_insn;
-          DCHECK(null_check_insn != nullptr);
-        }
-      }
-      if (!copied_first) {
-        copied_first = true;
-        vregs_to_check->Copy(temp_.nce.ending_vregs_to_check_matrix[pred_id]);
-      } else {
-        vregs_to_check->Union(temp_.nce.ending_vregs_to_check_matrix[pred_id]);
-      }
-      if (null_check_insn != nullptr) {
-        vregs_to_check->ClearBit(null_check_insn->dalvikInsn.vA);
-      }
-    }
-    DCHECK(copied_first);  // At least one predecessor must have been processed before this bb.
-  }
-  // At this point, vregs_to_check shows which vregs may hold objects that have
-  // not yet been null-checked.
-
-  // Walk through the instructions in the block, updating as necessary.
-  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-    uint64_t df_attributes = GetDataFlowAttributes(mir);
-
-    if ((df_attributes & DF_NULL_TRANSFER_N) != 0u) {
-      // The algorithm was written in a phi-agnostic way.
-      continue;
-    }
-
-    // Might need a null check?
-    if (df_attributes & DF_HAS_NULL_CHKS) {
-      int src_vreg;
-      if (df_attributes & DF_NULL_CHK_OUT0) {
-        DCHECK_NE(df_attributes & DF_IS_INVOKE, 0u);
-        src_vreg = mir->dalvikInsn.vC;
-      } else if (df_attributes & DF_NULL_CHK_B) {
-        DCHECK_NE(df_attributes & DF_REF_B, 0u);
-        src_vreg = mir->dalvikInsn.vB;
-      } else {
-        DCHECK_NE(df_attributes & DF_NULL_CHK_A, 0u);
-        DCHECK_NE(df_attributes & DF_REF_A, 0u);
-        src_vreg = mir->dalvikInsn.vA;
-      }
-      if (!vregs_to_check->IsBitSet(src_vreg)) {
-        // Eliminate the null check.
-        mir->optimization_flags |= MIR_MARK;
-      } else {
-        // Do the null check.
-        mir->optimization_flags &= ~MIR_MARK;
-        // Mark src_vreg as null-checked.
-        vregs_to_check->ClearBit(src_vreg);
-      }
-    }
-
-    if ((df_attributes & DF_A_WIDE) ||
-        (df_attributes & (DF_REF_A | DF_SETS_CONST | DF_NULL_TRANSFER)) == 0) {
-      continue;
-    }
-
-    /*
-     * First, mark all object definitions as requiring null check.
-     * Note: we can't tell if a CONST definition might be used as an object, so treat
-     * them all as object definitions.
-     */
-    if ((df_attributes & (DF_DA | DF_REF_A)) == (DF_DA | DF_REF_A) ||
-        (df_attributes & DF_SETS_CONST))  {
-      vregs_to_check->SetBit(mir->dalvikInsn.vA);
-    }
-
-    // Then, remove mark from all object definitions we know are non-null.
-    if (df_attributes & DF_NON_NULL_DST) {
-      // Mark target of NEW* as non-null
-      DCHECK_NE(df_attributes & DF_REF_A, 0u);
-      vregs_to_check->ClearBit(mir->dalvikInsn.vA);
-    }
-
-    // Mark non-null returns from invoke-style NEW*
-    if (df_attributes & DF_NON_NULL_RET) {
-      MIR* next_mir = mir->next;
-      // Next should be a MOVE_RESULT_OBJECT.
-      if (UNLIKELY(next_mir == nullptr)) {
-        // The MethodVerifier makes sure there's no MOVE_RESULT at the catch entry or branch
-        // target, so the MOVE_RESULT cannot be broken away into another block.
-        LOG(WARNING) << "Unexpected end of block following new";
-      } else if (UNLIKELY(next_mir->dalvikInsn.opcode != Instruction::MOVE_RESULT_OBJECT)) {
-        LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
-      } else {
-        // Mark as null checked.
-        vregs_to_check->ClearBit(next_mir->dalvikInsn.vA);
-      }
-    }
-
-    // Propagate null check state on register copies.
-    if (df_attributes & DF_NULL_TRANSFER_0) {
-      DCHECK_EQ(df_attributes | ~(DF_DA | DF_REF_A | DF_UB | DF_REF_B), static_cast<uint64_t>(-1));
-      if (vregs_to_check->IsBitSet(mir->dalvikInsn.vB)) {
-        vregs_to_check->SetBit(mir->dalvikInsn.vA);
-      } else {
-        vregs_to_check->ClearBit(mir->dalvikInsn.vA);
-      }
-    }
-  }
-
-  // Did anything change?
-  bool nce_changed = false;
-  ArenaBitVector* old_ending_ssa_regs_to_check = temp_.nce.ending_vregs_to_check_matrix[bb->id];
-  if (old_ending_ssa_regs_to_check == nullptr) {
-    DCHECK(temp_scoped_alloc_.get() != nullptr);
-    nce_changed = vregs_to_check->GetHighestBitSet() != -1;
-    temp_.nce.ending_vregs_to_check_matrix[bb->id] = vregs_to_check;
-    // Create a new vregs_to_check for next BB.
-    temp_.nce.work_vregs_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
-        temp_scoped_alloc_.get(), temp_.nce.num_vregs, false);
-  } else if (!vregs_to_check->SameBitsSet(old_ending_ssa_regs_to_check)) {
-    nce_changed = true;
-    temp_.nce.ending_vregs_to_check_matrix[bb->id] = vregs_to_check;
-    temp_.nce.work_vregs_to_check = old_ending_ssa_regs_to_check;  // Reuse for next BB.
-  }
-  return nce_changed;
-}
-
-void MIRGraph::EliminateNullChecksEnd() {
-  // Clean up temporaries.
-  temp_.nce.num_vregs = 0u;
-  temp_.nce.work_vregs_to_check = nullptr;
-  temp_.nce.ending_vregs_to_check_matrix = nullptr;
-  DCHECK(temp_scoped_alloc_.get() != nullptr);
-  temp_scoped_alloc_.reset();
-
-  // Converge MIR_MARK with MIR_IGNORE_NULL_CHECK.
-  AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      constexpr int kMarkToIgnoreNullCheckShift = kMIRMark - kMIRIgnoreNullCheck;
-      static_assert(kMarkToIgnoreNullCheckShift > 0, "Not a valid right-shift");
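-      // The right shift moves the MIR_MARK bit down onto the MIR_IGNORE_NULL_CHECK
-      // bit, so every instruction marked above gets its null check ignored.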
-      uint16_t mirMarkAdjustedToIgnoreNullCheck =
-          (mir->optimization_flags & MIR_MARK) >> kMarkToIgnoreNullCheckShift;
-      mir->optimization_flags |= mirMarkAdjustedToIgnoreNullCheck;
-    }
-  }
-}
-
-void MIRGraph::InferTypesStart() {
-  DCHECK(temp_scoped_alloc_ != nullptr);
-  temp_.ssa.ti = new (temp_scoped_alloc_.get()) TypeInference(this, temp_scoped_alloc_.get());
-}
-
-/*
- * Perform type and size inference for a basic block.
- */
-bool MIRGraph::InferTypes(BasicBlock* bb) {
-  if (bb->data_flow_info == nullptr) return false;
-
-  DCHECK(temp_.ssa.ti != nullptr);
-  return temp_.ssa.ti->Apply(bb);
-}
-
-void MIRGraph::InferTypesEnd() {
-  DCHECK(temp_.ssa.ti != nullptr);
-  temp_.ssa.ti->Finish();
-  delete temp_.ssa.ti;
-  temp_.ssa.ti = nullptr;
-}
-
-bool MIRGraph::EliminateClassInitChecksGate() {
-  if ((cu_->disable_opt & (1 << kClassInitCheckElimination)) != 0 ||
-      (merged_df_flags_ & DF_CLINIT) == 0) {
-    return false;
-  }
-
-  DCHECK(temp_scoped_alloc_.get() == nullptr);
-  temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
-
-  // Each insn we use here has at least 2 code units, so offset/2 will be a unique index.
-  const size_t end = (GetNumDalvikInsns() + 1u) / 2u;
-  temp_.cice.indexes = temp_scoped_alloc_->AllocArray<uint16_t>(end, kArenaAllocGrowableArray);
-  std::fill_n(temp_.cice.indexes, end, 0xffffu);
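-  // 0xffff means "no class init check index recorded for this offset".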
-
-  uint32_t unique_class_count = 0u;
-  {
-    // Get unique_class_count and store indexes in temp_insn_data_ using a map on a nested
-    // ScopedArenaAllocator.
-
-    // Embed the map value in the entry to save space.
-    struct MapEntry {
-      // Map key: the class identified by the declaring dex file and type index.
-      const DexFile* declaring_dex_file;
-      uint16_t declaring_class_idx;
-      // Map value: index into bit vectors of classes requiring initialization checks.
-      uint16_t index;
-    };
-    struct MapEntryComparator {
-      bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
-        if (lhs.declaring_class_idx != rhs.declaring_class_idx) {
-          return lhs.declaring_class_idx < rhs.declaring_class_idx;
-        }
-        return lhs.declaring_dex_file < rhs.declaring_dex_file;
-      }
-    };
-
-    ScopedArenaAllocator allocator(&cu_->arena_stack);
-    ScopedArenaSet<MapEntry, MapEntryComparator> class_to_index_map(MapEntryComparator(),
-                                                                    allocator.Adapter());
-
-    // First, find all SGET/SPUTs that may need class initialization checks;
-    // also record INVOKE_STATICs.
-    AllNodesIterator iter(this);
-    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-      if (bb->block_type == kDalvikByteCode) {
-        for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-          if (IsInstructionSGetOrSPut(mir->dalvikInsn.opcode)) {
-            const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(mir);
-            if (!field_info.IsReferrersClass()) {
-              DCHECK_LT(class_to_index_map.size(), 0xffffu);
-              MapEntry entry = {
-                  // Treat unresolved fields as if each had its own class.
-                  field_info.IsResolved() ? field_info.DeclaringDexFile()
-                                          : nullptr,
-                  field_info.IsResolved() ? field_info.DeclaringClassIndex()
-                                          : field_info.FieldIndex(),
-                  static_cast<uint16_t>(class_to_index_map.size())
-              };
-              uint16_t index = class_to_index_map.insert(entry).first->index;
-              // Using offset/2 for index into temp_.cice.indexes.
-              temp_.cice.indexes[mir->offset / 2u] = index;
-            }
-          } else if (IsInstructionInvokeStatic(mir->dalvikInsn.opcode)) {
-            const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(mir);
-            DCHECK(method_info.IsStatic());
-            if (method_info.FastPath() && !method_info.IsReferrersClass()) {
-              MapEntry entry = {
-                  method_info.DeclaringDexFile(),
-                  method_info.DeclaringClassIndex(),
-                  static_cast<uint16_t>(class_to_index_map.size())
-              };
-              uint16_t index = class_to_index_map.insert(entry).first->index;
-              // Using offset/2 for index into temp_.cice.indexes.
-              temp_.cice.indexes[mir->offset / 2u] = index;
-            }
-          }
-        }
-      }
-    }
-    unique_class_count = static_cast<uint32_t>(class_to_index_map.size());
-  }
-
-  if (unique_class_count == 0u) {
-    // All SGET/SPUTs refer to initialized classes. Nothing to do.
-    temp_.cice.indexes = nullptr;
-    temp_scoped_alloc_.reset();
-    return false;
-  }
-
-  // 2 bits for each class: is class initialized, is class in dex cache.
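-  // Class with index i uses bit 2*i for the clinit check and bit 2*i+1 for the
-  // dex cache check (see EliminateClassInitChecks()).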
-  temp_.cice.num_class_bits = 2u * unique_class_count;
-  temp_.cice.work_classes_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
-      temp_scoped_alloc_.get(), temp_.cice.num_class_bits, false);
-  temp_.cice.ending_classes_to_check_matrix =
-      temp_scoped_alloc_->AllocArray<ArenaBitVector*>(GetNumBlocks(), kArenaAllocMisc);
-  std::fill_n(temp_.cice.ending_classes_to_check_matrix, GetNumBlocks(), nullptr);
-  DCHECK_GT(temp_.cice.num_class_bits, 0u);
-  return true;
-}
-
-/*
- * Eliminate unnecessary class initialization checks for a basic block.
- */
-bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
-  DCHECK_EQ((cu_->disable_opt & (1 << kClassInitCheckElimination)), 0u);
-  if (bb->block_type != kDalvikByteCode && bb->block_type != kEntryBlock) {
-    // Ignore the kExitBlock as well.
-    DCHECK(bb->first_mir_insn == nullptr);
-    return false;
-  }
-
-  /*
-   * Set initial state.  Catch blocks don't need any special treatment.
-   */
-  ArenaBitVector* classes_to_check = temp_.cice.work_classes_to_check;
-  DCHECK(classes_to_check != nullptr);
-  if (bb->block_type == kEntryBlock) {
-    classes_to_check->SetInitialBits(temp_.cice.num_class_bits);
-  } else {
-    // Starting state is union of all incoming arcs.
-    bool copied_first = false;
-    for (BasicBlockId pred_id : bb->predecessors) {
-      if (temp_.cice.ending_classes_to_check_matrix[pred_id] == nullptr) {
-        continue;
-      }
-      if (!copied_first) {
-        copied_first = true;
-        classes_to_check->Copy(temp_.cice.ending_classes_to_check_matrix[pred_id]);
-      } else {
-        classes_to_check->Union(temp_.cice.ending_classes_to_check_matrix[pred_id]);
-      }
-    }
-    DCHECK(copied_first);  // At least one predecessor must have been processed before this bb.
-  }
-  // At this point, classes_to_check shows which classes need clinit checks.
-
-  // Walk through the instructions in the block, updating as necessary.
-  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-    uint16_t index = temp_.cice.indexes[mir->offset / 2u];
-    if (index != 0xffffu) {
-      bool check_initialization = false;
-      bool check_dex_cache = false;
-
-      // NOTE: index != 0xffff does not guarantee that this is an SGET/SPUT/INVOKE_STATIC.
-      // Dex instructions with width 1 can have the same offset/2.
-
-      if (IsInstructionSGetOrSPut(mir->dalvikInsn.opcode)) {
-        check_initialization = true;
-        check_dex_cache = true;
-      } else if (IsInstructionInvokeStatic(mir->dalvikInsn.opcode)) {
-        check_initialization = true;
-        // NOTE: INVOKE_STATIC doesn't guarantee that the type will be in the dex cache.
-      }
-
-      if (check_dex_cache) {
-        uint32_t check_dex_cache_index = 2u * index + 1u;
-        if (!classes_to_check->IsBitSet(check_dex_cache_index)) {
-          // Eliminate the class init check.
-          mir->optimization_flags |= MIR_CLASS_IS_IN_DEX_CACHE;
-        } else {
-          // Do the class init check.
-          mir->optimization_flags &= ~MIR_CLASS_IS_IN_DEX_CACHE;
-        }
-        classes_to_check->ClearBit(check_dex_cache_index);
-      }
-      if (check_initialization) {
-        uint32_t check_clinit_index = 2u * index;
-        if (!classes_to_check->IsBitSet(check_clinit_index)) {
-          // Eliminate the class init check.
-          mir->optimization_flags |= MIR_CLASS_IS_INITIALIZED;
-        } else {
-          // Do the class init check.
-          mir->optimization_flags &= ~MIR_CLASS_IS_INITIALIZED;
-        }
-        // Mark the class as initialized.
-        classes_to_check->ClearBit(check_clinit_index);
-      }
-    }
-  }
-
-  // Did anything change?
-  bool changed = false;
-  ArenaBitVector* old_ending_classes_to_check = temp_.cice.ending_classes_to_check_matrix[bb->id];
-  if (old_ending_classes_to_check == nullptr) {
-    DCHECK(temp_scoped_alloc_.get() != nullptr);
-    changed = classes_to_check->GetHighestBitSet() != -1;
-    temp_.cice.ending_classes_to_check_matrix[bb->id] = classes_to_check;
-    // Create a new classes_to_check for next BB.
-    temp_.cice.work_classes_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
-        temp_scoped_alloc_.get(), temp_.cice.num_class_bits, false);
-  } else if (!classes_to_check->Equal(old_ending_classes_to_check)) {
-    changed = true;
-    temp_.cice.ending_classes_to_check_matrix[bb->id] = classes_to_check;
-    temp_.cice.work_classes_to_check = old_ending_classes_to_check;  // Reuse for next BB.
-  }
-  return changed;
-}
-
-void MIRGraph::EliminateClassInitChecksEnd() {
-  // Clean up temporaries.
-  temp_.cice.num_class_bits = 0u;
-  temp_.cice.work_classes_to_check = nullptr;
-  temp_.cice.ending_classes_to_check_matrix = nullptr;
-  DCHECK(temp_.cice.indexes != nullptr);
-  temp_.cice.indexes = nullptr;
-  DCHECK(temp_scoped_alloc_.get() != nullptr);
-  temp_scoped_alloc_.reset();
-}
-
-static void DisableGVNDependentOptimizations(CompilationUnit* cu) {
-  cu->disable_opt |= (1u << kGvnDeadCodeElimination);
-}
-
-bool MIRGraph::ApplyGlobalValueNumberingGate() {
-  if (GlobalValueNumbering::Skip(cu_)) {
-    DisableGVNDependentOptimizations(cu_);
-    return false;
-  }
-
-  DCHECK(temp_scoped_alloc_ == nullptr);
-  temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
-  temp_.gvn.ifield_ids =
-      GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), ifield_lowering_infos_);
-  temp_.gvn.sfield_ids =
-      GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), sfield_lowering_infos_);
-  DCHECK(temp_.gvn.gvn == nullptr);
-  temp_.gvn.gvn = new (temp_scoped_alloc_.get()) GlobalValueNumbering(
-      cu_, temp_scoped_alloc_.get(), GlobalValueNumbering::kModeGvn);
-  return true;
-}
-
-bool MIRGraph::ApplyGlobalValueNumbering(BasicBlock* bb) {
-  DCHECK(temp_.gvn.gvn != nullptr);
-  LocalValueNumbering* lvn = temp_.gvn.gvn->PrepareBasicBlock(bb);
-  if (lvn != nullptr) {
-    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      lvn->GetValueNumber(mir);
-    }
-  }
-  bool change = (lvn != nullptr) && temp_.gvn.gvn->FinishBasicBlock(bb);
-  return change;
-}
-
-void MIRGraph::ApplyGlobalValueNumberingEnd() {
-  // Perform modifications.
-  DCHECK(temp_.gvn.gvn != nullptr);
-  if (temp_.gvn.gvn->Good()) {
-    temp_.gvn.gvn->StartPostProcessing();
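-    // Second pass: re-run the LVN over each block in topological order so the
-    // GVN can apply the modifications recorded during the analysis pass.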
-    if (max_nested_loops_ != 0u) {
-      TopologicalSortIterator iter(this);
-      for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-        ScopedArenaAllocator allocator(&cu_->arena_stack);  // Reclaim memory after each LVN.
-        LocalValueNumbering* lvn = temp_.gvn.gvn->PrepareBasicBlock(bb, &allocator);
-        if (lvn != nullptr) {
-          for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-            lvn->GetValueNumber(mir);
-          }
-          bool change = temp_.gvn.gvn->FinishBasicBlock(bb);
-          DCHECK(!change) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-        }
-      }
-    }
-    // GVN was successful; running the LVN would be useless.
-    cu_->disable_opt |= (1u << kLocalValueNumbering);
-  } else {
-    LOG(WARNING) << "GVN failed for " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-    DisableGVNDependentOptimizations(cu_);
-  }
-}
-
-bool MIRGraph::EliminateDeadCodeGate() {
-  if ((cu_->disable_opt & (1 << kGvnDeadCodeElimination)) != 0 || temp_.gvn.gvn == nullptr) {
-    return false;
-  }
-  DCHECK(temp_scoped_alloc_ != nullptr);
-  temp_.gvn.dce = new (temp_scoped_alloc_.get()) GvnDeadCodeElimination(temp_.gvn.gvn,
-                                                                        temp_scoped_alloc_.get());
-  return true;
-}
-
-bool MIRGraph::EliminateDeadCode(BasicBlock* bb) {
-  DCHECK(temp_scoped_alloc_ != nullptr);
-  DCHECK(temp_.gvn.gvn != nullptr);
-  if (bb->block_type != kDalvikByteCode) {
-    return false;
-  }
-  DCHECK(temp_.gvn.dce != nullptr);
-  temp_.gvn.dce->Apply(bb);
-  return false;  // No need to repeat.
-}
-
-void MIRGraph::EliminateDeadCodeEnd() {
-  if (kIsDebugBuild) {
-    // DCE can make some previously dead vregs alive again. Make sure the obsolete
-    // live-in information is not used anymore.
-    AllNodesIterator iter(this);
-    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-      if (bb->data_flow_info != nullptr) {
-        bb->data_flow_info->live_in_v = nullptr;
-      }
-    }
-  }
-}
-
-void MIRGraph::GlobalValueNumberingCleanup() {
-  // If the GVN didn't run, these pointers should be null and everything is effectively a no-op.
-  delete temp_.gvn.dce;
-  temp_.gvn.dce = nullptr;
-  delete temp_.gvn.gvn;
-  temp_.gvn.gvn = nullptr;
-  temp_.gvn.ifield_ids = nullptr;
-  temp_.gvn.sfield_ids = nullptr;
-  temp_scoped_alloc_.reset();
-}
-
-void MIRGraph::ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput) {
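-  // Memoized per inlined method: the first inlined IGET/IPUT resolves the field;
-  // later ones reuse the cached lowering info index.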
-  uint32_t method_index = invoke->meta.method_lowering_info;
-  if (temp_.smi.processed_indexes->IsBitSet(method_index)) {
-    iget_or_iput->meta.ifield_lowering_info = temp_.smi.lowering_infos[method_index];
-    DCHECK_EQ(field_idx, GetIFieldLoweringInfo(iget_or_iput).FieldIndex());
-    return;
-  }
-
-  const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(invoke);
-  MethodReference target = method_info.GetTargetMethod();
-  ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<1> hs(soa.Self());
-  Handle<mirror::DexCache> dex_cache(
-      hs.NewHandle(cu_->class_linker->FindDexCache(hs.Self(), *target.dex_file)));
-  DexCompilationUnit inlined_unit(cu_,
-                                  cu_->class_loader,
-                                  cu_->class_linker,
-                                  *target.dex_file,
-                                  nullptr /* code_item not used */,
-                                  0u /* class_def_idx not used */,
-                                  target.dex_method_index,
-                                  0u /* access_flags not used */,
-                                  nullptr /* verified_method not used */,
-                                  dex_cache);
-  DexMemAccessType type = IGetOrIPutMemAccessType(iget_or_iput->dalvikInsn.opcode);
-  MirIFieldLoweringInfo inlined_field_info(field_idx, type, false);
-  MirIFieldLoweringInfo::Resolve(soa, cu_->compiler_driver, &inlined_unit, &inlined_field_info, 1u);
-  DCHECK(inlined_field_info.IsResolved());
-
-  uint32_t field_info_index = ifield_lowering_infos_.size();
-  ifield_lowering_infos_.push_back(inlined_field_info);
-  temp_.smi.processed_indexes->SetBit(method_index);
-  temp_.smi.lowering_infos[method_index] = field_info_index;
-  iget_or_iput->meta.ifield_lowering_info = field_info_index;
-}
-
-bool MIRGraph::InlineSpecialMethodsGate() {
-  if ((cu_->disable_opt & (1 << kSuppressMethodInlining)) != 0 ||
-      method_lowering_infos_.size() == 0u) {
-    return false;
-  }
-  if (cu_->compiler_driver->GetMethodInlinerMap() == nullptr) {
-    // This isn't the Quick compiler.
-    return false;
-  }
-  return true;
-}
-
-void MIRGraph::InlineSpecialMethodsStart() {
-  // Prepare for inlining getters/setters. Since we're inlining at most 1 IGET/IPUT from
-  // each INVOKE, we can index the data by the MIR::meta::method_lowering_info index.
-
-  DCHECK(temp_scoped_alloc_.get() == nullptr);
-  temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
-  temp_.smi.num_indexes = method_lowering_infos_.size();
-  temp_.smi.processed_indexes = new (temp_scoped_alloc_.get()) ArenaBitVector(
-      temp_scoped_alloc_.get(), temp_.smi.num_indexes, false);
-  temp_.smi.processed_indexes->ClearAllBits();
-  temp_.smi.lowering_infos =
-      temp_scoped_alloc_->AllocArray<uint16_t>(temp_.smi.num_indexes, kArenaAllocGrowableArray);
-}
-
-void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
-  if (bb->block_type != kDalvikByteCode) {
-    return;
-  }
-  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-    if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
-      continue;
-    }
-    if (!(mir->dalvikInsn.FlagsOf() & Instruction::kInvoke)) {
-      continue;
-    }
-    const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(mir);
-    if (!method_info.FastPath() || !method_info.IsSpecial()) {
-      continue;
-    }
-
-    InvokeType sharp_type = method_info.GetSharpType();
-    if ((sharp_type != kDirect) && (sharp_type != kStatic)) {
-      continue;
-    }
-
-    if (sharp_type == kStatic) {
-      bool needs_clinit = !method_info.IsClassInitialized() &&
-          ((mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0);
-      if (needs_clinit) {
-        continue;
-      }
-    }
-
-    DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
-    MethodReference target = method_info.GetTargetMethod();
-    if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(target.dex_file)
-            ->GenInline(this, bb, mir, target.dex_method_index)) {
-      if (cu_->verbose || cu_->print_pass) {
-        LOG(INFO) << "SpecialMethodInliner: Inlined " << method_info.GetInvokeType() << " ("
-            << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index,
-                                                            *target.dex_file)
-            << "\" from \"" << PrettyMethod(cu_->method_idx, *cu_->dex_file)
-            << "\" @0x" << std::hex << mir->offset;
-      }
-    }
-  }
-}
-
-void MIRGraph::InlineSpecialMethodsEnd() {
-  // Clean up temporaries.
-  DCHECK(temp_.smi.lowering_infos != nullptr);
-  temp_.smi.lowering_infos = nullptr;
-  temp_.smi.num_indexes = 0u;
-  DCHECK(temp_.smi.processed_indexes != nullptr);
-  temp_.smi.processed_indexes = nullptr;
-  DCHECK(temp_scoped_alloc_.get() != nullptr);
-  temp_scoped_alloc_.reset();
-}
-
-void MIRGraph::DumpCheckStats() {
-  Checkstats* stats =
-      static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), kArenaAllocDFInfo));
-  checkstats_ = stats;
-  AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    CountChecks(bb);
-  }
-  if (stats->null_checks > 0) {
-    float eliminated = static_cast<float>(stats->null_checks_eliminated);
-    float checks = static_cast<float>(stats->null_checks);
-    LOG(INFO) << "Null Checks: " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
-              << stats->null_checks_eliminated << " of " << stats->null_checks << " -> "
-              << (eliminated/checks) * 100.0 << "%";
-  }
-  if (stats->range_checks > 0) {
-    float eliminated = static_cast<float>(stats->range_checks_eliminated);
-    float checks = static_cast<float>(stats->range_checks);
-    LOG(INFO) << "Range Checks: " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
-              << stats->range_checks_eliminated << " of " << stats->range_checks << " -> "
-              << (eliminated/checks) * 100.0 << "%";
-  }
-}
-
-bool MIRGraph::BuildExtendedBBList(class BasicBlock* bb) {
-  if (bb->visited) return false;
-  if (!((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
-      || (bb->block_type == kExitBlock))) {
-    // Ignore special blocks
-    bb->visited = true;
-    return false;
-  }
-  // Must be head of extended basic block.
-  BasicBlock* start_bb = bb;
-  extended_basic_blocks_.push_back(bb->id);
-  bool terminated_by_return = false;
-  bool do_local_value_numbering = false;
-  // Visit blocks strictly dominated by this head.
-  while (bb != nullptr) {
-    bb->visited = true;
-    terminated_by_return |= bb->terminated_by_return;
-    do_local_value_numbering |= bb->use_lvn;
-    bb = NextDominatedBlock(bb);
-  }
-  if (terminated_by_return || do_local_value_numbering) {
-    // Do lvn for all blocks in this extended set.
-    bb = start_bb;
-    while (bb != nullptr) {
-      bb->use_lvn = do_local_value_numbering;
-      bb->dominates_return = terminated_by_return;
-      bb = NextDominatedBlock(bb);
-    }
-  }
-  return false;  // Not iterative - return value will be ignored
-}
-
-void MIRGraph::BasicBlockOptimizationStart() {
-  if ((cu_->disable_opt & (1 << kLocalValueNumbering)) == 0) {
-    temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
-    temp_.gvn.ifield_ids =
-        GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), ifield_lowering_infos_);
-    temp_.gvn.sfield_ids =
-        GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), sfield_lowering_infos_);
-  }
-}
-
-void MIRGraph::BasicBlockOptimization() {
-  if ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) {
-    ClearAllVisitedFlags();
-    PreOrderDfsIterator iter2(this);
-    for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
-      BuildExtendedBBList(bb);
-    }
-    // Perform extended basic block optimizations.
-    for (unsigned int i = 0; i < extended_basic_blocks_.size(); i++) {
-      BasicBlockOpt(GetBasicBlock(extended_basic_blocks_[i]));
-    }
-  } else {
-    PreOrderDfsIterator iter(this);
-    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-      BasicBlockOpt(bb);
-    }
-  }
-}
-
-void MIRGraph::BasicBlockOptimizationEnd() {
-  // Clean up after LVN.
-  temp_.gvn.ifield_ids = nullptr;
-  temp_.gvn.sfield_ids = nullptr;
-  temp_scoped_alloc_.reset();
-}
-
-void MIRGraph::StringChange() {
-  AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      // Look for new-instance opcodes; skip otherwise.
-      Instruction::Code opcode = mir->dalvikInsn.opcode;
-      if (opcode == Instruction::NEW_INSTANCE) {
-        uint32_t type_idx = mir->dalvikInsn.vB;
-        if (cu_->compiler_driver->IsStringTypeIndex(type_idx, cu_->dex_file)) {
-          LOG(FATAL) << "Quick cannot compile String allocations";
-        }
-      } else if ((opcode == Instruction::INVOKE_DIRECT) ||
-                 (opcode == Instruction::INVOKE_DIRECT_RANGE)) {
-        uint32_t method_idx = mir->dalvikInsn.vB;
-        DexFileMethodInliner* inliner =
-            cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file);
-        if (inliner->IsStringInitMethodIndex(method_idx)) {
-          LOG(FATAL) << "Quick cannot compile String allocations";
-        }
-      }
-    }
-  }
-}
-
-bool MIRGraph::EliminateSuspendChecksGate() {
-  if (kLeafOptimization ||           // Incompatible (could create loops without suspend checks).
-      (cu_->disable_opt & (1 << kSuspendCheckElimination)) != 0 ||  // Disabled.
-      GetMaxNestedLoops() == 0u ||   // Nothing to do.
-      GetMaxNestedLoops() >= 32u ||  // Only 32 bits in suspend_checks_in_loops_[.].
-                                     // Exclude 32 as well to keep bit shifts well-defined.
-      !HasInvokes()) {               // No invokes to actually eliminate any suspend checks.
-    return false;
-  }
-  suspend_checks_in_loops_ = arena_->AllocArray<uint32_t>(GetNumBlocks(), kArenaAllocMisc);
-  return true;
-}
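-
-// Worked example (editor's illustration, not from the original source): for a
-// block at nesting_depth == 3, EliminateSuspendChecks() below starts with
-//   (1u << 3) - 1u == 0b111,
-// i.e. one bit per enclosing loop, bit 0 being the outermost. A bit stays set
-// only while every incoming path through that loop still contains a suspend
-// check; this is also why depths of 32 or more are rejected by the gate above.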
-
-bool MIRGraph::EliminateSuspendChecks(BasicBlock* bb) {
-  if (bb->block_type != kDalvikByteCode) {
-    return false;
-  }
-  DCHECK_EQ(GetTopologicalSortOrderLoopHeadStack()->size(), bb->nesting_depth);
-  if (bb->nesting_depth == 0u) {
-    // Out of loops.
-    DCHECK_EQ(suspend_checks_in_loops_[bb->id], 0u);  // The array was zero-initialized.
-    return false;
-  }
-  uint32_t suspend_checks_in_loops = (1u << bb->nesting_depth) - 1u;  // Start with all loop heads.
-  bool found_invoke = false;
-  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-    if ((IsInstructionInvoke(mir->dalvikInsn.opcode) ||
-        IsInstructionQuickInvoke(mir->dalvikInsn.opcode)) &&
-        !GetMethodLoweringInfo(mir).IsIntrinsic()) {
-      // Non-intrinsic invoke, rely on a suspend point in the invoked method.
-      found_invoke = true;
-      break;
-    }
-  }
-  if (!found_invoke) {
-    // Intersect suspend checks from predecessors.
-    uint16_t bb_topo_idx = topological_order_indexes_[bb->id];
-    uint32_t pred_mask_union = 0u;
-    for (BasicBlockId pred_id : bb->predecessors) {
-      uint16_t pred_topo_idx = topological_order_indexes_[pred_id];
-      if (pred_topo_idx < bb_topo_idx) {
-        // Determine the loop depth of this predecessor relative to this block.
-        size_t pred_loop_depth = topological_order_loop_head_stack_.size();
-        while (pred_loop_depth != 0u &&
-            pred_topo_idx < topological_order_loop_head_stack_[pred_loop_depth - 1].first) {
-          --pred_loop_depth;
-        }
-        DCHECK_LE(pred_loop_depth, GetBasicBlock(pred_id)->nesting_depth);
-        uint32_t pred_mask = (1u << pred_loop_depth) - 1u;
-        // Intersect pred_mask bits in suspend_checks_in_loops with
-        // suspend_checks_in_loops_[pred_id].
-        uint32_t pred_loops_without_checks = pred_mask & ~suspend_checks_in_loops_[pred_id];
-        suspend_checks_in_loops = suspend_checks_in_loops & ~pred_loops_without_checks;
-        pred_mask_union |= pred_mask;
-      }
-    }
-    // DCHECK_EQ() may not hold for unnatural loop heads, so use DCHECK_GE().
-  DCHECK_GE(((1u << (IsLoopHead(bb->id) ? bb->nesting_depth - 1u : bb->nesting_depth)) - 1u),
-              pred_mask_union);
-    suspend_checks_in_loops &= pred_mask_union;
-  }
-  suspend_checks_in_loops_[bb->id] = suspend_checks_in_loops;
-  if (suspend_checks_in_loops == 0u) {
-    return false;
-  }
-  // Apply MIR_IGNORE_SUSPEND_CHECK if appropriate.
-  if (bb->taken != NullBasicBlockId) {
-    DCHECK(bb->last_mir_insn != nullptr);
-    DCHECK(IsInstructionIfCc(bb->last_mir_insn->dalvikInsn.opcode) ||
-           IsInstructionIfCcZ(bb->last_mir_insn->dalvikInsn.opcode) ||
-           IsInstructionGoto(bb->last_mir_insn->dalvikInsn.opcode) ||
-           (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) >= kMirOpFusedCmplFloat &&
-            static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) <= kMirOpFusedCmpLong));
-    if (!IsSuspendCheckEdge(bb, bb->taken) &&
-        (bb->fall_through == NullBasicBlockId || !IsSuspendCheckEdge(bb, bb->fall_through))) {
-      bb->last_mir_insn->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
-    }
-  } else if (bb->fall_through != NullBasicBlockId && IsSuspendCheckEdge(bb, bb->fall_through)) {
-    // We've got a fall-through suspend edge. Add an artificial GOTO to force suspend check.
-    MIR* mir = NewMIR();
-    mir->dalvikInsn.opcode = Instruction::GOTO;
-    mir->dalvikInsn.vA = 0;  // Branch offset.
-    mir->offset = GetBasicBlock(bb->fall_through)->start_offset;
-    mir->m_unit_index = current_method_;
-    mir->ssa_rep = reinterpret_cast<SSARepresentation*>(
-        arena_->Alloc(sizeof(SSARepresentation), kArenaAllocDFInfo));  // Zero-initialized.
-    bb->AppendMIR(mir);
-    std::swap(bb->fall_through, bb->taken);  // The fall-through has become taken.
-  }
-  return true;
-}
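-
-// Note on the artificial GOTO above (illustrative interpretation): a pure
-// fall-through back edge has no branch instruction on which the backend could
-// place a suspend check, so a GOTO with offset 0 is appended and the
-// fall-through/taken successors are swapped, turning the edge into an explicit
-// taken branch that keeps its suspend check.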
-
-bool MIRGraph::CanThrow(MIR* mir) const {
-  if ((mir->dalvikInsn.FlagsOf() & Instruction::kThrow) == 0) {
-    return false;
-  }
-  const int opt_flags = mir->optimization_flags;
-  uint64_t df_attributes = GetDataFlowAttributes(mir);
-
-  // First, check if the insn can still throw NPE.
-  if (((df_attributes & DF_HAS_NULL_CHKS) != 0) && ((opt_flags & MIR_IGNORE_NULL_CHECK) == 0)) {
-    return true;
-  }
-
-  // Now process specific instructions.
-  if ((df_attributes & DF_IFIELD) != 0) {
-    // The IGET/IPUT family. We have processed the IGET/IPUT null check above.
-    DCHECK_NE(opt_flags & MIR_IGNORE_NULL_CHECK, 0);
-    // If not fast, weird things can happen and the insn can throw.
-    const MirIFieldLoweringInfo& field_info = GetIFieldLoweringInfo(mir);
-    bool fast = (df_attributes & DF_DA) != 0 ? field_info.FastGet() : field_info.FastPut();
-    return !fast;
-  } else if ((df_attributes & DF_SFIELD) != 0) {
-    // The SGET/SPUT family. Check for potentially throwing class initialization.
-    // Also, if not fast, weird things can happen and the insn can throw.
-    const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(mir);
-    bool fast = (df_attributes & DF_DA) != 0 ? field_info.FastGet() : field_info.FastPut();
-    bool is_class_initialized = field_info.IsClassInitialized() ||
-        ((mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0);
-    return !(fast && is_class_initialized);
-  } else if ((df_attributes & DF_HAS_RANGE_CHKS) != 0) {
-    // Only AGET/APUT have range checks. We have processed the AGET/APUT null check above.
-    DCHECK_NE(opt_flags & MIR_IGNORE_NULL_CHECK, 0);
-    // Non-throwing only if range check has been eliminated.
-    return ((opt_flags & MIR_IGNORE_RANGE_CHECK) == 0);
-  } else if (mir->dalvikInsn.opcode == Instruction::CHECK_CAST &&
-      (opt_flags & MIR_IGNORE_CHECK_CAST) != 0) {
-    return false;
-  } else if (mir->dalvikInsn.opcode == Instruction::ARRAY_LENGTH ||
-      static_cast<int>(mir->dalvikInsn.opcode) == kMirOpNullCheck) {
-    // No more checks for these (null check was processed above).
-    return false;
-  }
-  return true;
-}
-
-bool MIRGraph::HasAntiDependency(MIR* first, MIR* second) {
-  DCHECK(first->ssa_rep != nullptr);
-  DCHECK(second->ssa_rep != nullptr);
-  if ((second->ssa_rep->num_defs > 0) && (first->ssa_rep->num_uses > 0)) {
-    int vreg0 = SRegToVReg(second->ssa_rep->defs[0]);
-    int vreg1 = (second->ssa_rep->num_defs == 2) ?
-        SRegToVReg(second->ssa_rep->defs[1]) : INVALID_VREG;
-    for (int i = 0; i < first->ssa_rep->num_uses; i++) {
-      int32_t use = SRegToVReg(first->ssa_rep->uses[i]);
-      if (use == vreg0 || use == vreg1) {
-        return true;
-      }
-    }
-  }
-  return false;
-}
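-
-// Example of the anti-dependency checked above (illustrative only):
-//   mul-int v0, v1, v2   // candidate multiply, uses v1
-//   add-int v1, v3, v4   // overwrites v1
-// HasAntiDependency(mul, add) returns true because the add defines v1, which
-// the multiply uses, so the now-stale product must not be fused later.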
-
-void MIRGraph::CombineMultiplyAdd(MIR* mul_mir, MIR* add_mir, bool mul_is_first_addend,
-                                  bool is_wide, bool is_sub) {
-  if (is_wide) {
-    if (is_sub) {
-      add_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpMsubLong);
-    } else {
-      add_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpMaddLong);
-    }
-  } else {
-    if (is_sub) {
-      add_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpMsubInt);
-    } else {
-      add_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpMaddInt);
-    }
-  }
-  add_mir->ssa_rep->num_uses = is_wide ? 6 : 3;
-  int32_t addend0 = INVALID_SREG;
-  int32_t addend1 = INVALID_SREG;
-  if (is_wide) {
-    addend0 = mul_is_first_addend ? add_mir->ssa_rep->uses[2] : add_mir->ssa_rep->uses[0];
-    addend1 = mul_is_first_addend ? add_mir->ssa_rep->uses[3] : add_mir->ssa_rep->uses[1];
-  } else {
-    addend0 = mul_is_first_addend ? add_mir->ssa_rep->uses[1] : add_mir->ssa_rep->uses[0];
-  }
-
-  AllocateSSAUseData(add_mir, add_mir->ssa_rep->num_uses);
-  add_mir->ssa_rep->uses[0] = mul_mir->ssa_rep->uses[0];
-  add_mir->ssa_rep->uses[1] = mul_mir->ssa_rep->uses[1];
-  // Clear the original multiply product ssa use count, as it is not used anymore.
-  raw_use_counts_[mul_mir->ssa_rep->defs[0]] = 0;
-  use_counts_[mul_mir->ssa_rep->defs[0]] = 0;
-  if (is_wide) {
-    DCHECK_EQ(add_mir->ssa_rep->num_uses, 6);
-    add_mir->ssa_rep->uses[2] = mul_mir->ssa_rep->uses[2];
-    add_mir->ssa_rep->uses[3] = mul_mir->ssa_rep->uses[3];
-    add_mir->ssa_rep->uses[4] = addend0;
-    add_mir->ssa_rep->uses[5] = addend1;
-    raw_use_counts_[mul_mir->ssa_rep->defs[1]] = 0;
-    use_counts_[mul_mir->ssa_rep->defs[1]] = 0;
-  } else {
-    DCHECK_EQ(add_mir->ssa_rep->num_uses, 3);
-    add_mir->ssa_rep->uses[2] = addend0;
-  }
-  // Copy in the decoded instruction information.
-  add_mir->dalvikInsn.vB = SRegToVReg(add_mir->ssa_rep->uses[0]);
-  if (is_wide) {
-    add_mir->dalvikInsn.vC = SRegToVReg(add_mir->ssa_rep->uses[2]);
-    add_mir->dalvikInsn.arg[0] = SRegToVReg(add_mir->ssa_rep->uses[4]);
-  } else {
-    add_mir->dalvikInsn.vC = SRegToVReg(add_mir->ssa_rep->uses[1]);
-    add_mir->dalvikInsn.arg[0] = SRegToVReg(add_mir->ssa_rep->uses[2]);
-  }
-  // Original multiply MIR is set to Nop.
-  mul_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-}
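-
-// Resulting SSA use layout (summary for illustration, not from the original
-// source):
-//   kMirOpMaddInt/kMirOpMsubInt:   uses = { a, b, addend }
-//   kMirOpMaddLong/kMirOpMsubLong: uses = { a_lo, a_hi, b_lo, b_hi,
-//                                           addend_lo, addend_hi }
-// where a and b are the multiply operands; vB, vC and arg[0] hold the vregs
-// of a, b and the addend respectively.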
-
-void MIRGraph::MultiplyAddOpt(BasicBlock* bb) {
-  if (bb->block_type == kDead) {
-    return;
-  }
-  ScopedArenaAllocator allocator(&cu_->arena_stack);
-  ScopedArenaSafeMap<uint32_t, MIR*> ssa_mul_map(std::less<uint32_t>(), allocator.Adapter());
-  ScopedArenaSafeMap<uint32_t, MIR*>::iterator map_it;
-  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-    Instruction::Code opcode = mir->dalvikInsn.opcode;
-    bool is_sub = true;
-    bool is_candidate_multiply = false;
-    switch (opcode) {
-      case Instruction::MUL_INT:
-      case Instruction::MUL_INT_2ADDR:
-        is_candidate_multiply = true;
-        break;
-      case Instruction::MUL_LONG:
-      case Instruction::MUL_LONG_2ADDR:
-        if (cu_->target64) {
-          is_candidate_multiply = true;
-        }
-        break;
-      case Instruction::ADD_INT:
-      case Instruction::ADD_INT_2ADDR:
-        is_sub = false;
-        FALLTHROUGH_INTENDED;
-      case Instruction::SUB_INT:
-      case Instruction::SUB_INT_2ADDR:
-        if (((map_it = ssa_mul_map.find(mir->ssa_rep->uses[0])) != ssa_mul_map.end()) && !is_sub) {
-          // a*b+c
-          CombineMultiplyAdd(map_it->second, mir, true /* product is the first addend */,
-                             false /* is_wide */, false /* is_sub */);
-          ssa_mul_map.erase(map_it);
-        } else if ((map_it = ssa_mul_map.find(mir->ssa_rep->uses[1])) != ssa_mul_map.end()) {
-          // c+a*b or c-a*b
-          CombineMultiplyAdd(map_it->second, mir, false /* product is the second addend */,
-                             false /* is_wide */, is_sub);
-          ssa_mul_map.erase(map_it);
-        }
-        break;
-      case Instruction::ADD_LONG:
-      case Instruction::ADD_LONG_2ADDR:
-        is_sub = false;
-        FALLTHROUGH_INTENDED;
-      case Instruction::SUB_LONG:
-      case Instruction::SUB_LONG_2ADDR:
-        if (!cu_->target64) {
-          break;
-        }
-        if ((map_it = ssa_mul_map.find(mir->ssa_rep->uses[0])) != ssa_mul_map.end() && !is_sub) {
-          // a*b+c
-          CombineMultiplyAdd(map_it->second, mir, true /* product is the first addend */,
-                             true /* is_wide */, false /* is_sub */);
-          ssa_mul_map.erase(map_it);
-        } else if ((map_it = ssa_mul_map.find(mir->ssa_rep->uses[2])) != ssa_mul_map.end()) {
-          // c+a*b or c-a*b
-          CombineMultiplyAdd(map_it->second, mir, false /* product is the second addend */,
-                             true /* is_wide */, is_sub);
-          ssa_mul_map.erase(map_it);
-        }
-        break;
-      default:
-        if (!ssa_mul_map.empty() && CanThrow(mir)) {
-          // Should not combine multiply and add MIRs across potential exception.
-          ssa_mul_map.clear();
-        }
-        break;
-    }
-
-    // Exclude the case where an MIR writes a vreg that a previous candidate multiply MIR
-    // uses: the current RA may allocate the same physical register to both, so the
-    // multiplier may already have been overwritten and the stale value must not feed the
-    // multiply-add insn.
-    if (!ssa_mul_map.empty()) {
-      for (auto it = ssa_mul_map.begin(); it != ssa_mul_map.end();) {
-        MIR* mul = it->second;
-        if (HasAntiDependency(mul, mir)) {
-          it = ssa_mul_map.erase(it);
-        } else {
-          ++it;
-        }
-      }
-    }
-
-    if (is_candidate_multiply &&
-        (GetRawUseCount(mir->ssa_rep->defs[0]) == 1) && (mir->next != nullptr)) {
-      ssa_mul_map.Put(mir->ssa_rep->defs[0], mir);
-    }
-  }
-}
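-
-// End-to-end example of this pass (editor's illustration, Dalvik-style):
-//   mul-int v0, v1, v2   // product v0 has a single use, so it is recorded
-//   add-int v3, v4, v0   // product is the second addend
-// becomes
-//   nop                             // the original multiply
-//   kMirOpMaddInt v3, v1, v2, v4    // v3 = v1 * v2 + v4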
-
-}  // namespace art
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
deleted file mode 100644
index a0cedff..0000000
--- a/compiler/dex/mir_optimization_test.cc
+++ /dev/null
@@ -1,1186 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <vector>
-
-#include "base/logging.h"
-#include "dataflow_iterator.h"
-#include "dataflow_iterator-inl.h"
-#include "dex/compiler_ir.h"
-#include "dex/mir_field_info.h"
-#include "gtest/gtest.h"
-
-namespace art {
-
-class MirOptimizationTest : public testing::Test {
- protected:
-  struct BBDef {
-    static constexpr size_t kMaxSuccessors = 4;
-    static constexpr size_t kMaxPredecessors = 4;
-
-    BBType type;
-    size_t num_successors;
-    BasicBlockId successors[kMaxSuccessors];
-    size_t num_predecessors;
-    BasicBlockId predecessors[kMaxPredecessors];
-  };
-
-  struct MethodDef {
-    uint16_t method_idx;
-    uintptr_t declaring_dex_file;
-    uint16_t declaring_class_idx;
-    uint16_t declaring_method_idx;
-    InvokeType invoke_type;
-    InvokeType sharp_type;
-    bool is_referrers_class;
-    bool is_initialized;
-  };
-
-  struct MIRDef {
-    BasicBlockId bbid;
-    Instruction::Code opcode;
-    uint32_t field_or_method_info;
-    uint32_t vA;
-    uint32_t vB;
-    uint32_t vC;
-  };
-
-#define DEF_SUCC0() \
-    0u, { }
-#define DEF_SUCC1(s1) \
-    1u, { s1 }
-#define DEF_SUCC2(s1, s2) \
-    2u, { s1, s2 }
-#define DEF_SUCC3(s1, s2, s3) \
-    3u, { s1, s2, s3 }
-#define DEF_SUCC4(s1, s2, s3, s4) \
-    4u, { s1, s2, s3, s4 }
-#define DEF_PRED0() \
-    0u, { }
-#define DEF_PRED1(p1) \
-    1u, { p1 }
-#define DEF_PRED2(p1, p2) \
-    2u, { p1, p2 }
-#define DEF_PRED3(p1, p2, p3) \
-    3u, { p1, p2, p3 }
-#define DEF_PRED4(p1, p2, p3, p4) \
-    4u, { p1, p2, p3, p4 }
-#define DEF_BB(type, succ, pred) \
-    { type, succ, pred }
-
-#define DEF_SGET_SPUT(bb, opcode, vA, field_info) \
-    { bb, opcode, field_info, vA, 0u, 0u }
-#define DEF_IGET_IPUT(bb, opcode, vA, vB, field_info) \
-    { bb, opcode, field_info, vA, vB, 0u }
-#define DEF_AGET_APUT(bb, opcode, vA, vB, vC) \
-    { bb, opcode, 0u, vA, vB, vC }
-#define DEF_INVOKE(bb, opcode, vC, method_info) \
-    { bb, opcode, method_info, 0u, 0u, vC }
-#define DEF_OTHER0(bb, opcode) \
-    { bb, opcode, 0u, 0u, 0u, 0u }
-#define DEF_OTHER1(bb, opcode, vA) \
-    { bb, opcode, 0u, vA, 0u, 0u }
-#define DEF_OTHER2(bb, opcode, vA, vB) \
-    { bb, opcode, 0u, vA, vB, 0u }
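-
-// Example expansion of the macros above (for illustration only):
-//   DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 1u)
-// yields the MIRDef
-//   { 3u, Instruction::SGET, 1u, 0u, 0u, 0u },
-// i.e. an SGET in BB #3 with vA == 0 that refers to sfield_lowering_infos_[1].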
-
-  void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
-    cu_.mir_graph->block_id_map_.clear();
-    cu_.mir_graph->block_list_.clear();
-    ASSERT_LT(3u, count);  // null, entry, exit and at least one bytecode block.
-    ASSERT_EQ(kNullBlock, defs[0].type);
-    ASSERT_EQ(kEntryBlock, defs[1].type);
-    ASSERT_EQ(kExitBlock, defs[2].type);
-    for (size_t i = 0u; i != count; ++i) {
-      const BBDef* def = &defs[i];
-      BasicBlock* bb = cu_.mir_graph->CreateNewBB(def->type);
-      if (def->num_successors <= 2) {
-        bb->successor_block_list_type = kNotUsed;
-        bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
-        bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
-      } else {
-        bb->successor_block_list_type = kPackedSwitch;
-        bb->fall_through = 0u;
-        bb->taken = 0u;
-        bb->successor_blocks.reserve(def->num_successors);
-        for (size_t j = 0u; j != def->num_successors; ++j) {
-          SuccessorBlockInfo* successor_block_info =
-              static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
-                                                               kArenaAllocSuccessors));
-          successor_block_info->block = j;
-          successor_block_info->key = 0u;  // Not used by class init check elimination.
-          bb->successor_blocks.push_back(successor_block_info);
-        }
-      }
-      bb->predecessors.assign(def->predecessors, def->predecessors + def->num_predecessors);
-      if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
-        bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
-            cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
-      }
-    }
-    ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
-    cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
-    ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
-    cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_[2];
-    ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
-  }
-
-  template <size_t count>
-  void PrepareBasicBlocks(const BBDef (&defs)[count]) {
-    DoPrepareBasicBlocks(defs, count);
-  }
-
-  void PrepareSingleBlock() {
-    static const BBDef bbs[] = {
-        DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-        DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-        DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(3)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(1)),
-    };
-    PrepareBasicBlocks(bbs);
-  }
-
-  void PrepareDiamond() {
-    static const BBDef bbs[] = {
-        DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-        DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-        DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),
-    };
-    PrepareBasicBlocks(bbs);
-  }
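-
-  // The CFG prepared above, for orientation (sketch):
-  //     1 (entry)
-  //     |
-  //     3
-  //    / \
-  //   4   5
-  //    \ /
-  //     6
-  //     |
-  //     2 (exit)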
-
-  void PrepareLoop() {
-    static const BBDef bbs[] = {
-        DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-        DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-        DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)),  // "taken" loops to self.
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
-    };
-    PrepareBasicBlocks(bbs);
-  }
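-
-  // The CFG prepared above (sketch): 1 -> 3 -> 4 -> 5 -> 2, with block 4
-  // branching back to itself on its "taken" edge.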
-
-  void PrepareNestedLoopsWhile_While() {
-    static const BBDef bbs[] = {
-        DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-        DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-        DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(8)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 8), DEF_PRED2(3, 7)),  // Outer while loop head.
-        DEF_BB(kDalvikByteCode, DEF_SUCC2(6, 7), DEF_PRED2(4, 6)),  // Inner while loop head.
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(5)),        // "taken" loops to inner head.
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(5)),        // "taken" loops to outer head.
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
-    };
-    PrepareBasicBlocks(bbs);
-  }
-
-  void PrepareNestedLoopsWhile_WhileWhile() {
-    static const BBDef bbs[] = {
-        DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-        DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-        DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(10)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 10), DEF_PRED2(3, 9)),   // Outer while loop head.
-        DEF_BB(kDalvikByteCode, DEF_SUCC2(6, 7), DEF_PRED2(4, 6)),    // Inner while loop head 1.
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(5)),          // Loops to inner head 1.
-        DEF_BB(kDalvikByteCode, DEF_SUCC2(8, 9), DEF_PRED2(5, 8)),    // Inner while loop head 2.
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(7), DEF_PRED1(7)),          // Loops to inner head 2.
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(7)),          // Loops to outer head.
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
-    };
-    PrepareBasicBlocks(bbs);
-  }
-
-  void PrepareNestedLoopsWhile_WhileWhile_WithExtraEdge() {
-    // Extra edge from the first inner loop body to second inner loop body (6u->8u).
-    static const BBDef bbs[] = {
-        DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-        DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-        DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(10)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 10), DEF_PRED2(3, 9)),   // Outer while loop head.
-        DEF_BB(kDalvikByteCode, DEF_SUCC2(6, 7), DEF_PRED2(4, 6)),    // Inner while loop head 1.
-        DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 8), DEF_PRED1(5)),       // Loops to inner head 1.
-        DEF_BB(kDalvikByteCode, DEF_SUCC2(8, 9), DEF_PRED2(5, 8)),    // Inner while loop head 2.
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(7), DEF_PRED2(7, 6)),       // Loops to inner head 2.
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(7)),          // Loops to outer head.
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
-    };
-    PrepareBasicBlocks(bbs);
-  }
-
-  void PrepareCatch() {
-    static const BBDef bbs[] = {
-        DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-        DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-        DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),     // The top.
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // The throwing insn.
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // Catch handler.
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),  // The merged block.
-    };
-    PrepareBasicBlocks(bbs);
-    BasicBlock* catch_handler = cu_.mir_graph->GetBasicBlock(5u);
-    catch_handler->catch_entry = true;
-    // Add successor block info to the check block.
-    BasicBlock* check_bb = cu_.mir_graph->GetBasicBlock(3u);
-    check_bb->successor_block_list_type = kCatch;
-    SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
-        (cu_.arena.Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessors));
-    successor_block_info->block = catch_handler->id;
-    check_bb->successor_blocks.push_back(successor_block_info);
-  }
-
-  void DoPrepareMethods(const MethodDef* defs, size_t count) {
-    cu_.mir_graph->method_lowering_infos_.clear();
-    cu_.mir_graph->method_lowering_infos_.reserve(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const MethodDef* def = &defs[i];
-      MirMethodLoweringInfo method_info(def->method_idx, def->invoke_type, false);
-      if (def->declaring_dex_file != 0u) {
-        method_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
-        method_info.declaring_class_idx_ = def->declaring_class_idx;
-        method_info.declaring_method_idx_ = def->declaring_method_idx;
-      }
-      ASSERT_EQ(def->invoke_type != kStatic, def->sharp_type != kStatic);
-      method_info.flags_ =
-          ((def->invoke_type == kStatic) ? MirMethodLoweringInfo::kFlagIsStatic : 0u) |
-          MirMethodLoweringInfo::kFlagFastPath |
-          (static_cast<uint16_t>(def->invoke_type) << MirMethodLoweringInfo::kBitInvokeTypeBegin) |
-          (static_cast<uint16_t>(def->sharp_type) << MirMethodLoweringInfo::kBitSharpTypeBegin) |
-          ((def->is_referrers_class) ? MirMethodLoweringInfo::kFlagIsReferrersClass : 0u) |
-          ((def->is_initialized) ? MirMethodLoweringInfo::kFlagClassIsInitialized : 0u);
-      ASSERT_EQ(def->declaring_dex_file != 0u, method_info.IsResolved());
-      cu_.mir_graph->method_lowering_infos_.push_back(method_info);
-    }
-  }
-
-  template <size_t count>
-  void PrepareMethods(const MethodDef (&defs)[count]) {
-    DoPrepareMethods(defs, count);
-  }
-
-  void DoPrepareMIRs(const MIRDef* defs, size_t count) {
-    mir_count_ = count;
-    mirs_ = cu_.arena.AllocArray<MIR>(count, kArenaAllocMIR);
-    uint64_t merged_df_flags = 0u;
-    for (size_t i = 0u; i != count; ++i) {
-      const MIRDef* def = &defs[i];
-      MIR* mir = &mirs_[i];
-      mir->dalvikInsn.opcode = def->opcode;
-      ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.size());
-      BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
-      bb->AppendMIR(mir);
-      if (IsInstructionIGetOrIPut(def->opcode)) {
-        ASSERT_LT(def->field_or_method_info, cu_.mir_graph->ifield_lowering_infos_.size());
-        mir->meta.ifield_lowering_info = def->field_or_method_info;
-        ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->field_or_method_info].MemAccessType(),
-                  IGetOrIPutMemAccessType(def->opcode));
-      } else if (IsInstructionSGetOrSPut(def->opcode)) {
-        ASSERT_LT(def->field_or_method_info, cu_.mir_graph->sfield_lowering_infos_.size());
-        mir->meta.sfield_lowering_info = def->field_or_method_info;
-        ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->field_or_method_info].MemAccessType(),
-                  SGetOrSPutMemAccessType(def->opcode));
-      } else if (IsInstructionInvoke(def->opcode)) {
-        ASSERT_LT(def->field_or_method_info, cu_.mir_graph->method_lowering_infos_.size());
-        mir->meta.method_lowering_info = def->field_or_method_info;
-      }
-      mir->dalvikInsn.vA = def->vA;
-      mir->dalvikInsn.vB = def->vB;
-      mir->dalvikInsn.vC = def->vC;
-      mir->ssa_rep = nullptr;
-      mir->offset = 2 * i;  // All insns need to be at least 2 code units long.
-      mir->optimization_flags = 0u;
-      merged_df_flags |= MIRGraph::GetDataFlowAttributes(def->opcode);
-    }
-    cu_.mir_graph->merged_df_flags_ = merged_df_flags;
-
-    code_item_ = static_cast<DexFile::CodeItem*>(
-        cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
-    memset(code_item_, 0, sizeof(DexFile::CodeItem));
-    code_item_->insns_size_in_code_units_ = 2u * count;
-    cu_.mir_graph->current_code_item_ = code_item_;
-  }
-
-  template <size_t count>
-  void PrepareMIRs(const MIRDef (&defs)[count]) {
-    DoPrepareMIRs(defs, count);
-  }
-
-  MirOptimizationTest()
-      : pool_(),
-        cu_(&pool_, kRuntimeISA, nullptr, nullptr),
-        mir_count_(0u),
-        mirs_(nullptr),
-        code_item_(nullptr) {
-    cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
-    cu_.access_flags = kAccStatic;  // Don't let "this" interfere with this test.
-  }
-
-  ArenaPool pool_;
-  CompilationUnit cu_;
-  size_t mir_count_;
-  MIR* mirs_;
-  DexFile::CodeItem* code_item_;
-};
-
-class ClassInitCheckEliminationTest : public MirOptimizationTest {
- protected:
-  struct SFieldDef {
-    uint16_t field_idx;
-    uintptr_t declaring_dex_file;
-    uint16_t declaring_class_idx;
-    uint16_t declaring_field_idx;
-    DexMemAccessType type;
-  };
-
-  void DoPrepareSFields(const SFieldDef* defs, size_t count) {
-    cu_.mir_graph->sfield_lowering_infos_.clear();
-    cu_.mir_graph->sfield_lowering_infos_.reserve(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const SFieldDef* def = &defs[i];
-      MirSFieldLoweringInfo field_info(def->field_idx, def->type);
-      if (def->declaring_dex_file != 0u) {
-        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
-        field_info.declaring_class_idx_ = def->declaring_class_idx;
-        field_info.declaring_field_idx_ = def->declaring_field_idx;
-        // We don't care about the volatile flag in these tests.
-      }
-      ASSERT_EQ(def->declaring_dex_file != 0u, field_info.IsResolved());
-      ASSERT_FALSE(field_info.IsClassInitialized());
-      cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
-    }
-  }
-
-  template <size_t count>
-  void PrepareSFields(const SFieldDef (&defs)[count]) {
-    DoPrepareSFields(defs, count);
-  }
-
-  void PerformClassInitCheckElimination() {
-    cu_.mir_graph->ComputeDFSOrders();
-    bool gate_result = cu_.mir_graph->EliminateClassInitChecksGate();
-    ASSERT_TRUE(gate_result);
-    RepeatingPreOrderDfsIterator iterator(cu_.mir_graph.get());
-    bool change = false;
-    for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
-      change = cu_.mir_graph->EliminateClassInitChecks(bb);
-    }
-    cu_.mir_graph->EliminateClassInitChecksEnd();
-  }
-
-  ClassInitCheckEliminationTest()
-      : MirOptimizationTest() {
-  }
-};
-
-class NullCheckEliminationTest : public MirOptimizationTest {
- protected:
-  struct IFieldDef {
-    uint16_t field_idx;
-    uintptr_t declaring_dex_file;
-    uint16_t declaring_class_idx;
-    uint16_t declaring_field_idx;
-    DexMemAccessType type;
-  };
-
-  void DoPrepareIFields(const IFieldDef* defs, size_t count) {
-    cu_.mir_graph->ifield_lowering_infos_.clear();
-    cu_.mir_graph->ifield_lowering_infos_.reserve(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const IFieldDef* def = &defs[i];
-      MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
-      if (def->declaring_dex_file != 0u) {
-        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
-        field_info.declaring_class_idx_ = def->declaring_class_idx;
-        field_info.declaring_field_idx_ = def->declaring_field_idx;
-        // We don't care about the volatile flag in these tests.
-      }
-      ASSERT_EQ(def->declaring_dex_file != 0u, field_info.IsResolved());
-      cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
-    }
-  }
-
-  template <size_t count>
-  void PrepareIFields(const IFieldDef (&defs)[count]) {
-    DoPrepareIFields(defs, count);
-  }
-
-  void PerformNullCheckElimination() {
-    // Make vregs in range [100, 1000) input registers, i.e. requiring a null check.
-    code_item_->registers_size_ = 1000;
-    code_item_->ins_size_ = 900;
-
-    cu_.mir_graph->ComputeDFSOrders();
-    bool gate_result = cu_.mir_graph->EliminateNullChecksGate();
-    ASSERT_TRUE(gate_result);
-    RepeatingPreOrderDfsIterator iterator(cu_.mir_graph.get());
-    bool change = false;
-    for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
-      change = cu_.mir_graph->EliminateNullChecks(bb);
-    }
-    cu_.mir_graph->EliminateNullChecksEnd();
-  }
-
-  NullCheckEliminationTest()
-      : MirOptimizationTest() {
-    static const MethodDef methods[] = {
-        { 0u, 1u, 0u, 0u, kDirect, kDirect, false, false },  // Dummy.
-    };
-    PrepareMethods(methods);
-  }
-};
-
-class SuspendCheckEliminationTest : public MirOptimizationTest {
- protected:
-  bool IsBackEdge(BasicBlockId branch_bb, BasicBlockId target_bb) {
-    BasicBlock* branch = cu_.mir_graph->GetBasicBlock(branch_bb);
-    return target_bb != NullBasicBlockId && cu_.mir_graph->IsBackEdge(branch, target_bb);
-  }
-
-  bool IsSuspendCheckEdge(BasicBlockId branch_bb, BasicBlockId target_bb) {
-    BasicBlock* branch = cu_.mir_graph->GetBasicBlock(branch_bb);
-    return cu_.mir_graph->IsSuspendCheckEdge(branch, target_bb);
-  }
-
-  void PerformSuspendCheckElimination() {
-    cu_.mir_graph->SSATransformationStart();
-    cu_.mir_graph->ComputeDFSOrders();
-    cu_.mir_graph->ComputeDominators();
-    cu_.mir_graph->ComputeTopologicalSortOrder();
-    cu_.mir_graph->SSATransformationEnd();
-
-    bool gate_result = cu_.mir_graph->EliminateSuspendChecksGate();
-    ASSERT_NE(gate_result, kLeafOptimization);
-    if (kLeafOptimization) {
-      // Even with kLeafOptimization on and Gate() refusing to allow SCE, we want
-      // to run the SCE test to avoid bitrot, so we need to initialize explicitly.
-      cu_.mir_graph->suspend_checks_in_loops_ =
-          cu_.mir_graph->arena_->AllocArray<uint32_t>(cu_.mir_graph->GetNumBlocks(),
-                                                      kArenaAllocMisc);
-    }
-
-    TopologicalSortIterator iterator(cu_.mir_graph.get());
-    bool change = false;
-    for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
-      change = cu_.mir_graph->EliminateSuspendChecks(bb);
-    }
-  }
-
-  SuspendCheckEliminationTest()
-      : MirOptimizationTest() {
-    static const MethodDef methods[] = {
-        { 0u, 1u, 0u, 0u, kDirect, kDirect, false, false },  // Dummy.
-    };
-    PrepareMethods(methods);
-  }
-};
-
-TEST_F(ClassInitCheckEliminationTest, SingleBlock) {
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, 0u, kDexMemAccessWord },
-      { 1u, 1u, 1u, 1u, kDexMemAccessWord },
-      { 2u, 1u, 2u, 2u, kDexMemAccessWord },
-      { 3u, 1u, 3u, 3u, kDexMemAccessWord },  // Same declaring class as sfield[4].
-      { 4u, 1u, 3u, 4u, kDexMemAccessWord },  // Same declaring class as sfield[3].
-      { 5u, 0u, 0u, 0u, kDexMemAccessWord },  // Unresolved.
-  };
-  static const MIRDef mirs[] = {
-      DEF_SGET_SPUT(3u, Instruction::SPUT, 0u, 5u),  // Unresolved.
-      DEF_SGET_SPUT(3u, Instruction::SPUT, 0u, 0u),
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 1u),
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 2u),
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 5u),  // Unresolved.
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 0u),
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 1u),
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 2u),
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 5u),  // Unresolved.
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 3u),
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 4u),
-  };
-  static const bool expected_ignore_clinit_check[] = {
-      false, false, false, false, true, true, true, true, true, false, true
-  };
-
-  PrepareSFields(sfields);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformClassInitCheckElimination();
-  ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_clinit_check[i],
-              (mirs_[i].optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0) << i;
-    EXPECT_EQ(expected_ignore_clinit_check[i],
-              (mirs_[i].optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0) << i;
-  }
-}
-
-TEST_F(ClassInitCheckEliminationTest, SingleBlockWithInvokes) {
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, 0u, kDexMemAccessWord },
-      { 1u, 1u, 1u, 1u, kDexMemAccessWord },
-      { 2u, 1u, 2u, 2u, kDexMemAccessWord },
-  };
-  static const MethodDef methods[] = {
-      { 0u, 1u, 0u, 0u, kStatic, kStatic, false, false },
-      { 1u, 1u, 1u, 1u, kStatic, kStatic, false, false },
-      { 2u, 1u, 2u, 2u, kStatic, kStatic, false, false },
-  };
-  static const MIRDef mirs[] = {
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 0u),
-      DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u /* dummy */, 0u),
-      DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u /* dummy */, 1u),
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 1u),
-      DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u /* dummy */, 2u),
-      DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u /* dummy */, 2u),
-  };
-  static const bool expected_class_initialized[] = {
-      false, true, false, true, false, true
-  };
-  static const bool expected_class_in_dex_cache[] = {
-      false, false, false, false, false, false
-  };
-
-  PrepareSFields(sfields);
-  PrepareMethods(methods);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformClassInitCheckElimination();
-  ASSERT_EQ(arraysize(expected_class_initialized), mir_count_);
-  ASSERT_EQ(arraysize(expected_class_in_dex_cache), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_class_initialized[i],
-              (mirs_[i].optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0) << i;
-    EXPECT_EQ(expected_class_in_dex_cache[i],
-              (mirs_[i].optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0) << i;
-  }
-}
-
-TEST_F(ClassInitCheckEliminationTest, Diamond) {
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, 0u, kDexMemAccessWord },
-      { 1u, 1u, 1u, 1u, kDexMemAccessWord },
-      { 2u, 1u, 2u, 2u, kDexMemAccessWord },
-      { 3u, 1u, 3u, 3u, kDexMemAccessWord },
-      { 4u, 1u, 4u, 4u, kDexMemAccessWord },
-      { 5u, 1u, 5u, 5u, kDexMemAccessWord },
-      { 6u, 1u, 6u, 6u, kDexMemAccessWord },
-      { 7u, 1u, 7u, 7u, kDexMemAccessWord },
-      { 8u, 1u, 8u, 8u, kDexMemAccessWord },   // Same declaring class as sfield[9].
-      { 9u, 1u, 8u, 9u, kDexMemAccessWord },   // Same declaring class as sfield[8].
-      { 10u, 0u, 0u, 0u, kDexMemAccessWord },  // Unresolved.
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 10u),  // Unresolved.
-      DEF_SGET_SPUT(3u, Instruction::SPUT, 0u, 10u),  // Unresolved.
-      DEF_SGET_SPUT(3u, Instruction::SPUT, 0u, 0u),
-      DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 0u),  // Eliminated (BB #3 dominates #6).
-      DEF_SGET_SPUT(4u, Instruction::SPUT, 0u, 1u),
-      DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 1u),  // Not eliminated (BB #4 doesn't dominate #6).
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 2u),
-      DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 2u),  // Eliminated (BB #3 dominates #4).
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 3u),
-      DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 3u),  // Eliminated (BB #3 dominates #5).
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 4u),
-      DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 4u),  // Eliminated (BB #3 dominates #6).
-      DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 5u),
-      DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 5u),  // Not eliminated (BB #4 doesn't dominate #6).
-      DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 6u),
-      DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 6u),  // Not eliminated (BB #5 doesn't dominate #6).
-      DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 7u),
-      DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 7u),
-      DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 7u),  // Eliminated (initialized in both #3 and #4).
-      DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 8u),
-      DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 9u),
-      DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 8u),  // Eliminated (with sfield[9] in BB #5).
-      DEF_SGET_SPUT(6u, Instruction::SPUT, 0u, 9u),  // Eliminated (with sfield[8] in BB #4).
-  };
-  static const bool expected_ignore_clinit_check[] = {
-      false, true,          // Unresolved: sfield[10]
-      false, true,          // sfield[0]
-      false, false,         // sfield[1]
-      false, true,          // sfield[2]
-      false, true,          // sfield[3]
-      false, true,          // sfield[4]
-      false, false,         // sfield[5]
-      false, false,         // sfield[6]
-      false, false, true,   // sfield[7]
-      false, false, true, true,  // sfield[8], sfield[9]
-  };
-
-  PrepareSFields(sfields);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  PerformClassInitCheckElimination();
-  ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_clinit_check[i],
-              (mirs_[i].optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0) << i;
-    EXPECT_EQ(expected_ignore_clinit_check[i],
-              (mirs_[i].optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0) << i;
-  }
-}
-
-TEST_F(ClassInitCheckEliminationTest, DiamondWithInvokes) {
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, 0u, kDexMemAccessWord },
-      { 1u, 1u, 1u, 1u, kDexMemAccessWord },
-      { 2u, 1u, 2u, 2u, kDexMemAccessWord },
-      { 3u, 1u, 3u, 3u, kDexMemAccessWord },
-      { 4u, 1u, 4u, 4u, kDexMemAccessWord },
-  };
-  static const MethodDef methods[] = {
-      { 0u, 1u, 0u, 0u, kStatic, kStatic, false, false },
-      { 1u, 1u, 1u, 1u, kStatic, kStatic, false, false },
-      { 2u, 1u, 2u, 2u, kStatic, kStatic, false, false },
-      { 3u, 1u, 3u, 3u, kStatic, kStatic, false, false },
-      { 4u, 1u, 4u, 4u, kStatic, kStatic, false, false },
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_SGET_SPUT(3u, Instruction::SPUT, 0u, 0u),
-      DEF_INVOKE(6u, Instruction::INVOKE_STATIC, 0u /* dummy */, 0u),
-      DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u /* dummy */, 1u),
-      DEF_SGET_SPUT(6u, Instruction::SPUT, 0u, 1u),
-      DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 2u),
-      DEF_INVOKE(5u, Instruction::INVOKE_STATIC, 0u /* dummy */, 2u),
-      DEF_SGET_SPUT(6u, Instruction::SPUT, 0u, 2u),
-      DEF_INVOKE(4u, Instruction::INVOKE_STATIC, 0u /* dummy */, 3u),
-      DEF_SGET_SPUT(5u, Instruction::SPUT, 0u, 3u),
-      DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 3u),
-      DEF_SGET_SPUT(4u, Instruction::SPUT, 0u, 4u),
-      DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 4u),
-      DEF_INVOKE(6u, Instruction::INVOKE_STATIC, 0u /* dummy */, 4u),
-  };
-  static const bool expected_class_initialized[] = {
-      false, true,    // BB #3 SPUT, BB#6 INVOKE_STATIC
-      false, true,    // BB #3 INVOKE_STATIC, BB#6 SPUT
-      false, false, true,   // BB #4 SGET, BB #5 INVOKE_STATIC, BB #6 SPUT
-      false, false, true,   // BB #4 INVOKE_STATIC, BB #5 SPUT, BB #6 SGET
-      false, false, true,   // BB #4 SPUT, BB #5 SGET, BB #6 INVOKE_STATIC
-  };
-  static const bool expected_class_in_dex_cache[] = {
-      false, false,   // BB #3 SPUT, BB#6 INVOKE_STATIC
-      false, false,   // BB #3 INVOKE_STATIC, BB#6 SPUT
-      false, false, false,  // BB #4 SGET, BB #5 INVOKE_STATIC, BB #6 SPUT
-      false, false, false,  // BB #4 INVOKE_STATIC, BB #5 SPUT, BB #6 SGET
-      false, false, false,  // BB #4 SPUT, BB #5 SGET, BB #6 INVOKE_STATIC
-  };
-
-  PrepareSFields(sfields);
-  PrepareMethods(methods);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  PerformClassInitCheckElimination();
-  ASSERT_EQ(arraysize(expected_class_initialized), mir_count_);
-  ASSERT_EQ(arraysize(expected_class_in_dex_cache), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_class_initialized[i],
-              (mirs_[i].optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0) << i;
-    EXPECT_EQ(expected_class_in_dex_cache[i],
-              (mirs_[i].optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0) << i;
-  }
-}
-
-TEST_F(ClassInitCheckEliminationTest, Loop) {
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, 0u, kDexMemAccessWord },
-      { 1u, 1u, 1u, 1u, kDexMemAccessWord },
-      { 2u, 1u, 2u, 2u, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 0u),
-      DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 0u),  // Eliminated.
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 1u),
-      DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 1u),  // Eliminated.
-      DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 2u),
-      DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 2u),  // Eliminated.
-  };
-  static const bool expected_ignore_clinit_check[] = {
-      false, true, false, true, false, true,
-  };
-
-  PrepareSFields(sfields);
-  PrepareLoop();
-  PrepareMIRs(mirs);
-  PerformClassInitCheckElimination();
-  ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_clinit_check[i],
-              (mirs_[i].optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0) << i;
-    EXPECT_EQ(expected_ignore_clinit_check[i],
-              (mirs_[i].optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0) << i;
-  }
-}
-
-TEST_F(ClassInitCheckEliminationTest, LoopWithInvokes) {
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, 0u, kDexMemAccessWord },
-  };
-  static const MethodDef methods[] = {
-      { 0u, 1u, 0u, 0u, kStatic, kStatic, false, false },
-      { 1u, 1u, 1u, 1u, kStatic, kStatic, false, false },
-      { 2u, 1u, 2u, 2u, kStatic, kStatic, false, false },
-  };
-  static const MIRDef mirs[] = {
-      DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u /* dummy */, 0u),
-      DEF_INVOKE(4u, Instruction::INVOKE_STATIC, 0u /* dummy */, 0u),
-      DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u /* dummy */, 1u),
-      DEF_INVOKE(5u, Instruction::INVOKE_STATIC, 0u /* dummy */, 1u),
-      DEF_INVOKE(4u, Instruction::INVOKE_STATIC, 0u /* dummy */, 2u),
-      DEF_INVOKE(5u, Instruction::INVOKE_STATIC, 0u /* dummy */, 2u),
-      DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 0u),
-  };
-  static const bool expected_class_initialized[] = {
-      false, true, false, true, false, true, true,
-  };
-  static const bool expected_class_in_dex_cache[] = {
-      false, false, false, false, false, false, false,
-  };
-
-  PrepareSFields(sfields);
-  PrepareMethods(methods);
-  PrepareLoop();
-  PrepareMIRs(mirs);
-  PerformClassInitCheckElimination();
-  ASSERT_EQ(arraysize(expected_class_initialized), mir_count_);
-  ASSERT_EQ(arraysize(expected_class_in_dex_cache), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_class_initialized[i],
-              (mirs_[i].optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0) << i;
-    EXPECT_EQ(expected_class_in_dex_cache[i],
-              (mirs_[i].optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0) << i;
-  }
-}
-
-TEST_F(ClassInitCheckEliminationTest, Catch) {
-  static const SFieldDef sfields[] = {
-      { 0u, 1u, 0u, 0u, kDexMemAccessWord },
-      { 1u, 1u, 1u, 1u, kDexMemAccessWord },
-      { 2u, 1u, 2u, 2u, kDexMemAccessWord },
-      { 3u, 1u, 3u, 3u, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 0u),  // Before the exception edge.
-      DEF_SGET_SPUT(3u, Instruction::SGET, 0u, 1u),  // Before the exception edge.
-      DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 2u),  // After the exception edge.
-      DEF_SGET_SPUT(4u, Instruction::SGET, 0u, 3u),  // After the exception edge.
-      DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 0u),  // In catch handler; eliminated.
-      DEF_SGET_SPUT(5u, Instruction::SGET, 0u, 2u),  // In catch handler; not eliminated.
-      DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 0u),  // Class init check eliminated.
-      DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 1u),  // Class init check eliminated.
-      DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 2u),  // Class init check eliminated.
-      DEF_SGET_SPUT(6u, Instruction::SGET, 0u, 3u),  // Class init check not eliminated.
-  };
-  static const bool expected_ignore_clinit_check[] = {
-      false, false, false, false, true, false, true, true, true, false
-  };
-
-  PrepareSFields(sfields);
-  PrepareCatch();
-  PrepareMIRs(mirs);
-  PerformClassInitCheckElimination();
-  ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_clinit_check[i],
-              (mirs_[i].optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0) << i;
-    EXPECT_EQ(expected_ignore_clinit_check[i],
-              (mirs_[i].optimization_flags & MIR_CLASS_IS_IN_DEX_CACHE) != 0) << i;
-  }
-}
-
-TEST_F(NullCheckEliminationTest, SingleBlock) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, 0u, kDexMemAccessWord },
-      { 1u, 1u, 0u, 1u, kDexMemAccessWord },
-      { 2u, 1u, 0u, 2u, kDexMemAccessObject },
-  };
-  static const MIRDef mirs[] = {
-      DEF_IGET_IPUT(3u, Instruction::IGET_OBJECT, 0u, 100u, 2u),
-      DEF_IGET_IPUT(3u, Instruction::IGET, 1u, 0u, 1u),
-      DEF_IGET_IPUT(3u, Instruction::IGET_OBJECT, 2u, 100u, 2u),  // Differs from 0u (no LVN here).
-      DEF_IGET_IPUT(3u, Instruction::IGET, 3u, 2u, 1u),
-      DEF_IGET_IPUT(3u, Instruction::IGET, 4u, 101u, 0u),
-      DEF_IGET_IPUT(3u, Instruction::IGET, 5u, 102u, 0u),
-      DEF_IGET_IPUT(3u, Instruction::IGET, 6u, 103u, 0u),
-      DEF_IGET_IPUT(3u, Instruction::IGET, 7u, 103u, 1u),
-      DEF_IGET_IPUT(3u, Instruction::IPUT, 8u, 104u, 0u),
-      DEF_IGET_IPUT(3u, Instruction::IPUT, 9u, 104u, 1u),
-      DEF_IGET_IPUT(3u, Instruction::IGET, 10u, 105u, 0u),
-      DEF_IGET_IPUT(3u, Instruction::IPUT, 11u, 105u, 1u),
-      DEF_IGET_IPUT(3u, Instruction::IPUT, 12u, 106u, 0u),
-      DEF_IGET_IPUT(3u, Instruction::IGET, 13u, 106u, 1u),
-      DEF_INVOKE(3u, Instruction::INVOKE_DIRECT, 107, 0u /* dummy */),
-      DEF_IGET_IPUT(3u, Instruction::IGET, 15u, 107u, 1u),
-      DEF_IGET_IPUT(3u, Instruction::IGET, 16u, 108u, 0u),
-      DEF_INVOKE(3u, Instruction::INVOKE_DIRECT, 108, 0u /* dummy */),
-      DEF_AGET_APUT(3u, Instruction::AGET, 18u, 109u, 110u),
-      DEF_AGET_APUT(3u, Instruction::APUT, 19u, 109u, 111u),
-      DEF_OTHER2(3u, Instruction::ARRAY_LENGTH, 20u, 112u),
-      DEF_AGET_APUT(3u, Instruction::AGET, 21u, 112u, 113u),
-      DEF_OTHER1(3u, Instruction::MONITOR_ENTER, 114u),
-      DEF_OTHER1(3u, Instruction::MONITOR_EXIT, 114u),
-  };
-  static const bool expected_ignore_null_check[] = {
-      false, false, true, false /* Not doing LVN. */,
-      false, true /* Set before running NCE. */,
-      false, true,  // IGET, IGET
-      false, true,  // IPUT, IPUT
-      false, true,  // IGET, IPUT
-      false, true,  // IPUT, IGET
-      false, true,  // INVOKE, IGET
-      false, true,  // IGET, INVOKE
-      false, true,  // AGET, APUT
-      false, true,  // ARRAY_LENGTH, AGET
-      false, true,  // MONITOR_ENTER, MONITOR_EXIT
-  };
-
-  PrepareIFields(ifields);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-
-  // Mark IGET 5u as null-checked to test that NCE doesn't clear this flag.
-  mirs_[5u].optimization_flags |= MIR_IGNORE_NULL_CHECK;
-
-  PerformNullCheckElimination();
-  ASSERT_EQ(arraysize(expected_ignore_null_check), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_null_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
-  }
-}
-
-TEST_F(NullCheckEliminationTest, Diamond) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, 0u, kDexMemAccessWord },
-      { 1u, 1u, 0u, 1u, kDexMemAccessWord },
-      { 2u, 1u, 0u, 2u, kDexMemAccessObject },  // int[].
-  };
-  static const MIRDef mirs[] = {
-      // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
-      DEF_IGET_IPUT(3u, Instruction::IPUT, 0u, 100u, 0u),
-      DEF_IGET_IPUT(6u, Instruction::IGET, 1u, 100u, 1u),  // Eliminated (BB #3 dominates #6).
-      DEF_IGET_IPUT(3u, Instruction::IGET, 2u, 101u, 0u),
-      DEF_IGET_IPUT(4u, Instruction::IPUT, 3u, 101u, 0u),  // Eliminated (BB #3 dominates #4).
-      DEF_IGET_IPUT(3u, Instruction::IGET, 4u, 102u, 0u),
-      DEF_IGET_IPUT(5u, Instruction::IPUT, 5u, 102u, 1u),  // Eliminated (BB #3 dominates #5).
-      DEF_IGET_IPUT(4u, Instruction::IPUT, 6u, 103u, 0u),
-      DEF_IGET_IPUT(6u, Instruction::IPUT, 7u, 103u, 1u),  // Not eliminated (going through BB #5).
-      DEF_IGET_IPUT(5u, Instruction::IGET, 8u, 104u, 1u),
-      DEF_IGET_IPUT(6u, Instruction::IGET, 9u, 104u, 0u),  // Not eliminated (going through BB #4).
-      DEF_INVOKE(4u, Instruction::INVOKE_DIRECT, 105u, 0u /* dummy */),
-      DEF_IGET_IPUT(5u, Instruction::IGET, 11u, 105u, 1u),
-      DEF_IGET_IPUT(6u, Instruction::IPUT, 12u, 105u, 0u),  // Eliminated.
-      DEF_IGET_IPUT(3u, Instruction::IGET_OBJECT, 13u, 106u, 2u),
-      DEF_OTHER1(3u, Instruction::IF_EQZ, 13u),            // Last insn in the BB #3.
-      DEF_OTHER2(5u, Instruction::NEW_ARRAY, 13u, 107u),
-      DEF_AGET_APUT(6u, Instruction::AGET, 16u, 13u, 108u),  // Eliminated.
-  };
-  static const bool expected_ignore_null_check[] = {
-      false, true,   // BB #3 IPUT, BB #6 IGET
-      false, true,   // BB #3 IGET, BB #4 IPUT
-      false, true,   // BB #3 IGET, BB #5 IPUT
-      false, false,  // BB #4 IPUT, BB #6 IPUT
-      false, false,  // BB #5 IGET, BB #6 IGET
-      false, false, true,  // BB #4 INVOKE, BB #5 IGET, BB #6 IPUT
-      false, false,  // BB #3 IGET_OBJECT & IF_EQZ
-      false, true,   // BB #5 NEW_ARRAY, BB #6 AGET
-  };
-
-  PrepareIFields(ifields);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  PerformNullCheckElimination();
-  ASSERT_EQ(arraysize(expected_ignore_null_check), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_null_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
-  }
-}
-
-TEST_F(NullCheckEliminationTest, Loop) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, 0u, kDexMemAccessWord },
-      { 1u, 1u, 1u, 1u, kDexMemAccessWord },
-  };
-  static const MIRDef mirs[] = {
-      DEF_IGET_IPUT(3u, Instruction::IGET, 0u, 100u, 0u),
-      DEF_IGET_IPUT(4u, Instruction::IGET, 1u, 101u, 0u),
-      DEF_IGET_IPUT(5u, Instruction::IGET, 2u, 100u, 1u),  // Eliminated.
-      DEF_IGET_IPUT(5u, Instruction::IGET, 3u, 101u, 1u),  // Eliminated.
-      DEF_IGET_IPUT(3u, Instruction::IGET, 4u, 102u, 0u),
-      DEF_IGET_IPUT(4u, Instruction::IGET, 5u, 102u, 1u),  // Not eliminated (MOVE_OBJECT_16).
-      DEF_OTHER2(4u, Instruction::MOVE_OBJECT_16, 102u, 103u),
-  };
-  static const bool expected_ignore_null_check[] = {
-      false, false, true, true,
-      false, false, false,
-  };
-
-  PrepareIFields(ifields);
-  PrepareLoop();
-  PrepareMIRs(mirs);
-  PerformNullCheckElimination();
-  ASSERT_EQ(arraysize(expected_ignore_null_check), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_null_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
-  }
-}
-
-TEST_F(NullCheckEliminationTest, Catch) {
-  static const IFieldDef ifields[] = {
-      { 0u, 1u, 0u, 0u, kDexMemAccessWord },
-      { 1u, 1u, 1u, 1u, kDexMemAccessWord },
-  };
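-  // Only null checks performed before the exception edge (BB #3) are guaranteed to have
-  // executed when the catch handler (BB #5) is entered.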
-  static const MIRDef mirs[] = {
-      DEF_IGET_IPUT(3u, Instruction::IGET, 0u, 100u, 0u),  // Before the exception edge.
-      DEF_IGET_IPUT(3u, Instruction::IGET, 1u, 101u, 0u),  // Before the exception edge.
-      DEF_IGET_IPUT(4u, Instruction::IGET, 2u, 102u, 0u),  // After the exception edge.
-      DEF_IGET_IPUT(4u, Instruction::IGET, 3u, 103u, 0u),  // After the exception edge.
-      DEF_IGET_IPUT(5u, Instruction::IGET, 4u, 100u, 1u),  // In catch handler; eliminated.
-      DEF_IGET_IPUT(5u, Instruction::IGET, 5u, 102u, 1u),  // In catch handler; not eliminated.
-      DEF_IGET_IPUT(6u, Instruction::IGET, 6u, 100u, 0u),  // Null check eliminated.
-      DEF_IGET_IPUT(6u, Instruction::IGET, 6u, 101u, 1u),  // Null check eliminated.
-      DEF_IGET_IPUT(6u, Instruction::IGET, 6u, 102u, 0u),  // Null check eliminated.
-      DEF_IGET_IPUT(6u, Instruction::IGET, 6u, 103u, 1u),  // Null check not eliminated.
-  };
-  static const bool expected_ignore_null_check[] = {
-      false, false, false, false, true, false, true, true, true, false
-  };
-
-  PrepareIFields(ifields);
-  PrepareCatch();
-  PrepareMIRs(mirs);
-  PerformNullCheckElimination();
-  ASSERT_EQ(arraysize(expected_ignore_null_check), mir_count_);
-  for (size_t i = 0u; i != arraysize(mirs); ++i) {
-    EXPECT_EQ(expected_ignore_null_check[i],
-              (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
-  }
-}
-
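-// For the suspend check elimination tests below: a loop back edge keeps its suspend
-// check unless every path around the loop contains an invoke, which is itself a
-// suspend point.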
-TEST_F(SuspendCheckEliminationTest, LoopNoElimination) {
-  static const MIRDef mirs[] = {
-    DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u, 0u),  // Force the pass to run.
-    DEF_OTHER1(4u, Instruction::IF_NEZ, 1u),             // Edge back.
-  };
-
-  PrepareLoop();
-  PrepareMIRs(mirs);
-  PerformSuspendCheckElimination();
-  ASSERT_TRUE(IsBackEdge(4u, 4u));
-  EXPECT_TRUE(IsSuspendCheckEdge(4u, 4u));  // Suspend point on loop to self.
-}
-
-TEST_F(SuspendCheckEliminationTest, LoopElimination) {
-  static const MIRDef mirs[] = {
-    DEF_INVOKE(4u, Instruction::INVOKE_STATIC, 0u, 0u),  // Invoke in the loop.
-    DEF_OTHER1(4u, Instruction::IF_NEZ, 1u),             // Edge back.
-  };
-
-  PrepareLoop();
-  PrepareMIRs(mirs);
-  PerformSuspendCheckElimination();
-  ASSERT_TRUE(IsBackEdge(4u, 4u));
-  EXPECT_FALSE(IsSuspendCheckEdge(4u, 4u));  // No suspend point on loop to self.
-}
-
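-// In the While_While tests, BB #4 is the outer loop head, BB #5 the inner loop head,
-// BB #6 the inner loop body and BB #7 the outer loop tail; the back edges are
-// BB #6 -> BB #5 and BB #7 -> BB #4.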
-TEST_F(SuspendCheckEliminationTest, While_While_NoElimination) {
-  static const MIRDef mirs[] = {
-    DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u, 0u),  // Force the pass to run.
-    DEF_OTHER1(4u, Instruction::IF_NEZ, 1u),             // Edge out of outer loop.
-    DEF_OTHER1(5u, Instruction::IF_NEZ, 2u),             // Edge out of inner loop.
-    DEF_OTHER0(6u, Instruction::GOTO),                   // Edge back to inner loop head.
-    DEF_OTHER0(7u, Instruction::GOTO),                   // Edge back to outer loop head.
-  };
-
-  PrepareNestedLoopsWhile_While();
-  PrepareMIRs(mirs);
-  PerformSuspendCheckElimination();
-  ASSERT_TRUE(IsBackEdge(6u, 5u));
-  EXPECT_TRUE(IsSuspendCheckEdge(6u, 5u));
-  ASSERT_TRUE(IsBackEdge(7u, 4u));
-  EXPECT_TRUE(IsSuspendCheckEdge(7u, 4u));
-}
-
-TEST_F(SuspendCheckEliminationTest, While_While_InvokeInOuterLoopHead) {
-  static const MIRDef mirs[] = {
-    DEF_INVOKE(4u, Instruction::INVOKE_STATIC, 0u, 0u),  // Invoke in outer loop head.
-    DEF_OTHER1(4u, Instruction::IF_NEZ, 1u),             // Edge out of outer loop.
-    DEF_OTHER1(5u, Instruction::IF_NEZ, 2u),             // Edge out of inner loop.
-    DEF_OTHER0(6u, Instruction::GOTO),                   // Edge back to inner loop head.
-    DEF_OTHER0(7u, Instruction::GOTO),                   // Edge back to outer loop head.
-  };
-
-  PrepareNestedLoopsWhile_While();
-  PrepareMIRs(mirs);
-  PerformSuspendCheckElimination();
-  ASSERT_TRUE(IsBackEdge(6u, 5u));
-  EXPECT_TRUE(IsSuspendCheckEdge(6u, 5u));
-  ASSERT_TRUE(IsBackEdge(7u, 4u));
-  EXPECT_FALSE(IsSuspendCheckEdge(7u, 4u));
-}
-
-TEST_F(SuspendCheckEliminationTest, While_While_InvokeInOuterLoopBody) {
-  static const MIRDef mirs[] = {
-    DEF_OTHER1(4u, Instruction::IF_NEZ, 1u),             // Edge out of outer loop.
-    DEF_OTHER1(5u, Instruction::IF_NEZ, 2u),             // Edge out of inner loop.
-    DEF_OTHER0(6u, Instruction::GOTO),                   // Edge back to inner loop head.
-    DEF_INVOKE(7u, Instruction::INVOKE_STATIC, 0u, 0u),  // Invoke in outer loop body.
-    DEF_OTHER0(7u, Instruction::GOTO),                   // Edge back to outer loop head.
-  };
-
-  PrepareNestedLoopsWhile_While();
-  PrepareMIRs(mirs);
-  PerformSuspendCheckElimination();
-  ASSERT_TRUE(IsBackEdge(6u, 5u));
-  EXPECT_TRUE(IsSuspendCheckEdge(6u, 5u));
-  ASSERT_TRUE(IsBackEdge(7u, 4u));
-  EXPECT_FALSE(IsSuspendCheckEdge(7u, 4u));
-}
-
-TEST_F(SuspendCheckEliminationTest, While_While_InvokeInInnerLoopHead) {
-  static const MIRDef mirs[] = {
-    DEF_OTHER1(4u, Instruction::IF_NEZ, 1u),             // Edge out of outer loop.
-    DEF_INVOKE(5u, Instruction::INVOKE_STATIC, 0u, 0u),  // Invoke in inner loop head.
-    DEF_OTHER1(5u, Instruction::IF_NEZ, 2u),             // Edge out of inner loop.
-    DEF_OTHER0(6u, Instruction::GOTO),                   // Edge back to inner loop head.
-    DEF_OTHER0(7u, Instruction::GOTO),                   // Edge back to outer loop head.
-  };
-
-  PrepareNestedLoopsWhile_While();
-  PrepareMIRs(mirs);
-  PerformSuspendCheckElimination();
-  ASSERT_TRUE(IsBackEdge(6u, 5u));
-  EXPECT_FALSE(IsSuspendCheckEdge(6u, 5u));
-  ASSERT_TRUE(IsBackEdge(7u, 4u));
-  EXPECT_FALSE(IsSuspendCheckEdge(7u, 4u));
-}
-
-TEST_F(SuspendCheckEliminationTest, While_While_InvokeInInnerLoopBody) {
-  static const MIRDef mirs[] = {
-    DEF_OTHER1(4u, Instruction::IF_NEZ, 1u),             // Edge out of outer loop.
-    DEF_OTHER1(5u, Instruction::IF_NEZ, 2u),             // Edge out of inner loop.
-    DEF_INVOKE(6u, Instruction::INVOKE_STATIC, 0u, 0u),  // Invoke in inner loop body.
-    DEF_OTHER0(6u, Instruction::GOTO),                   // Edge back to inner loop head.
-    DEF_OTHER0(7u, Instruction::GOTO),                   // Edge back to outer loop head.
-  };
-
-  PrepareNestedLoopsWhile_While();
-  PrepareMIRs(mirs);
-  PerformSuspendCheckElimination();
-  ASSERT_TRUE(IsBackEdge(6u, 5u));
-  EXPECT_FALSE(IsSuspendCheckEdge(6u, 5u));
-  ASSERT_TRUE(IsBackEdge(7u, 4u));
-  EXPECT_TRUE(IsSuspendCheckEdge(7u, 4u));
-}
-
-TEST_F(SuspendCheckEliminationTest, While_WhileWhile_InvokeInFirstInnerLoopHead) {
-  static const MIRDef mirs[] = {
-    DEF_OTHER1(4u, Instruction::IF_NEZ, 1u),             // Edge out of outer loop.
-    DEF_INVOKE(5u, Instruction::INVOKE_STATIC, 0u, 0u),  // Invoke in first inner loop head.
-    DEF_OTHER1(5u, Instruction::IF_NEZ, 2u),             // Edge out of inner loop 1.
-    DEF_OTHER0(6u, Instruction::GOTO),                   // Edge back to inner loop 1 head.
-    DEF_OTHER1(7u, Instruction::IF_NEZ, 2u),             // Edge out of inner loop 2.
-    DEF_OTHER0(8u, Instruction::GOTO),                   // Edge back to inner loop 2 head.
-    DEF_OTHER0(9u, Instruction::GOTO),                   // Edge back to outer loop head.
-  };
-
-  PrepareNestedLoopsWhile_WhileWhile();
-  PrepareMIRs(mirs);
-  PerformSuspendCheckElimination();
-  ASSERT_TRUE(IsBackEdge(6u, 5u));
-  EXPECT_FALSE(IsSuspendCheckEdge(6u, 5u));
-  ASSERT_TRUE(IsBackEdge(8u, 7u));
-  EXPECT_TRUE(IsSuspendCheckEdge(8u, 7u));
-  ASSERT_TRUE(IsBackEdge(9u, 4u));
-  EXPECT_FALSE(IsSuspendCheckEdge(9u, 4u));
-}
-
-TEST_F(SuspendCheckEliminationTest, While_WhileWhile_InvokeInFirstInnerLoopBody) {
-  static const MIRDef mirs[] = {
-    DEF_OTHER1(4u, Instruction::IF_NEZ, 1u),             // Edge out of outer loop.
-    DEF_OTHER1(5u, Instruction::IF_NEZ, 2u),             // Edge out of inner loop 1.
-    DEF_INVOKE(6u, Instruction::INVOKE_STATIC, 0u, 0u),  // Invoke in first inner loop body.
-    DEF_OTHER0(6u, Instruction::GOTO),                   // Edge back to inner loop 1 head.
-    DEF_OTHER1(7u, Instruction::IF_NEZ, 2u),             // Edge out of inner loop 2.
-    DEF_OTHER0(8u, Instruction::GOTO),                   // Edge back to inner loop 2 head.
-    DEF_OTHER0(9u, Instruction::GOTO),                   // Edge back to outer loop head.
-  };
-
-  PrepareNestedLoopsWhile_WhileWhile();
-  PrepareMIRs(mirs);
-  PerformSuspendCheckElimination();
-  ASSERT_TRUE(IsBackEdge(6u, 5u));
-  EXPECT_FALSE(IsSuspendCheckEdge(6u, 5u));
-  ASSERT_TRUE(IsBackEdge(8u, 7u));
-  EXPECT_TRUE(IsSuspendCheckEdge(8u, 7u));
-  ASSERT_TRUE(IsBackEdge(9u, 4u));
-  EXPECT_TRUE(IsSuspendCheckEdge(9u, 4u));
-}
-
-TEST_F(SuspendCheckEliminationTest, While_WhileWhile_WithExtraEdge_InvokeInFirstInnerLoopBody) {
-  static const MIRDef mirs[] = {
-    DEF_OTHER1(4u, Instruction::IF_NEZ, 1u),             // Edge out of outer loop.
-    DEF_OTHER1(5u, Instruction::IF_NEZ, 2u),             // Edge out of inner loop 1.
-    DEF_INVOKE(6u, Instruction::INVOKE_STATIC, 0u, 0u),  // Invoke in first inner loop body.
-    DEF_OTHER0(6u, Instruction::GOTO),                   // Edge back to inner loop 1 head.
-    DEF_OTHER1(7u, Instruction::IF_NEZ, 2u),             // Edge out of inner loop 2.
-    DEF_OTHER0(8u, Instruction::GOTO),                   // Edge back to inner loop 2 head.
-    DEF_OTHER0(9u, Instruction::GOTO),                   // Edge back to outer loop head.
-  };
-
-  PrepareNestedLoopsWhile_WhileWhile_WithExtraEdge();
-  PrepareMIRs(mirs);
-  PerformSuspendCheckElimination();
-  ASSERT_TRUE(IsBackEdge(6u, 5u));
-  EXPECT_FALSE(IsSuspendCheckEdge(6u, 5u));
-  ASSERT_TRUE(IsBackEdge(8u, 7u));
-  EXPECT_TRUE(IsSuspendCheckEdge(8u, 7u));  // Unaffected by the extra edge.
-  ASSERT_TRUE(IsBackEdge(9u, 4u));
-  EXPECT_TRUE(IsSuspendCheckEdge(9u, 4u));
-}
-
-TEST_F(SuspendCheckEliminationTest, While_WhileWhile_WithExtraEdge_InvokeInSecondInnerLoopHead) {
-  static const MIRDef mirs[] = {
-    DEF_OTHER1(4u, Instruction::IF_NEZ, 1u),             // Edge out of outer loop.
-    DEF_OTHER1(5u, Instruction::IF_NEZ, 2u),             // Edge out of inner loop 1.
-    DEF_OTHER0(6u, Instruction::GOTO),                   // Edge back to inner loop 1 head.
-    DEF_INVOKE(7u, Instruction::INVOKE_STATIC, 0u, 0u),  // Invoke in second inner loop head.
-    DEF_OTHER1(7u, Instruction::IF_NEZ, 2u),             // Edge out of inner loop 2.
-    DEF_OTHER0(8u, Instruction::GOTO),                   // Edge back to inner loop 2 head.
-    DEF_OTHER0(9u, Instruction::GOTO),                   // Edge back to outer loop head.
-  };
-
-  PrepareNestedLoopsWhile_WhileWhile_WithExtraEdge();
-  PrepareMIRs(mirs);
-  PerformSuspendCheckElimination();
-  ASSERT_TRUE(IsBackEdge(6u, 5u));
-  EXPECT_TRUE(IsSuspendCheckEdge(6u, 5u));
-  ASSERT_TRUE(IsBackEdge(8u, 7u));
-  EXPECT_FALSE(IsSuspendCheckEdge(8u, 7u));  // Unaffected by the extra edge.
-  ASSERT_TRUE(IsBackEdge(9u, 4u));
-  EXPECT_FALSE(IsSuspendCheckEdge(9u, 4u));
-}
-
-}  // namespace art
diff --git a/compiler/dex/pass.h b/compiler/dex/pass.h
deleted file mode 100644
index 16414ef..0000000
--- a/compiler/dex/pass.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_PASS_H_
-#define ART_COMPILER_DEX_PASS_H_
-
-#include <string>
-
-#include "base/logging.h"
-
-namespace art {
-
-// Forward declarations.
-class BasicBlock;
-class Pass;
-
-// Empty pass data class; it can be extended by any pass extending the base Pass class.
-class PassDataHolder {
-};
-
-/**
- * @class Pass
- * @brief Base Pass class; it can be extended to define more precisely how a pass performs its work.
- */
-class Pass {
- public:
-  explicit Pass(const char* name)
-    : pass_name_(name) {
-  }
-
-  virtual ~Pass() {
-  }
-
-  virtual const char* GetName() const {
-    return pass_name_;
-  }
-
-  /**
-   * @brief Gate for the pass: determines whether or not to execute the pass, given the data.
-   * @param data the PassDataHolder.
-   * @return whether or not to execute the pass.
-   */
-  virtual bool Gate(const PassDataHolder* data ATTRIBUTE_UNUSED) const {
-    // Base class says yes.
-    return true;
-  }
-
-  /**
-   * @brief Start of the pass: called before the Worker function.
-   */
-  virtual void Start(PassDataHolder* data ATTRIBUTE_UNUSED) const {
-  }
-
-  /**
-   * @brief End of the pass: called after walking the BasicBlocks.
-   */
-  virtual void End(PassDataHolder* data ATTRIBUTE_UNUSED) const {
-  }
-
-  /**
-   * @param data the object containing data necessary for the pass.
-   * @return whether or not there was a change while walking the BasicBlock.
-   */
-  virtual bool Worker(PassDataHolder* data ATTRIBUTE_UNUSED) const {
-    // Passes that do all their work in Start() or End() should not allow useless node iteration.
-    LOG(FATAL) << "Unsupported default Worker() used for " << GetName();
-    UNREACHABLE();
-  }
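-
-  // A minimal sketch of a (hypothetical) subclass overriding the extension points:
-  //   class MyPass : public Pass {
-  //    public:
-  //     MyPass() : Pass("MyPass") { }
-  //     bool Worker(PassDataHolder* data) const OVERRIDE { /* per-node work */ return false; }
-  //   };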
-
- protected:
-  /** @brief The pass name: used to look up a pass to run, and for debugging. */
-  const char* const pass_name_;
-
- private:
-  // In order to make all passes non-copyable.
-  DISALLOW_COPY_AND_ASSIGN(Pass);
-};
-}  // namespace art
-#endif  // ART_COMPILER_DEX_PASS_H_
diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h
deleted file mode 100644
index 34a6f63..0000000
--- a/compiler/dex/pass_driver.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_PASS_DRIVER_H_
-#define ART_COMPILER_DEX_PASS_DRIVER_H_
-
-#include <vector>
-
-#include "base/logging.h"
-#include "pass.h"
-#include "pass_manager.h"
-
-namespace art {
-
-class Pass;
-class PassDataHolder;
-class PassDriver;
-class PassManager;
-
-// Empty holder for the constructor.
-class PassDriverDataHolder {
-};
-
-/**
- * @class PassDriver
- * @brief PassDriver is the wrapper around all Pass instances, responsible for executing them.
- */
-class PassDriver {
- public:
-  explicit PassDriver(const PassManager* const pass_manager) : pass_manager_(pass_manager) {
-    pass_list_ = *pass_manager_->GetDefaultPassList();
-    DCHECK(!pass_list_.empty());
-  }
-
-  virtual ~PassDriver() {
-  }
-
-  /**
-   * @brief Insert a Pass: it is an error for two passes to share the same name.
-   */
-  void InsertPass(const Pass* new_pass) {
-    DCHECK(new_pass != nullptr);
-    DCHECK(new_pass->GetName() != nullptr);
-    DCHECK_NE(new_pass->GetName()[0], 0);
-
-    // It is an error to override an existing pass.
-    DCHECK(GetPass(new_pass->GetName()) == nullptr)
-        << "Pass name " << new_pass->GetName() << " already used.";
-    // Now add to the list.
-    pass_list_.push_back(new_pass);
-  }
-
-  /**
-   * @brief Run a pass using the name as key.
-   * @return whether the pass was applied.
-   */
-  virtual bool RunPass(const char* pass_name) {
-    // Paranoid: we need a valid pass name.
-    DCHECK(pass_name != nullptr);
-    DCHECK_NE(pass_name[0], 0);
-
-    const Pass* cur_pass = GetPass(pass_name);
-
-    if (cur_pass != nullptr) {
-      return RunPass(cur_pass);
-    }
-
-    // Return false, we did not find the pass.
-    return false;
-  }
-
-  /**
-   * @brief Runs all the passes in pass_list_.
-   */
-  void Launch() {
-    for (const Pass* cur_pass : pass_list_) {
-      RunPass(cur_pass);
-    }
-  }
-
-  /**
-   * @brief Searches for a particular pass.
-   * @param name The name of the pass to search for.
-   */
-  const Pass* GetPass(const char* name) const {
-    for (const Pass* cur_pass : pass_list_) {
-      if (strcmp(name, cur_pass->GetName()) == 0) {
-        return cur_pass;
-      }
-    }
-    return nullptr;
-  }
-
-  /**
-   * @brief Run a pass using the Pass itself.
-   * @param time_split do we want a time split request (default: false)?
-   * @return whether the pass was applied.
-   */
-  virtual bool RunPass(const Pass* pass, bool time_split = false) = 0;
-
- protected:
-  /**
-   * @brief Apply a pass: perform its start/work/end functions.
-   */
-  virtual void ApplyPass(PassDataHolder* data, const Pass* pass) {
-    pass->Start(data);
-    DispatchPass(pass);
-    pass->End(data);
-  }
-
-  /**
-   * @brief Dispatch a pass.
-   * Gives the ability to add logic when running the pass.
-   */
-  virtual void DispatchPass(const Pass* pass ATTRIBUTE_UNUSED) {
-  }
-
-  /** @brief List of passes: provides the order to execute the passes.
-   *  Passes are owned by pass_manager_. */
-  std::vector<const Pass*> pass_list_;
-
-  const PassManager* const pass_manager_;
-};
-
-}  // namespace art
-#endif  // ART_COMPILER_DEX_PASS_DRIVER_H_
diff --git a/compiler/dex/pass_driver_me.h b/compiler/dex/pass_driver_me.h
deleted file mode 100644
index d0af71c..0000000
--- a/compiler/dex/pass_driver_me.h
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_PASS_DRIVER_ME_H_
-#define ART_COMPILER_DEX_PASS_DRIVER_ME_H_
-
-#include <cstdlib>
-#include <cstring>
-
-#include "bb_optimizations.h"
-#include "dataflow_iterator.h"
-#include "dataflow_iterator-inl.h"
-#include "dex_flags.h"
-#include "pass_driver.h"
-#include "pass_manager.h"
-#include "pass_me.h"
-#include "safe_map.h"
-
-namespace art {
-
-class PassManager;
-class PassManagerOptions;
-
-class PassDriverME : public PassDriver {
- public:
-  PassDriverME(const PassManager* const pass_manager, CompilationUnit* cu)
-      : PassDriver(pass_manager), pass_me_data_holder_(), dump_cfg_folder_("/sdcard/") {
-        pass_me_data_holder_.bb = nullptr;
-        pass_me_data_holder_.c_unit = cu;
-  }
-
-  ~PassDriverME() {
-  }
-
-  void DispatchPass(const Pass* pass) {
-    VLOG(compiler) << "Dispatching " << pass->GetName();
-    const PassME* me_pass = down_cast<const PassME*>(pass);
-
-    DataFlowAnalysisMode mode = me_pass->GetTraversal();
-
-    switch (mode) {
-      case kPreOrderDFSTraversal:
-        DoWalkBasicBlocks<PreOrderDfsIterator>(&pass_me_data_holder_, me_pass);
-        break;
-      case kRepeatingPreOrderDFSTraversal:
-        DoWalkBasicBlocks<RepeatingPreOrderDfsIterator>(&pass_me_data_holder_, me_pass);
-        break;
-      case kRepeatingPostOrderDFSTraversal:
-        DoWalkBasicBlocks<RepeatingPostOrderDfsIterator>(&pass_me_data_holder_, me_pass);
-        break;
-      case kReversePostOrderDFSTraversal:
-        DoWalkBasicBlocks<ReversePostOrderDfsIterator>(&pass_me_data_holder_, me_pass);
-        break;
-      case kRepeatingReversePostOrderDFSTraversal:
-        DoWalkBasicBlocks<RepeatingReversePostOrderDfsIterator>(&pass_me_data_holder_, me_pass);
-        break;
-      case kPostOrderDOMTraversal:
-        DoWalkBasicBlocks<PostOrderDOMIterator>(&pass_me_data_holder_, me_pass);
-        break;
-      case kTopologicalSortTraversal:
-        DoWalkBasicBlocks<TopologicalSortIterator>(&pass_me_data_holder_, me_pass);
-        break;
-      case kLoopRepeatingTopologicalSortTraversal:
-        DoWalkBasicBlocks<LoopRepeatingTopologicalSortIterator>(&pass_me_data_holder_, me_pass);
-        break;
-      case kAllNodes:
-        DoWalkBasicBlocks<AllNodesIterator>(&pass_me_data_holder_, me_pass);
-        break;
-      case kNoNodes:
-        break;
-      default:
-        LOG(FATAL) << "Iterator mode not handled in dispatcher: " << mode;
-        break;
-    }
-  }
-
-  bool RunPass(const Pass* pass, bool time_split) OVERRIDE {
-    // Paranoid: c_unit and pass cannot be null, and the pass should have a name.
-    DCHECK(pass != nullptr);
-    DCHECK(pass->GetName() != nullptr && pass->GetName()[0] != 0);
-    CompilationUnit* c_unit = pass_me_data_holder_.c_unit;
-    DCHECK(c_unit != nullptr);
-
-    // Do we perform a time split?
-    if (time_split) {
-      c_unit->NewTimingSplit(pass->GetName());
-    }
-
-    // First, work on determining pass verbosity.
-    bool old_print_pass = c_unit->print_pass;
-    c_unit->print_pass = pass_manager_->GetOptions().GetPrintAllPasses();
-    auto* const options = &pass_manager_->GetOptions();
-    const std::string& print_pass_list = options->GetPrintPassList();
-    if (!print_pass_list.empty() && strstr(print_pass_list.c_str(), pass->GetName()) != nullptr) {
-      c_unit->print_pass = true;
-    }
-
-    // Next, check if there are any overridden settings for the pass that change default
-    // configuration.
-    c_unit->overridden_pass_options.clear();
-    FillOverriddenPassSettings(options, pass->GetName(), c_unit->overridden_pass_options);
-    if (c_unit->print_pass) {
-      for (auto setting_it : c_unit->overridden_pass_options) {
-        LOG(INFO) << "Overridden option \"" << setting_it.first << ":"
-          << setting_it.second << "\" for pass \"" << pass->GetName() << "\"";
-      }
-    }
-
-    // Check the pass gate first.
-    bool should_apply_pass = pass->Gate(&pass_me_data_holder_);
-    if (should_apply_pass) {
-      // Applying the pass: first start, doWork, and end calls.
-      this->ApplyPass(&pass_me_data_holder_, pass);
-
-      bool should_dump = (c_unit->enable_debug & (1 << kDebugDumpCFG)) != 0;
-
-      const std::string& dump_pass_list = pass_manager_->GetOptions().GetDumpPassList();
-      if (!dump_pass_list.empty()) {
-        const bool found = strstr(dump_pass_list.c_str(), pass->GetName()) != nullptr;
-        should_dump = should_dump || found;
-      }
-
-      if (should_dump) {
-        // Do we want to log it?
-        if ((c_unit->enable_debug & (1 << kDebugDumpCFG)) != 0) {
-          // Do we have a pass folder?
-          const PassME* me_pass = down_cast<const PassME*>(pass);
-          const char* pass_folder = me_pass->GetDumpCFGFolder();
-          DCHECK(pass_folder != nullptr);
-
-          if (pass_folder[0] != 0) {
-            // Create directory prefix.
-            std::string prefix = GetDumpCFGFolder();
-            prefix += pass_folder;
-            prefix += "/";
-
-            c_unit->mir_graph->DumpCFG(prefix.c_str(), false);
-          }
-        }
-      }
-    }
-
-    // Before wrapping up with this pass, restore old pass verbosity flag.
-    c_unit->print_pass = old_print_pass;
-
-    // If the pass gate passed, we can declare success.
-    return should_apply_pass;
-  }
-
-  static void PrintPassOptions(PassManager* manager) {
-    for (const auto* pass : *manager->GetDefaultPassList()) {
-      const PassME* me_pass = down_cast<const PassME*>(pass);
-      if (me_pass->HasOptions()) {
-        LOG(INFO) << "Pass options for \"" << me_pass->GetName() << "\" are:";
-        SafeMap<const std::string, const OptionContent> overridden_settings;
-        FillOverriddenPassSettings(&manager->GetOptions(), me_pass->GetName(),
-                                   overridden_settings);
-        me_pass->PrintPassOptions(overridden_settings);
-      }
-    }
-  }
-
-  const char* GetDumpCFGFolder() const {
-    return dump_cfg_folder_;
-  }
-
- protected:
-  /** @brief The data holder that contains data needed for the PassDriverME. */
-  PassMEDataHolder pass_me_data_holder_;
-
-  /** @brief Dump CFG base folder: where is the base folder for dumping CFGs. */
-  const char* dump_cfg_folder_;
-
-  static void DoWalkBasicBlocks(PassMEDataHolder* data, const PassME* pass,
-                                DataflowIterator* iterator) {
-    // Paranoid: Check the iterator before walking the BasicBlocks.
-    DCHECK(iterator != nullptr);
-    bool change = false;
-    for (BasicBlock* bb = iterator->Next(change); bb != nullptr; bb = iterator->Next(change)) {
-      data->bb = bb;
-      change = pass->Worker(data);
-    }
-  }
-
-  template <typename Iterator>
-  inline static void DoWalkBasicBlocks(PassMEDataHolder* data, const PassME* pass) {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = data->c_unit;
-    DCHECK(c_unit != nullptr);
-    Iterator iterator(c_unit->mir_graph.get());
-    DoWalkBasicBlocks(data, pass, &iterator);
-  }
-
-  /**
-   * @brief Fills the settings_to_fill by finding all of the applicable options in the
-   * overridden_pass_options_list_.
-   * @param pass_name The pass name for which to fill settings.
-   * @param settings_to_fill Fills the options to contain the mapping of name of option to the new
-   * configuration.
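-   * For example, given the (hypothetical) option string "MyPass:MaxIters:4" and
-   * pass_name "MyPass", settings_to_fill receives the mapping "MaxIters" -> 4.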
-   */
-  static void FillOverriddenPassSettings(
-      const PassManagerOptions* options, const char* pass_name,
-      SafeMap<const std::string, const OptionContent>& settings_to_fill) {
-    const std::string& settings = options->GetOverriddenPassOptions();
-    const size_t settings_len = settings.size();
-
-    // Before anything, check if we care about anything right now.
-    if (settings_len == 0) {
-      return;
-    }
-
-    const size_t pass_name_len = strlen(pass_name);
-    const size_t min_setting_size = 4;  // 2 delimiters, 1 setting name, 1 setting
-    size_t search_pos = 0;
-
-    // If there is no room for pass options, exit early.
-    if (settings_len < pass_name_len + min_setting_size) {
-      return;
-    }
-
-    do {
-      search_pos = settings.find(pass_name, search_pos);
-
-      // Check if we found this pass name in rest of string.
-      if (search_pos == std::string::npos) {
-        // No more settings for this pass.
-        break;
-      }
-
-      // The string contains the pass name. Now check that there is
-      // room for the settings: at least one char for setting name,
-      // two chars for two delimiters, and at least one char for the setting.
-      if (search_pos + pass_name_len + min_setting_size >= settings_len) {
-        // No more settings for this pass.
-        break;
-      }
-
-      // Update the current search position to not include the pass name.
-      search_pos += pass_name_len;
-
-      // The format must be "PassName:SettingName:#" where # is the setting.
-      // Thus look for the first ":" which must exist.
-      if (settings[search_pos] != ':') {
-        // Missing delimiter right after pass name.
-        continue;
-      } else {
-        search_pos += 1;
-      }
-
-      // Now look for the actual setting by finding the next ":" delimiter.
-      const size_t setting_name_pos = search_pos;
-      size_t setting_pos = settings.find(':', setting_name_pos);
-
-      if (setting_pos == std::string::npos) {
-        // Missing a delimiter that would capture where setting starts.
-        continue;
-      } else if (setting_pos == setting_name_pos) {
-        // Missing setting, thus we did not move past the setting name.
-        continue;
-      } else {
-        // Skip the delimiter.
-        setting_pos += 1;
-      }
-
-      // Look for the terminating delimiter which must be a comma.
-      size_t next_configuration_separator = settings.find(',', setting_pos);
-      if (next_configuration_separator == std::string::npos) {
-        next_configuration_separator = settings_len;
-      }
-
-      // Prevent end of string errors.
-      if (next_configuration_separator == setting_pos) {
-        continue;
-      }
-
-      // Get the actual setting itself.
-      std::string setting_string =
-          settings.substr(setting_pos, next_configuration_separator - setting_pos);
-
-      std::string setting_name =
-          settings.substr(setting_name_pos, setting_pos - setting_name_pos - 1);
-
-      // We attempt to convert the option value to an integer. strtoll is used for the
-      // conversion because it does not throw exceptions.
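-      // For example, a setting of "4" is stored as the integer 4, while "fast" fails
-      // the end-of-string check below and is stored as the string "fast".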
-      char* end_ptr = nullptr;
-      const char* setting_ptr = setting_string.c_str();
-      DCHECK(setting_ptr != nullptr);  // Paranoid: setting_ptr must be a valid pointer.
-      int64_t int_value = strtoll(setting_ptr, &end_ptr, 0);
-      DCHECK(end_ptr != nullptr);  // Paranoid: end_ptr must be set by the strtoll call.
-
-      // If strtoll call succeeded, the option is now considered as integer.
-      if (*setting_ptr != '\0' && end_ptr != setting_ptr && *end_ptr == '\0') {
-        settings_to_fill.Put(setting_name, OptionContent(int_value));
-      } else {
-        // Otherwise, it is considered as a string.
-        settings_to_fill.Put(setting_name, OptionContent(setting_string.c_str()));
-      }
-      search_pos = next_configuration_separator;
-    } while (true);
-  }
-};
-}  // namespace art
-#endif  // ART_COMPILER_DEX_PASS_DRIVER_ME_H_
diff --git a/compiler/dex/pass_driver_me_opts.cc b/compiler/dex/pass_driver_me_opts.cc
deleted file mode 100644
index 375003b..0000000
--- a/compiler/dex/pass_driver_me_opts.cc
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "pass_driver_me_opts.h"
-
-#include "base/logging.h"
-#include "base/macros.h"
-#include "bb_optimizations.h"
-#include "dataflow_iterator.h"
-#include "dataflow_iterator-inl.h"
-#include "pass_driver_me_opts.h"
-#include "pass_manager.h"
-#include "post_opt_passes.h"
-
-namespace art {
-
-void PassDriverMEOpts::SetupPasses(PassManager* pass_manager) {
-  /*
-   * Create the pass list. These passes are immutable and are shared across the threads.
-   *
-   * Advantage is that there will be no race conditions here.
-   * Disadvantage is that the passes can't change their internal state depending on the CompilationUnit:
-   *   - This is not yet an issue: no current pass would require it.
-   */
-  pass_manager->AddPass(new StringChange);
-  pass_manager->AddPass(new CacheFieldLoweringInfo);
-  pass_manager->AddPass(new CacheMethodLoweringInfo);
-  pass_manager->AddPass(new CalculatePredecessors);
-  pass_manager->AddPass(new DFSOrders);
-  pass_manager->AddPass(new ClassInitCheckElimination);
-  pass_manager->AddPass(new SpecialMethodInliner);
-  pass_manager->AddPass(new NullCheckElimination);
-  pass_manager->AddPass(new BBCombine);
-  pass_manager->AddPass(new CodeLayout);
-  pass_manager->AddPass(new GlobalValueNumberingPass);
-  pass_manager->AddPass(new DeadCodeEliminationPass);
-  pass_manager->AddPass(new GlobalValueNumberingCleanupPass);
-  pass_manager->AddPass(new ConstantPropagation);
-  pass_manager->AddPass(new MethodUseCount);
-  pass_manager->AddPass(new BBOptimizations);
-  pass_manager->AddPass(new SuspendCheckElimination);
-}
-
-void PassDriverMEOpts::ApplyPass(PassDataHolder* data, const Pass* pass) {
-  const PassME* const pass_me = down_cast<const PassME*>(pass);
-  DCHECK(pass_me != nullptr);
-  PassMEDataHolder* const pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-  // Set to dirty.
-  pass_me_data_holder->dirty = true;
-  // First call the base class' version.
-  PassDriver::ApplyPass(data, pass);
-  // Now we care about flags.
-  if ((pass_me->GetFlag(kOptimizationBasicBlockChange) == true) ||
-      (pass_me->GetFlag(kOptimizationDefUsesChange) == true)) {
-    // Is it dirty at least?
-    if (pass_me_data_holder->dirty == true) {
-      CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-      c_unit->mir_graph->CalculateBasicBlockInformation(post_opt_pass_manager_);
-    }
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/pass_driver_me_opts.h b/compiler/dex/pass_driver_me_opts.h
deleted file mode 100644
index c8093d0..0000000
--- a/compiler/dex/pass_driver_me_opts.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_PASS_DRIVER_ME_OPTS_H_
-#define ART_COMPILER_DEX_PASS_DRIVER_ME_OPTS_H_
-
-#include "pass_driver_me.h"
-
-namespace art {
-
-// Forward Declarations.
-struct CompilationUnit;
-class Pass;
-class PassDataHolder;
-class PassManager;
-
-class PassDriverMEOpts : public PassDriverME {
- public:
-  PassDriverMEOpts(const PassManager* const manager,
-                   const PassManager* const post_opt_pass_manager,
-                   CompilationUnit* cu)
-      : PassDriverME(manager, cu), post_opt_pass_manager_(post_opt_pass_manager) {
-  }
-
-  ~PassDriverMEOpts() {
-  }
-
-  /**
-   * @brief Write and allocate corresponding passes into the pass manager.
-   */
-  static void SetupPasses(PassManager* pass_manager);
-
-  /**
-   * @brief Apply a pass: perform its start/work/end functions.
-   */
-  virtual void ApplyPass(PassDataHolder* data, const Pass* pass) OVERRIDE;
-
-  const PassManager* const post_opt_pass_manager_;
-};
-
-}  // namespace art
-#endif  // ART_COMPILER_DEX_PASS_DRIVER_ME_OPTS_H_
diff --git a/compiler/dex/pass_driver_me_post_opt.cc b/compiler/dex/pass_driver_me_post_opt.cc
deleted file mode 100644
index b35bc3d..0000000
--- a/compiler/dex/pass_driver_me_post_opt.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "pass_driver_me_post_opt.h"
-
-#include "base/macros.h"
-#include "post_opt_passes.h"
-#include "pass_manager.h"
-
-namespace art {
-
-void PassDriverMEPostOpt::SetupPasses(PassManager* pass_manager) {
-  /*
-   * Create the pass list. These passes are immutable and are shared across the threads.
-   *
-   * Advantage is that there will be no race conditions here.
-   * Disadvantage is that the passes can't change their internal state depending on the CompilationUnit:
-   *   - This is not yet an issue: no current pass would require it.
-   */
-  // The initial list of passes to be used by the PassDriverMEPostOpt.
-  pass_manager->AddPass(new DFSOrders);
-  pass_manager->AddPass(new BuildDomination);
-  pass_manager->AddPass(new TopologicalSortOrders);
-  pass_manager->AddPass(new InitializeSSATransformation);
-  pass_manager->AddPass(new ClearPhiInstructions);
-  pass_manager->AddPass(new DefBlockMatrix);
-  pass_manager->AddPass(new FindPhiNodeBlocksPass);
-  pass_manager->AddPass(new SSAConversion);
-  pass_manager->AddPass(new PhiNodeOperands);
-  pass_manager->AddPass(new PerformInitRegLocations);
-  pass_manager->AddPass(new TypeInferencePass);
-  pass_manager->AddPass(new FinishSSATransformation);
-}
-
-}  // namespace art
diff --git a/compiler/dex/pass_driver_me_post_opt.h b/compiler/dex/pass_driver_me_post_opt.h
deleted file mode 100644
index 94176db..0000000
--- a/compiler/dex/pass_driver_me_post_opt.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_PASS_DRIVER_ME_POST_OPT_H_
-#define ART_COMPILER_DEX_PASS_DRIVER_ME_POST_OPT_H_
-
-#include "pass_driver_me.h"
-
-namespace art {
-
-// Forward Declarations.
-struct CompilationUnit;
-class Pass;
-class PassDataHolder;
-
-class PassDriverMEPostOpt : public PassDriverME {
- public:
-  PassDriverMEPostOpt(const PassManager* const manager, CompilationUnit* cu)
-      : PassDriverME(manager, cu) {
-  }
-
-  ~PassDriverMEPostOpt() {
-  }
-
-  /**
-   * @brief Write and allocate corresponding passes into the pass manager.
-   */
-  static void SetupPasses(PassManager* pass_manager);
-};
-
-}  // namespace art
-#endif  // ART_COMPILER_DEX_PASS_DRIVER_ME_POST_OPT_H_
diff --git a/compiler/dex/pass_manager.cc b/compiler/dex/pass_manager.cc
deleted file mode 100644
index 6377a6c..0000000
--- a/compiler/dex/pass_manager.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "pass_manager.h"
-
-#include "base/stl_util.h"
-#include "pass_me.h"
-
-namespace art {
-
-PassManager::PassManager(const PassManagerOptions& options) : options_(options) {
-}
-
-PassManager::~PassManager() {
-  STLDeleteElements(&passes_);
-}
-
-void PassManager::CreateDefaultPassList() {
-  default_pass_list_.clear();
-  // Add each pass which isn't disabled into default_pass_list_.
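-  // Note that this is a substring match, so a pass is skipped whenever its name
-  // appears anywhere in the disable-pass list string.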
-  for (const auto* pass : passes_) {
-    if (options_.GetDisablePassList().find(pass->GetName()) != std::string::npos) {
-      VLOG(compiler) << "Skipping disabled pass " << pass->GetName();
-    } else {
-      default_pass_list_.push_back(pass);
-    }
-  }
-}
-
-void PassManager::PrintPassNames() const {
-  LOG(INFO) << "Loop Passes are:";
-  for (const Pass* cur_pass : default_pass_list_) {
-    LOG(INFO) << "\t-" << cur_pass->GetName();
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/pass_manager.h b/compiler/dex/pass_manager.h
deleted file mode 100644
index 68e488d..0000000
--- a/compiler/dex/pass_manager.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_PASS_MANAGER_H_
-#define ART_COMPILER_DEX_PASS_MANAGER_H_
-
-#include <string>
-#include <vector>
-
-#include "base/logging.h"
-
-namespace art {
-
-class Pass;
-
-class PassManagerOptions {
- public:
-  PassManagerOptions()
-     : default_print_passes_(false),
-       print_pass_names_(false),
-       print_pass_options_(false) {
-  }
-  explicit PassManagerOptions(const PassManagerOptions&) = default;
-
-  void SetPrintPassNames(bool b) {
-    print_pass_names_ = b;
-  }
-
-  void SetPrintAllPasses() {
-    default_print_passes_ = true;
-  }
-  bool GetPrintAllPasses() const {
-    return default_print_passes_;
-  }
-
-  void SetDisablePassList(const std::string& list) {
-    disable_pass_list_ = list;
-  }
-  const std::string& GetDisablePassList() const {
-    return disable_pass_list_;
-  }
-
-  void SetPrintPassList(const std::string& list) {
-    print_pass_list_ = list;
-  }
-  const std::string& GetPrintPassList() const {
-    return print_pass_list_;
-  }
-
-  void SetDumpPassList(const std::string& list) {
-    dump_pass_list_ = list;
-  }
-  const std::string& GetDumpPassList() const {
-    return dump_pass_list_;
-  }
-
-  /**
-   * @brief Used to set a string that contains the overridden pass options.
-   * @details An overridden pass option means that the pass uses this option
-   * instead of using its default option.
-   * @param s The string passed by user with overridden options. The string is in format
-   * Pass1Name:Pass1Option:Pass1Setting,Pass2Name:Pass2Option:Pass2Setting
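-   * e.g. (hypothetical names): "MyPass:MyOption:3,OtherPass:OtherOption:text_setting"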
-   */
-  void SetOverriddenPassOptions(const std::string& list) {
-    overridden_pass_options_list_ = list;
-  }
-  const std::string& GetOverriddenPassOptions() const {
-    return overridden_pass_options_list_;
-  }
-
-  void SetPrintPassOptions(bool b) {
-    print_pass_options_ = b;
-  }
-  bool GetPrintPassOptions() const {
-    return print_pass_options_;
-  }
-
- private:
-  /** @brief Do we, by default, want to be printing the log messages? */
-  bool default_print_passes_;
-
-  /** @brief What are the passes we want to be printing the log messages? */
-  std::string print_pass_list_;
-
-  /** @brief What are the passes we want to be dumping the CFG? */
-  std::string dump_pass_list_;
-
-  /** @brief String of all options that should be overridden for selected passes */
-  std::string overridden_pass_options_list_;
-
-  /** @brief String of all passes that should be disabled. */
-  std::string disable_pass_list_;
-
-  /** @brief Whether or not we print all the passes when we create the pass manager */
-  bool print_pass_names_;
-
-  /** @brief Whether or not we print all the pass options when we create the pass manager */
-  bool print_pass_options_;
-};
-
-/**
- * @class PassManager
- * @brief Owns passes
- */
-class PassManager {
- public:
-  explicit PassManager(const PassManagerOptions& options);
-  virtual ~PassManager();
-  void CreateDefaultPassList();
-  void AddPass(const Pass* pass) {
-    passes_.push_back(pass);
-  }
-  /**
-   * @brief Print the pass names of all the passes available.
-   */
-  void PrintPassNames() const;
-  const std::vector<const Pass*>* GetDefaultPassList() const {
-    return &default_pass_list_;
-  }
-  const PassManagerOptions& GetOptions() const {
-    return options_;
-  }
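-
-  // Typical usage (a sketch): AddPass() each heap-allocated pass, then call
-  // CreateDefaultPassList() before handing the manager to a PassDriver.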
-
- private:
-  /** @brief The set of possible passes.  */
-  std::vector<const Pass*> passes_;
-
-  /** @brief The default pass list is used to initialize pass_list_. */
-  std::vector<const Pass*> default_pass_list_;
-
-  /** @brief Pass manager options. */
-  PassManagerOptions options_;
-
-  DISALLOW_COPY_AND_ASSIGN(PassManager);
-};
-}  // namespace art
-#endif  // ART_COMPILER_DEX_PASS_MANAGER_H_
diff --git a/compiler/dex/pass_me.h b/compiler/dex/pass_me.h
deleted file mode 100644
index d3cf393..0000000
--- a/compiler/dex/pass_me.h
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_PASS_ME_H_
-#define ART_COMPILER_DEX_PASS_ME_H_
-
-#include <string>
-
-#include "base/logging.h"
-#include "pass.h"
-#include "compiler_ir.h"
-#include "safe_map.h"
-
-namespace art {
-
-// Forward declarations.
-class BasicBlock;
-struct CompilationUnit;
-
-/**
- * @brief OptimizationFlag is an enumeration to perform certain tasks for a given pass.
- * @details Each enum should be a power of 2 to be correctly used.
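- * For example, flags may be combined as a bitmask:
- * kOptimizationBasicBlockChange | kOptimizationDefUsesChange.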
- */
-enum OptimizationFlag {
-  kOptimizationBasicBlockChange = 1,  /// @brief Has there been a change to a BasicBlock?
-  kOptimizationDefUsesChange = 2,     /// @brief Has there been a change to a def-use?
-  kLoopStructureChange = 4,           /// @brief Has there been a loop structural change?
-};
-std::ostream& operator<<(std::ostream& os, const OptimizationFlag& rhs);
-
-// Data holder class.
-class PassMEDataHolder: public PassDataHolder {
- public:
-  CompilationUnit* c_unit;
-  BasicBlock* bb;
-  void* data;               /**< @brief Any data the pass wants to use */
-  bool dirty;               /**< @brief Has the pass rendered the CFG dirty, requiring post-opt? */
-};
-
-enum DataFlowAnalysisMode {
-  kAllNodes = 0,                           /// @brief All nodes.
-  kPreOrderDFSTraversal,                   /// @brief Depth-First-Search / Pre-Order.
-  kRepeatingPreOrderDFSTraversal,          /// @brief Depth-First-Search / Repeating Pre-Order.
-  kReversePostOrderDFSTraversal,           /// @brief Depth-First-Search / Reverse Post-Order.
-  kRepeatingPostOrderDFSTraversal,         /// @brief Depth-First-Search / Repeating Post-Order.
-  kRepeatingReversePostOrderDFSTraversal,  /// @brief Depth-First-Search / Repeating Reverse Post-Order.
-  kPostOrderDOMTraversal,                  /// @brief Dominator tree / Post-Order.
-  kTopologicalSortTraversal,               /// @brief Topological Order traversal.
-  kLoopRepeatingTopologicalSortTraversal,  /// @brief Loop-repeating Topological Order traversal.
-  kNoNodes,                                /// @brief Skip BasicBlock traversal.
-};
-std::ostream& operator<<(std::ostream& os, const DataFlowAnalysisMode& rhs);
-
-/**
- * @class PassME
- * @brief PassME is the Pass structure for the optimization passes run on the MIR graph.
- * @details It carries the traversal type, optimization flags and CFG dump folder for each pass.
- */
-class PassME : public Pass {
- public:
-  explicit PassME(const char* name, DataFlowAnalysisMode type = kAllNodes,
-          unsigned int flags = 0u, const char* dump = "")
-    : Pass(name), traversal_type_(type), flags_(flags), dump_cfg_folder_(dump) {
-  }
-
-  PassME(const char* name, DataFlowAnalysisMode type, const char* dump)
-    : Pass(name), traversal_type_(type), flags_(0), dump_cfg_folder_(dump) {
-  }
-
-  PassME(const char* name, const char* dump)
-    : Pass(name), traversal_type_(kAllNodes), flags_(0), dump_cfg_folder_(dump) {
-  }
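-
-  // A minimal sketch of a (hypothetical) instantiation:
-  //   new PassME("MyPass", kPreOrderDFSTraversal, kOptimizationBasicBlockChange, "my_pass_cfg");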
-
-  ~PassME() {
-    default_options_.clear();
-  }
-
-  virtual DataFlowAnalysisMode GetTraversal() const {
-    return traversal_type_;
-  }
-
-  /**
-   * @return Returns whether the pass has any configurable options.
-   */
-  bool HasOptions() const {
-    return default_options_.size() != 0;
-  }
-
-  /**
-   * @brief Prints the pass options along with default settings if there are any.
-   * @details The printing is done using LOG(INFO).
-   */
-  void PrintPassDefaultOptions() const {
-    for (const auto& option : default_options_) {
-      LOG(INFO) << "\t" << option.first << ":" << option.second;
-    }
-  }
-
-  /**
-   * @brief Prints the pass options along with either default or overridden setting.
-   * @param overridden_options The overridden settings for this pass.
-   */
-  void PrintPassOptions(SafeMap<const std::string, const OptionContent>& overridden_options) const {
-    // We walk through the default options only to get the pass names. We use GetPassOption to
-    // also consider the overridden ones.
-    for (const auto& option : default_options_) {
-      LOG(INFO) << "\t" << option.first << ":"
-                << GetPassOption(option.first, overridden_options);
-    }
-  }
-
-  /**
-   * @brief Used to obtain the option structure for a pass.
-   * @details Will return the overridden option if it exists or default one otherwise.
-   * @param option_name The name of option whose setting to look for.
-   * @param c_unit The compilation unit currently being handled.
-   * @return Returns the option structure containing the option value.
-  */
-  const OptionContent& GetPassOption(const char* option_name, CompilationUnit* c_unit) const {
-    return GetPassOption(option_name, c_unit->overridden_pass_options);
-  }
-
-  /**
-   * @brief Used to obtain the option for a pass as a string.
-   * @details Will return the overridden option if it exists or default one otherwise.
-   * It will return nullptr if the required option value is not a string.
-   * @param option_name The name of option whose setting to look for.
-   * @param c_unit The compilation unit currently being handled.
-   * @return Returns the overridden option if it exists or the default one otherwise.
-  */
-  const char* GetStringPassOption(const char* option_name, CompilationUnit* c_unit) const {
-    return GetStringPassOption(option_name, c_unit->overridden_pass_options);
-  }
-
-  /**
-    * @brief Used to obtain the pass option value as an integer.
-    * @details Will return the overridden option if it exists or default one otherwise.
-    * It will return 0 if the required option value is not an integer.
-    * @param c_unit The compilation unit currently being handled.
-    * @return Returns the overridden option if it exists or the default one otherwise.
-   */
-  int64_t GetIntegerPassOption(const char* option_name, CompilationUnit* c_unit) const {
-    return GetIntegerPassOption(option_name, c_unit->overridden_pass_options);
-  }
-
-  const char* GetDumpCFGFolder() const {
-    return dump_cfg_folder_;
-  }
-
-  bool GetFlag(OptimizationFlag flag) const {
-    return (flags_ & flag) != 0u;
-  }
-
- protected:
-  const OptionContent& GetPassOption(const char* option_name,
-        const SafeMap<const std::string, const OptionContent>& overridden_options) const {
-    DCHECK(option_name != nullptr);
-
-    // First check if there are any overridden settings.
-    auto overridden_it = overridden_options.find(std::string(option_name));
-    if (overridden_it != overridden_options.end()) {
-      return overridden_it->second;
-    } else {
-      // Otherwise, there must be a default value for this option name.
-      auto default_it = default_options_.find(option_name);
-      // An invalid option is being requested.
-      if (default_it == default_options_.end()) {
-        LOG(FATAL) << "Fatal: Cannot find an option named \"" << option_name << "\"";
-      }
-
-      return default_it->second;
-    }
-  }
-
-  const char* GetStringPassOption(const char* option_name,
-        const SafeMap<const std::string, const OptionContent>& overridden_options) const {
-    const OptionContent& option_content = GetPassOption(option_name, overridden_options);
-    if (option_content.type != OptionContent::kString) {
-      return nullptr;
-    }
-
-    return option_content.GetString();
-  }
-
-  int64_t GetIntegerPassOption(const char* option_name,
-          const SafeMap<const std::string, const OptionContent>& overridden_options) const {
-    const OptionContent& option_content = GetPassOption(option_name, overridden_options);
-    if (option_content.type != OptionContent::kInteger) {
-      return 0;
-    }
-
-    return option_content.GetInteger();
-  }
-
-  /** @brief Type of traversal: determines the order to execute the pass on the BasicBlocks. */
-  const DataFlowAnalysisMode traversal_type_;
-
-  /** @brief Flags for additional directives: used to determine if a particular
-    * post-optimization pass is necessary. */
-  const unsigned int flags_;
-
-  /** @brief CFG Dump Folder: what sub-folder to use for dumping the CFGs post pass. */
-  const char* const dump_cfg_folder_;
-
-  /**
-   * @brief Contains a map of options with the default settings.
-   * @details The constructor of the specific pass instance should fill this
-   * with default options.
-   * */
-  SafeMap<const char*, const OptionContent> default_options_;
-};
-}  // namespace art
-#endif  // ART_COMPILER_DEX_PASS_ME_H_
diff --git a/compiler/dex/post_opt_passes.cc b/compiler/dex/post_opt_passes.cc
deleted file mode 100644
index 9262440..0000000
--- a/compiler/dex/post_opt_passes.cc
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "post_opt_passes.h"
-
-#include "dataflow_iterator-inl.h"
-
-namespace art {
-
-bool ClearPhiInstructions::Worker(PassDataHolder* data) const {
-  DCHECK(data != nullptr);
-  PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-  CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-  DCHECK(c_unit != nullptr);
-  BasicBlock* bb = pass_me_data_holder->bb;
-  DCHECK(bb != nullptr);
-  MIR* mir = bb->first_mir_insn;
-
-  while (mir != nullptr) {
-    MIR* next = mir->next;
-
-    Instruction::Code opcode = mir->dalvikInsn.opcode;
-
-    if (opcode == static_cast<Instruction::Code>(kMirOpPhi)) {
-      bb->RemoveMIR(mir);
-    }
-
-    mir = next;
-  }
-
-  // We do not care about reporting whether or not the MIRs changed.
-  return false;
-}
-
-void CalculatePredecessors::Start(PassDataHolder* data) const {
-  DCHECK(data != nullptr);
-  CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-  DCHECK(c_unit != nullptr);
-  // First get the MIRGraph here to factor out a bit of the code.
-  MIRGraph* mir_graph = c_unit->mir_graph.get();
-
-  // First clear all predecessors.
-  AllNodesIterator first(mir_graph);
-  for (BasicBlock* bb = first.Next(); bb != nullptr; bb = first.Next()) {
-    bb->predecessors.clear();
-  }
-
-  // Now calculate all predecessors.
-  AllNodesIterator second(mir_graph);
-  for (BasicBlock* bb = second.Next(); bb != nullptr; bb = second.Next()) {
-    // We only care about non-hidden blocks.
-    if (bb->hidden == true) {
-      continue;
-    }
-
-    // Create iterator for visiting children.
-    ChildBlockIterator child_iter(bb, mir_graph);
-
-    // Now iterate through the children to set the predecessor bits.
-    for (BasicBlock* child = child_iter.Next(); child != nullptr; child = child_iter.Next()) {
-      child->predecessors.push_back(bb->id);
-    }
-  }
-}
-
-}  // namespace art
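
CalculatePredecessors::Start() above is the classic two-sweep recomputation: first clear
every block's predecessor list, then walk the successors of each non-hidden block and
record each edge in reverse. A minimal sketch of the same idea on a simplified CFG type
(BasicBlock here is an illustrative stand-in, not the real MIR type):

    #include <cstdint>
    #include <vector>

    struct BasicBlock {
      uint32_t id;
      bool hidden;
      std::vector<BasicBlock*> successors;   // Stand-in for ChildBlockIterator.
      std::vector<uint32_t> predecessors;
    };

    void RecalculatePredecessors(std::vector<BasicBlock*>& blocks) {
      // Sweep 1: clear all predecessor lists.
      for (BasicBlock* bb : blocks) {
        bb->predecessors.clear();
      }
      // Sweep 2: for each visible block, record it as a predecessor of each child.
      for (BasicBlock* bb : blocks) {
        if (bb->hidden) {
          continue;
        }
        for (BasicBlock* child : bb->successors) {
          child->predecessors.push_back(bb->id);
        }
      }
    }
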
diff --git a/compiler/dex/post_opt_passes.h b/compiler/dex/post_opt_passes.h
deleted file mode 100644
index e9fa0eb..0000000
--- a/compiler/dex/post_opt_passes.h
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_POST_OPT_PASSES_H_
-#define ART_COMPILER_DEX_POST_OPT_PASSES_H_
-
-#include "base/casts.h"
-#include "base/logging.h"
-#include "compiler_ir.h"
-#include "dex_flags.h"
-#include "mir_graph.h"
-#include "pass_me.h"
-
-namespace art {
-
-/**
- * @class PassMEMirSsaRep
- * @brief Convenience class for passes that check MIRGraph::MirSsaRepUpToDate().
- */
-class PassMEMirSsaRep : public PassME {
- public:
-  PassMEMirSsaRep(const char* name, DataFlowAnalysisMode type = kAllNodes)
-      : PassME(name, type) {
-  }
-
-  bool Gate(const PassDataHolder* data) const OVERRIDE {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return !c_unit->mir_graph->MirSsaRepUpToDate();
-  }
-};
-
-/**
- * @class InitializeSSATransformation
- * @brief There is some data that needs to be initialized before performing
- * the post-optimization passes.
- */
-class InitializeSSATransformation : public PassMEMirSsaRep {
- public:
-  InitializeSSATransformation() : PassMEMirSsaRep("InitializeSSATransformation", kNoNodes) {
-  }
-
-  void Start(PassDataHolder* data) const {
-    // New blocks may have been inserted, so the first thing we do is ensure that
-    // the c_unit's number of blocks matches the actual count of basic blocks.
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->SSATransformationStart();
-    c_unit->mir_graph->CompilerInitializeSSAConversion();
-  }
-};
-
-/**
- * @class ClearPhiInstructions
- * @brief Clear the PHI nodes from the CFG.
- */
-class ClearPhiInstructions : public PassMEMirSsaRep {
- public:
-  ClearPhiInstructions() : PassMEMirSsaRep("ClearPhiInstructions") {
-  }
-
-  bool Worker(PassDataHolder* data) const;
-};
-
-/**
- * @class CalculatePredecessors
- * @brief Calculate the predecessor list of each BasicBlock.
- */
-class CalculatePredecessors : public PassME {
- public:
-  CalculatePredecessors() : PassME("CalculatePredecessors", kNoNodes) {
-  }
-
-  void Start(PassDataHolder* data) const;
-};
-
-/**
- * @class DFSOrders
- * @brief Compute the DFS order of the MIR graph
- */
-class DFSOrders : public PassME {
- public:
-  DFSOrders() : PassME("DFSOrders", kNoNodes) {
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return !c_unit->mir_graph->DfsOrdersUpToDate();
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph.get()->ComputeDFSOrders();
-  }
-};
-
-/**
- * @class BuildDomination
- * @brief Build the domination information of the MIR Graph
- */
-class BuildDomination : public PassME {
- public:
-  BuildDomination() : PassME("BuildDomination", kNoNodes) {
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return !c_unit->mir_graph->DominationUpToDate();
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->ComputeDominators();
-  }
-
-  void End(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    // Verify the dataflow information after the pass.
-    if (c_unit->enable_debug & (1 << kDebugVerifyDataflow)) {
-      c_unit->mir_graph->VerifyDataflow();
-    }
-  }
-};
-
-/**
- * @class TopologicalSortOrders
- * @brief Compute the topological sort order of the MIR graph
- */
-class TopologicalSortOrders : public PassME {
- public:
-  TopologicalSortOrders() : PassME("TopologicalSortOrders", kNoNodes) {
-  }
-
-  bool Gate(const PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    return !c_unit->mir_graph->TopologicalOrderUpToDate();
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph.get()->ComputeTopologicalSortOrder();
-  }
-};
-
-/**
- * @class DefBlockMatrix
- * @brief Calculate the matrix of definitions per basic block
- */
-class DefBlockMatrix : public PassMEMirSsaRep {
- public:
-  DefBlockMatrix() : PassMEMirSsaRep("DefBlockMatrix", kNoNodes) {
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph.get()->ComputeDefBlockMatrix();
-  }
-};
-
-/**
- * @class FindPhiNodeBlocksPass
- * @brief Pass to find out where we need to insert the phi nodes for the SSA conversion.
- */
-class FindPhiNodeBlocksPass : public PassMEMirSsaRep {
- public:
-  FindPhiNodeBlocksPass() : PassMEMirSsaRep("FindPhiNodeBlocks", kNoNodes) {
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph.get()->FindPhiNodeBlocks();
-  }
-};
-
-/**
- * @class SSAConversion
- * @brief Pass for SSA conversion of MIRs
- */
-class SSAConversion : public PassMEMirSsaRep {
- public:
-  SSAConversion() : PassMEMirSsaRep("SSAConversion", kNoNodes) {
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    MIRGraph *mir_graph = c_unit->mir_graph.get();
-    mir_graph->ClearAllVisitedFlags();
-    mir_graph->DoDFSPreOrderSSARename(mir_graph->GetEntryBlock());
-  }
-};
-
-/**
- * @class PhiNodeOperands
- * @brief Pass to insert the Phi node operands into basic blocks
- */
-class PhiNodeOperands : public PassMEMirSsaRep {
- public:
-  PhiNodeOperands() : PassMEMirSsaRep("PhiNodeOperands", kPreOrderDFSTraversal) {
-  }
-
-  bool Worker(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    BasicBlock* bb = down_cast<PassMEDataHolder*>(data)->bb;
-    DCHECK(bb != nullptr);
-    c_unit->mir_graph->InsertPhiNodeOperands(bb);
-    // No need to repeat, so just return false.
-    return false;
-  }
-};
-
-/**
- * @class PerformInitRegLocations
- * @brief Initialize Register Locations.
- */
-class PerformInitRegLocations : public PassMEMirSsaRep {
- public:
-  PerformInitRegLocations() : PassMEMirSsaRep("PerformInitRegLocation", kNoNodes) {
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->InitRegLocations();
-  }
-};
-
-/**
- * @class TypeInferencePass
- * @brief Type inference pass.
- */
-class TypeInferencePass : public PassMEMirSsaRep {
- public:
-  TypeInferencePass() : PassMEMirSsaRep("TypeInference", kRepeatingPreOrderDFSTraversal) {
-  }
-
-  void Start(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph->InferTypesStart();
-  }
-
-  bool Worker(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
-    CompilationUnit* c_unit = pass_me_data_holder->c_unit;
-    DCHECK(c_unit != nullptr);
-    BasicBlock* bb = pass_me_data_holder->bb;
-    DCHECK(bb != nullptr);
-    return c_unit->mir_graph->InferTypes(bb);
-  }
-
-  void End(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph.get()->InferTypesEnd();
-  }
-};
-
-/**
- * @class FinishSSATransformation
- * @brief There is some data that needs to be freed after performing the post-optimization passes.
- */
-class FinishSSATransformation : public PassMEMirSsaRep {
- public:
-  FinishSSATransformation() : PassMEMirSsaRep("FinishSSATransformation", kNoNodes) {
-  }
-
-  void End(PassDataHolder* data) const {
-    DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
-    DCHECK(c_unit != nullptr);
-    c_unit->mir_graph.get()->SSATransformationEnd();
-  }
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_POST_OPT_PASSES_H_
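
Every pass in this header plugs into the same PassME protocol: an optional Gate() decides
whether the pass runs at all, Start()/End() bracket the traversal, and Worker() runs per
basic block, returning true to request another iteration under a repeating traversal mode.
A minimal sketch of that driver contract with simplified stand-in types (the real driver
dispatches through PassDataHolder and the dataflow iterators):

    struct DataHolder { /* c_unit, bb, ... in the real code */ };

    class Pass {
     public:
      virtual ~Pass() {}
      virtual bool Gate(const DataHolder&) const { return true; }  // Should the pass run?
      virtual void Start(DataHolder&) const {}                     // Once, before traversal.
      virtual bool Worker(DataHolder&) const { return false; }     // Per block; true => repeat.
      virtual void End(DataHolder&) const {}                       // Once, after traversal.
    };

    // Simplified driver: repeat the whole traversal while any Worker() asks for it.
    void RunPass(const Pass& pass, DataHolder& data, int num_blocks) {
      if (!pass.Gate(data)) {
        return;
      }
      pass.Start(data);
      bool repeat;
      do {
        repeat = false;
        for (int i = 0; i < num_blocks; ++i) {
          repeat |= pass.Worker(data);  // The real driver sets data.bb per block.
        }
      } while (repeat);
      pass.End(data);
    }
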
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
deleted file mode 100644
index 9717459..0000000
--- a/compiler/dex/quick/arm/arm_lir.h
+++ /dev/null
@@ -1,605 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_ARM_ARM_LIR_H_
-#define ART_COMPILER_DEX_QUICK_ARM_ARM_LIR_H_
-
-#include "dex/compiler_enums.h"
-#include "dex/reg_location.h"
-#include "dex/reg_storage.h"
-
-namespace art {
-
-/*
- * Runtime register usage conventions.
- *
- * r0-r3: Argument registers in both Dalvik and C/C++ conventions.
- *        However, for Dalvik->Dalvik calls we'll pass the target's Method*
- *        pointer in r0 as a hidden arg0. Otherwise used as codegen scratch
- *        registers.
- * r0-r1: As in C/C++ r0 is 32-bit return register and r0/r1 is 64-bit
- * r4   : If ARM_R4_SUSPEND_FLAG is set then reserved as a suspend check/debugger
- *        assist flag, otherwise a callee save promotion target.
- * r5   : Callee save (promotion target)
- * r6   : Callee save (promotion target)
- * r7   : Callee save (promotion target)
- * r8   : Callee save (promotion target)
- * r9   : (rARM_SELF) is reserved (pointer to thread-local storage)
- * r10  : Callee save (promotion target)
- * r11  : Callee save (promotion target)
- * r12  : Scratch, may be trashed by linkage stubs
- * r13  : (sp) is reserved
- * r14  : (lr) is reserved
- * r15  : (pc) is reserved
- *
- * 5 core temps that codegen can use (r0, r1, r2, r3, r12)
- * 7 core registers that can be used for promotion
- *
- * Floating point registers
- * s0-s31
- * d0-d15, where d0={s0,s1}, d1={s2,s3}, ... , d15={s30,s31}
- *
- * s16-s31 (d8-d15) preserved across C calls
- * s0-s15 (d0-d7) trashed across C calls
- *
- * s0-s15/d0-d7 used as codegen temp/scratch
- * s16-s31/d8-d15 can be used for promotion.
- *
- * Calling convention
- *     o On a call to a Dalvik method, pass target's Method* in r0
- *     o r1-r3 will be used for up to the first 3 words of arguments
- *     o Arguments past the first 3 words will be placed in appropriate
- *       out slots by the caller.
- *     o If a 64-bit argument would span the register/memory argument
- *       boundary, it will instead be fully passed in the frame.
- *     o Maintain a 16-byte stack alignment
- *
- *  Stack frame diagram (stack grows down, higher addresses at top):
- *
- * +------------------------+
- * | IN[ins-1]              |  {Note: resides in caller's frame}
- * |       .                |
- * | IN[0]                  |
- * | caller's Method*       |
- * +========================+  {Note: start of callee's frame}
- * | spill region           |  {variable sized - will include lr if non-leaf.}
- * +------------------------+
- * | ...filler word...      |  {Note: used as 2nd word of V[locals-1] if long}
- * +------------------------+
- * | V[locals-1]            |
- * | V[locals-2]            |
- * |      .                 |
- * |      .                 |
- * | V[1]                   |
- * | V[0]                   |
- * +------------------------+
- * |  0 to 3 words padding  |
- * +------------------------+
- * | OUT[outs-1]            |
- * | OUT[outs-2]            |
- * |       .                |
- * | OUT[0]                 |
- * | cur_method*            | <<== sp w/ 16-byte alignment
- * +========================+
- */
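
The "0 to 3 words padding" slot in the diagram is whatever rounds the frame up to the
required 16-byte boundary. A small illustrative helper (not code from the tree) showing
the arithmetic with 4-byte words:

    #include <cstdint>

    // Round a frame size up to the next 16-byte boundary; with 4-byte words this
    // inserts the "0 to 3 words padding" slot shown in the diagram.
    constexpr uint32_t AlignFrame(uint32_t size_in_bytes) {
      return (size_in_bytes + 15u) & ~15u;
    }

    static_assert(AlignFrame(36) == 48, "36 bytes -> 3 words of padding");
    static_assert(AlignFrame(48) == 48, "already aligned -> no padding");
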
-
-// First FP callee save.
-#define ARM_FP_CALLEE_SAVE_BASE 16
-// Flag for using R4 to do suspend check
-// #define ARM_R4_SUSPEND_FLAG
-
-enum ArmResourceEncodingPos {
-  kArmGPReg0   = 0,
-  kArmRegSP    = 13,
-  kArmRegLR    = 14,
-  kArmRegPC    = 15,
-  kArmFPReg0   = 16,
-  kArmFPReg16  = 32,
-  kArmRegEnd   = 48,
-};
-
-enum ArmNativeRegisterPool {  // private marker to keep generate-operator-out.py from processing this.
-  r0           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  0,
-  r1           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  1,
-  r2           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  2,
-  r3           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  3,
-#ifdef ARM_R4_SUSPEND_FLAG
-  rARM_SUSPEND = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  4,
-#else
-  r4           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  4,
-#endif
-  r5           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  5,
-  r6           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  6,
-  r7           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  7,
-  r8           = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  8,
-  rARM_SELF    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  9,
-  r10          = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
-  r11          = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
-  r12          = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
-  r13sp        = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
-  rARM_SP      = r13sp,
-  r14lr        = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
-  rARM_LR      = r14lr,
-  r15pc        = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
-  rARM_PC      = r15pc,
-
-  fr0          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  0,
-  fr1          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  1,
-  fr2          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  2,
-  fr3          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  3,
-  fr4          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  4,
-  fr5          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  5,
-  fr6          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  6,
-  fr7          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  7,
-  fr8          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  8,
-  fr9          = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  9,
-  fr10         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 10,
-  fr11         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 11,
-  fr12         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 12,
-  fr13         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 13,
-  fr14         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 14,
-  fr15         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 15,
-  fr16         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 16,
-  fr17         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 17,
-  fr18         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 18,
-  fr19         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 19,
-  fr20         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 20,
-  fr21         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 21,
-  fr22         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 22,
-  fr23         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 23,
-  fr24         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 24,
-  fr25         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 25,
-  fr26         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 26,
-  fr27         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 27,
-  fr28         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 28,
-  fr29         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 29,
-  fr30         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 30,
-  fr31         = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 31,
-
-  dr0          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  0,
-  dr1          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  1,
-  dr2          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  2,
-  dr3          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  3,
-  dr4          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  4,
-  dr5          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  5,
-  dr6          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  6,
-  dr7          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  7,
-  dr8          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  8,
-  dr9          = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  9,
-  dr10         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
-  dr11         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
-  dr12         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
-  dr13         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
-  dr14         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
-  dr15         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
-#if 0
-  // Enable when def/use and the runtime are able to handle these.
-  dr16         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
-  dr17         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 17,
-  dr18         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
-  dr19         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 19,
-  dr20         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
-  dr21         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 21,
-  dr22         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
-  dr23         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 23,
-  dr24         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
-  dr25         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 25,
-  dr26         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
-  dr27         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 27,
-  dr28         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
-  dr29         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 29,
-  dr30         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
-  dr31         = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 31,
-#endif
-};
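
Each enumerator above ORs together a width flag (k32BitSolo/k64BitSolo), a register-class
flag, and the register number, so any component can be masked back out. A hedged sketch of
that decomposition; the constant values below are invented for illustration and only the
layout idea mirrors the real RegStorage:

    #include <cstdint>

    // Illustrative bit layout (NOT the real RegStorage constants): low bits hold
    // the register number, higher bits hold class and width flags.
    enum : uint16_t {
      kNumMask       = 0x001f,
      kFloatingPoint = 0x0100,
      k64BitSolo     = 0x0200,
    };

    constexpr uint16_t fr12 = kFloatingPoint | 12;
    constexpr uint16_t dr3  = k64BitSolo | kFloatingPoint | 3;

    static_assert((fr12 & kNumMask) == 12, "register number decodes back out");
    static_assert((dr3 & k64BitSolo) != 0, "width flag decodes back out");
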
-
-constexpr RegStorage rs_r0(RegStorage::kValid | r0);
-constexpr RegStorage rs_r1(RegStorage::kValid | r1);
-constexpr RegStorage rs_r2(RegStorage::kValid | r2);
-constexpr RegStorage rs_r3(RegStorage::kValid | r3);
-#ifdef ARM_R4_SUSPEND_FLAG
-constexpr RegStorage rs_rARM_SUSPEND(RegStorage::kValid | rARM_SUSPEND);
-#else
-constexpr RegStorage rs_r4(RegStorage::kValid | r4);
-#endif
-constexpr RegStorage rs_r5(RegStorage::kValid | r5);
-constexpr RegStorage rs_r6(RegStorage::kValid | r6);
-constexpr RegStorage rs_r7(RegStorage::kValid | r7);
-constexpr RegStorage rs_r8(RegStorage::kValid | r8);
-constexpr RegStorage rs_rARM_SELF(RegStorage::kValid | rARM_SELF);
-constexpr RegStorage rs_r10(RegStorage::kValid | r10);
-constexpr RegStorage rs_r11(RegStorage::kValid | r11);
-constexpr RegStorage rs_r12(RegStorage::kValid | r12);
-constexpr RegStorage rs_r13sp(RegStorage::kValid | r13sp);
-constexpr RegStorage rs_rARM_SP(RegStorage::kValid | rARM_SP);
-constexpr RegStorage rs_r14lr(RegStorage::kValid | r14lr);
-constexpr RegStorage rs_rARM_LR(RegStorage::kValid | rARM_LR);
-constexpr RegStorage rs_r15pc(RegStorage::kValid | r15pc);
-constexpr RegStorage rs_rARM_PC(RegStorage::kValid | rARM_PC);
-constexpr RegStorage rs_invalid(RegStorage::kInvalid);
-
-constexpr RegStorage rs_fr0(RegStorage::kValid | fr0);
-constexpr RegStorage rs_fr1(RegStorage::kValid | fr1);
-constexpr RegStorage rs_fr2(RegStorage::kValid | fr2);
-constexpr RegStorage rs_fr3(RegStorage::kValid | fr3);
-constexpr RegStorage rs_fr4(RegStorage::kValid | fr4);
-constexpr RegStorage rs_fr5(RegStorage::kValid | fr5);
-constexpr RegStorage rs_fr6(RegStorage::kValid | fr6);
-constexpr RegStorage rs_fr7(RegStorage::kValid | fr7);
-constexpr RegStorage rs_fr8(RegStorage::kValid | fr8);
-constexpr RegStorage rs_fr9(RegStorage::kValid | fr9);
-constexpr RegStorage rs_fr10(RegStorage::kValid | fr10);
-constexpr RegStorage rs_fr11(RegStorage::kValid | fr11);
-constexpr RegStorage rs_fr12(RegStorage::kValid | fr12);
-constexpr RegStorage rs_fr13(RegStorage::kValid | fr13);
-constexpr RegStorage rs_fr14(RegStorage::kValid | fr14);
-constexpr RegStorage rs_fr15(RegStorage::kValid | fr15);
-constexpr RegStorage rs_fr16(RegStorage::kValid | fr16);
-constexpr RegStorage rs_fr17(RegStorage::kValid | fr17);
-constexpr RegStorage rs_fr18(RegStorage::kValid | fr18);
-constexpr RegStorage rs_fr19(RegStorage::kValid | fr19);
-constexpr RegStorage rs_fr20(RegStorage::kValid | fr20);
-constexpr RegStorage rs_fr21(RegStorage::kValid | fr21);
-constexpr RegStorage rs_fr22(RegStorage::kValid | fr22);
-constexpr RegStorage rs_fr23(RegStorage::kValid | fr23);
-constexpr RegStorage rs_fr24(RegStorage::kValid | fr24);
-constexpr RegStorage rs_fr25(RegStorage::kValid | fr25);
-constexpr RegStorage rs_fr26(RegStorage::kValid | fr26);
-constexpr RegStorage rs_fr27(RegStorage::kValid | fr27);
-constexpr RegStorage rs_fr28(RegStorage::kValid | fr28);
-constexpr RegStorage rs_fr29(RegStorage::kValid | fr29);
-constexpr RegStorage rs_fr30(RegStorage::kValid | fr30);
-constexpr RegStorage rs_fr31(RegStorage::kValid | fr31);
-
-constexpr RegStorage rs_dr0(RegStorage::kValid | dr0);
-constexpr RegStorage rs_dr1(RegStorage::kValid | dr1);
-constexpr RegStorage rs_dr2(RegStorage::kValid | dr2);
-constexpr RegStorage rs_dr3(RegStorage::kValid | dr3);
-constexpr RegStorage rs_dr4(RegStorage::kValid | dr4);
-constexpr RegStorage rs_dr5(RegStorage::kValid | dr5);
-constexpr RegStorage rs_dr6(RegStorage::kValid | dr6);
-constexpr RegStorage rs_dr7(RegStorage::kValid | dr7);
-constexpr RegStorage rs_dr8(RegStorage::kValid | dr8);
-constexpr RegStorage rs_dr9(RegStorage::kValid | dr9);
-constexpr RegStorage rs_dr10(RegStorage::kValid | dr10);
-constexpr RegStorage rs_dr11(RegStorage::kValid | dr11);
-constexpr RegStorage rs_dr12(RegStorage::kValid | dr12);
-constexpr RegStorage rs_dr13(RegStorage::kValid | dr13);
-constexpr RegStorage rs_dr14(RegStorage::kValid | dr14);
-constexpr RegStorage rs_dr15(RegStorage::kValid | dr15);
-#if 0
-constexpr RegStorage rs_dr16(RegStorage::kValid | dr16);
-constexpr RegStorage rs_dr17(RegStorage::kValid | dr17);
-constexpr RegStorage rs_dr18(RegStorage::kValid | dr18);
-constexpr RegStorage rs_dr19(RegStorage::kValid | dr19);
-constexpr RegStorage rs_dr20(RegStorage::kValid | dr20);
-constexpr RegStorage rs_dr21(RegStorage::kValid | dr21);
-constexpr RegStorage rs_dr22(RegStorage::kValid | dr22);
-constexpr RegStorage rs_dr23(RegStorage::kValid | dr23);
-constexpr RegStorage rs_dr24(RegStorage::kValid | dr24);
-constexpr RegStorage rs_dr25(RegStorage::kValid | dr25);
-constexpr RegStorage rs_dr26(RegStorage::kValid | dr26);
-constexpr RegStorage rs_dr27(RegStorage::kValid | dr27);
-constexpr RegStorage rs_dr28(RegStorage::kValid | dr28);
-constexpr RegStorage rs_dr29(RegStorage::kValid | dr29);
-constexpr RegStorage rs_dr30(RegStorage::kValid | dr30);
-constexpr RegStorage rs_dr31(RegStorage::kValid | dr31);
-#endif
-
-// RegisterLocation templates for return values (r0, r0/r1, s0, or d0).
-// Note: The return locations are shared between quick code and the quick helpers, following the
-// quick ABI. The quick helper assembly routines need to handle any ABI differences.
-const RegLocation arm_loc_c_return =
-    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_r0, INVALID_SREG, INVALID_SREG};
-const RegLocation arm_loc_c_return_wide =
-    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
-     RegStorage::MakeRegPair(rs_r0, rs_r1), INVALID_SREG, INVALID_SREG};
-const RegLocation arm_loc_c_return_float = kArm32QuickCodeUseSoftFloat
-    ? arm_loc_c_return
-    : RegLocation({kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, rs_fr0, INVALID_SREG, INVALID_SREG});
-const RegLocation arm_loc_c_return_double = kArm32QuickCodeUseSoftFloat
-    ? arm_loc_c_return_wide
-    : RegLocation({kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, rs_dr0, INVALID_SREG, INVALID_SREG});
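
Which of the four templates applies is determined by the value's width and, for floating
point, by whether kArm32QuickCodeUseSoftFloat routes FP returns through the core
registers. A sketch of that selection, returning just the template name for illustration:

    #include <string>

    // Illustrative only: pick which of the four return-location templates applies.
    std::string SelectReturnLocation(bool is_wide, bool is_fp, bool use_soft_float) {
      if (is_fp && !use_soft_float) {
        return is_wide ? "arm_loc_c_return_double" : "arm_loc_c_return_float";
      }
      // Soft-float FP values and all core values use the core templates.
      return is_wide ? "arm_loc_c_return_wide" : "arm_loc_c_return";
    }
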
-
-enum ArmShiftEncodings {
-  kArmLsl = 0x0,
-  kArmLsr = 0x1,
-  kArmAsr = 0x2,
-  kArmRor = 0x3
-};
-
-/*
- * The following enum defines the list of Thumb instructions supported by the
- * assembler. Their corresponding EncodingMap positions are defined in
- * assemble_arm.cc.
- */
-enum ArmOpcode {
-  kArmFirst = 0,
-  kArm16BitData = kArmFirst,  // DATA   [0] rd[15..0].
-  kThumbAdcRR,       // adc   [0100000101] rm[5..3] rd[2..0].
-  kThumbAddRRI3,     // add(1)  [0001110] imm_3[8..6] rn[5..3] rd[2..0].
-  kThumbAddRI8,      // add(2)  [00110] rd[10..8] imm_8[7..0].
-  kThumbAddRRR,      // add(3)  [0001100] rm[8..6] rn[5..3] rd[2..0].
-  kThumbAddRRLH,     // add(4)  [01000100] H12[01] rm[5..3] rd[2..0].
-  kThumbAddRRHL,     // add(4)  [01001000] H12[10] rm[5..3] rd[2..0].
-  kThumbAddRRHH,     // add(4)  [01001100] H12[11] rm[5..3] rd[2..0].
-  kThumbAddPcRel,    // add(5)  [10100] rd[10..8] imm_8[7..0].
-  kThumbAddSpRel,    // add(6)  [10101] rd[10..8] imm_8[7..0].
-  kThumbAddSpI7,     // add(7)  [101100000] imm_7[6..0].
-  kThumbAndRR,       // and   [0100000000] rm[5..3] rd[2..0].
-  kThumbAsrRRI5,     // asr(1)  [00010] imm_5[10..6] rm[5..3] rd[2..0].
-  kThumbAsrRR,       // asr(2)  [0100000100] rs[5..3] rd[2..0].
-  kThumbBCond,       // b(1)  [1101] cond[11..8] offset_8[7..0].
-  kThumbBUncond,     // b(2)  [11100] offset_11[10..0].
-  kThumbBicRR,       // bic   [0100001110] rm[5..3] rd[2..0].
-  kThumbBkpt,        // bkpt  [10111110] imm_8[7..0].
-  kThumbBlx1,        // blx(1)  [111] H[10] offset_11[10..0].
-  kThumbBlx2,        // blx(1)  [111] H[01] offset_11[10..0].
-  kThumbBl1,         // bl(1)   [111] H[10] offset_11[10..0].
-  kThumbBl2,         // bl(1)   [111] H[11] offset_11[10..0].
-  kThumbBlxR,        // blx(2)  [010001111] rm[6..3] [000].
-  kThumbBx,          // bx    [010001110] H2[6..6] rm[5..3] SBZ[000].
-  kThumbCmnRR,       // cmn   [0100001011] rm[5..3] rd[2..0].
-  kThumbCmpRI8,      // cmp(1)  [00101] rn[10..8] imm_8[7..0].
-  kThumbCmpRR,       // cmp(2)  [0100001010] rm[5..3] rd[2..0].
-  kThumbCmpLH,       // cmp(3)  [01000101] H12[01] rm[5..3] rd[2..0].
-  kThumbCmpHL,       // cmp(3)  [01000110] H12[10] rm[5..3] rd[2..0].
-  kThumbCmpHH,       // cmp(3)  [01000111] H12[11] rm[5..3] rd[2..0].
-  kThumbEorRR,       // eor   [0100000001] rm[5..3] rd[2..0].
-  kThumbLdmia,       // ldmia   [11001] rn[10..8] reglist [7..0].
-  kThumbLdrRRI5,     // ldr(1)  [01101] imm_5[10..6] rn[5..3] rd[2..0].
-  kThumbLdrRRR,      // ldr(2)  [0101100] rm[8..6] rn[5..3] rd[2..0].
-  kThumbLdrPcRel,    // ldr(3)  [01001] rd[10..8] imm_8[7..0].
-  kThumbLdrSpRel,    // ldr(4)  [10011] rd[10..8] imm_8[7..0].
-  kThumbLdrbRRI5,    // ldrb(1) [01111] imm_5[10..6] rn[5..3] rd[2..0].
-  kThumbLdrbRRR,     // ldrb(2) [0101110] rm[8..6] rn[5..3] rd[2..0].
-  kThumbLdrhRRI5,    // ldrh(1) [10001] imm_5[10..6] rn[5..3] rd[2..0].
-  kThumbLdrhRRR,     // ldrh(2) [0101101] rm[8..6] rn[5..3] rd[2..0].
-  kThumbLdrsbRRR,    // ldrsb   [0101011] rm[8..6] rn[5..3] rd[2..0].
-  kThumbLdrshRRR,    // ldrsh   [0101111] rm[8..6] rn[5..3] rd[2..0].
-  kThumbLslRRI5,     // lsl(1)  [00000] imm_5[10..6] rm[5..3] rd[2..0].
-  kThumbLslRR,       // lsl(2)  [0100000010] rs[5..3] rd[2..0].
-  kThumbLsrRRI5,     // lsr(1)  [00001] imm_5[10..6] rm[5..3] rd[2..0].
-  kThumbLsrRR,       // lsr(2)  [0100000011] rs[5..3] rd[2..0].
-  kThumbMovImm,      // mov(1)  [00100] rd[10..8] imm_8[7..0].
-  kThumbMovRR,       // mov(2)  [0001110000] rn[5..3] rd[2..0].
-  kThumbMovRR_H2H,   // mov(3)  [01000111] H12[11] rm[5..3] rd[2..0].
-  kThumbMovRR_H2L,   // mov(3)  [01000110] H12[01] rm[5..3] rd[2..0].
-  kThumbMovRR_L2H,   // mov(3)  [01000101] H12[10] rm[5..3] rd[2..0].
-  kThumbMul,         // mul   [0100001101] rm[5..3] rd[2..0].
-  kThumbMvn,         // mvn   [0100001111] rm[5..3] rd[2..0].
-  kThumbNeg,         // neg   [0100001001] rm[5..3] rd[2..0].
-  kThumbOrr,         // orr   [0100001100] rm[5..3] rd[2..0].
-  kThumbPop,         // pop   [1011110] r[8..8] rl[7..0].
-  kThumbPush,        // push  [1011010] r[8..8] rl[7..0].
-  kThumbRev,         // rev   [1011101000] rm[5..3] rd[2..0]
-  kThumbRevsh,       // revsh   [1011101011] rm[5..3] rd[2..0]
-  kThumbRorRR,       // ror   [0100000111] rs[5..3] rd[2..0].
-  kThumbSbc,         // sbc   [0100000110] rm[5..3] rd[2..0].
-  kThumbStmia,       // stmia   [11000] rn[10..8] reglist [7.. 0].
-  kThumbStrRRI5,     // str(1)  [01100] imm_5[10..6] rn[5..3] rd[2..0].
-  kThumbStrRRR,      // str(2)  [0101000] rm[8..6] rn[5..3] rd[2..0].
-  kThumbStrSpRel,    // str(3)  [10010] rd[10..8] imm_8[7..0].
-  kThumbStrbRRI5,    // strb(1) [01110] imm_5[10..6] rn[5..3] rd[2..0].
-  kThumbStrbRRR,     // strb(2) [0101010] rm[8..6] rn[5..3] rd[2..0].
-  kThumbStrhRRI5,    // strh(1) [10000] imm_5[10..6] rn[5..3] rd[2..0].
-  kThumbStrhRRR,     // strh(2) [0101001] rm[8..6] rn[5..3] rd[2..0].
-  kThumbSubRRI3,     // sub(1)  [0001111] imm_3[8..6] rn[5..3] rd[2..0].
-  kThumbSubRI8,      // sub(2)  [00111] rd[10..8] imm_8[7..0].
-  kThumbSubRRR,      // sub(3)  [0001101] rm[8..6] rn[5..3] rd[2..0].
-  kThumbSubSpI7,     // sub(4)  [101100001] imm_7[6..0].
-  kThumbSwi,         // swi   [11011111] imm_8[7..0].
-  kThumbTst,         // tst   [0100001000] rm[5..3] rn[2..0].
-  kThumb2Vldrs,      // vldr low  sx [111011011001] rn[19..16] rd[15-12] [1010] imm_8[7..0].
-  kThumb2Vldrd,      // vldr low  dx [111011011001] rn[19..16] rd[15-12] [1011] imm_8[7..0].
-  kThumb2Vmuls,      // vmul vd, vn, vm [111011100010] rn[19..16] rd[15-12] [10100000] rm[3..0].
-  kThumb2Vmuld,      // vmul vd, vn, vm [111011100010] rn[19..16] rd[15-12] [10110000] rm[3..0].
-  kThumb2Vstrs,      // vstr low  sx [111011011000] rn[19..16] rd[15-12] [1010] imm_8[7..0].
-  kThumb2Vstrd,      // vstr low  dx [111011011000] rn[19..16] rd[15-12] [1011] imm_8[7..0].
-  kThumb2Vsubs,      // vsub vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10100100] rm[3..0].
-  kThumb2Vsubd,      // vsub vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10110100] rm[3..0].
-  kThumb2Vadds,      // vadd vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10100000] rm[3..0].
-  kThumb2Vaddd,      // vadd vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10110000] rm[3..0].
-  kThumb2Vdivs,      // vdiv vd, vn, vm [111011101000] rn[19..16] rd[15-12] [10100000] rm[3..0].
-  kThumb2Vdivd,      // vdiv vd, vn, vm [111011101000] rn[19..16] rd[15-12] [10110000] rm[3..0].
-  kThumb2VmlaF64,    // vmla.F64 vd, vn, vm [111011100000] vn[19..16] vd[15..12] [10110000] vm[3..0].
-  kThumb2VcvtIF,     // vcvt.F32.S32 vd, vm [1110111010111000] vd[15..12] [10101100] vm[3..0].
-  kThumb2VcvtFI,     // vcvt.S32.F32 vd, vm [1110111010111101] vd[15..12] [10101100] vm[3..0].
-  kThumb2VcvtDI,     // vcvt.S32.F64 vd, vm [1110111010111101] vd[15..12] [10111100] vm[3..0].
-  kThumb2VcvtFd,     // vcvt.F64.F32 vd, vm [1110111010110111] vd[15..12] [10101100] vm[3..0].
-  kThumb2VcvtDF,     // vcvt.F32.F64 vd, vm [1110111010110111] vd[15..12] [10111100] vm[3..0].
-  kThumb2VcvtF64S32,  // vcvt.F64.S32 vd, vm [1110111010111000] vd[15..12] [10111100] vm[3..0].
-  kThumb2VcvtF64U32,  // vcvt.F64.U32 vd, vm [1110111010111000] vd[15..12] [10110100] vm[3..0].
-  kThumb2Vsqrts,     // vsqrt.f32 vd, vm [1110111010110001] vd[15..12] [10101100] vm[3..0].
-  kThumb2Vsqrtd,     // vsqrt.f64 vd, vm [1110111010110001] vd[15..12] [10111100] vm[3..0].
-  kThumb2MovI8M,     // mov(T2) rd, #<const> [11110] i [00001001111] imm3 rd[11..8] imm8.
-  kThumb2MovImm16,   // mov(T3) rd, #<const> [11110] i [0010100] imm4 [0] imm3 rd[11..8] imm8.
-  kThumb2StrRRI12,   // str(Imm,T3) rd,[rn,#imm12] [111110001100] rn[19..16] rt[15..12] imm12[11..0].
-  kThumb2LdrRRI12,   // ldr(Imm,T3) rd,[rn,#imm12] [111110001101] rn[19..16] rt[15..12] imm12[11..0].
-  kThumb2StrRRI8Predec,  // str(Imm,T4) rd,[rn,#-imm8] [111110000100] rn[19..16] rt[15..12] [1100] imm[7..0].
-  kThumb2LdrRRI8Predec,  // ldr(Imm,T4) rd,[rn,#-imm8] [111110000101] rn[19..16] rt[15..12] [1100] imm[7..0].
-  kThumb2Cbnz,       // cbnz rd,<label> [101110] i [1] imm5[7..3] rn[2..0].
-  kThumb2Cbz,        // cbz rd,<label> [101100] i [1] imm5[7..3] rn[2..0].
-  kThumb2AddRRI12,   // add rd, rn, #imm12 [11110] i [100000] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
-  kThumb2MovRR,      // mov rd, rm [11101010010011110000] rd[11..8] [0000] rm[3..0].
-  kThumb2Vmovs,      // vmov.f32 vd, vm [111011101] D [110000] vd[15..12] [101001] M [0] vm[3..0].
-  kThumb2Vmovd,      // vmov.f64 vd, vm [111011101] D [110000] vd[15..12] [101101] M [0] vm[3..0].
-  kThumb2Ldmia,      // ldmia  [111010001001] rn[19..16] mask[15..0].
-  kThumb2Stmia,      // stmia  [111010001000] rn[19..16] mask[15..0].
-  kThumb2AddRRR,     // add [111010110000] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
-  kThumb2SubRRR,     // sub [111010111010] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
-  kThumb2SbcRRR,     // sbc [111010110110] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
-  kThumb2CmpRR,      // cmp [111010111011] rn[19..16] [0000] [1111] [0000] rm[3..0].
-  kThumb2SubRRI12,   // sub rd, rn, #imm12 [11110] i [101010] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
-  kThumb2MvnI8M,     // mov(T2) rd, #<const> [11110] i [00011011110] imm3 rd[11..8] imm8.
-  kThumb2Sel,        // sel rd, rn, rm [111110101010] rn[19-16] rd[11-8] rm[3-0].
-  kThumb2Ubfx,       // ubfx rd,rn,#lsb,#width [111100111100] rn[19..16] [0] imm3[14-12] rd[11-8] w[4-0].
-  kThumb2Sbfx,       // sbfx rd,rn,#lsb,#width [111100110100] rn[19..16] [0] imm3[14-12] rd[11-8] w[4-0].
-  kThumb2LdrRRR,     // ldr rt,[rn,rm,LSL #imm] [111110000101] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
-  kThumb2LdrhRRR,    // ldrh rt,[rn,rm,LSL #imm] [111110000011] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
-  kThumb2LdrshRRR,   // ldrsh rt,[rn,rm,LSL #imm] [111110010011] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
-  kThumb2LdrbRRR,    // ldrb rt,[rn,rm,LSL #imm] [111110000001] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
-  kThumb2LdrsbRRR,   // ldrsb rt,[rn,rm,LSL #imm] [111110010001] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
-  kThumb2StrRRR,     // str rt,[rn,rm,LSL #imm] [111110000100] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
-  kThumb2StrhRRR,    // strh rt,[rn,rm,LSL #imm] [111110000010] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
-  kThumb2StrbRRR,    // strb rt,[rn,rm,LSL #imm] [111110000000] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
-  kThumb2LdrhRRI12,  // ldrh rt,[rn,#imm12] [111110001011] rt[15..12] rn[19..16] imm12[11..0].
-  kThumb2LdrshRRI12,  // ldrsh rt,[rn,#imm12] [111110011011] rt[15..12] rn[19..16] imm12[11..0].
-  kThumb2LdrbRRI12,  // ldrb rt,[rn,#imm12] [111110001001] rt[15..12] rn[19..16] imm12[11..0].
-  kThumb2LdrsbRRI12,  // ldrsb rt,[rn,#imm12] [111110011001] rt[15..12] rn[19..16] imm12[11..0].
-  kThumb2StrhRRI12,  // strh rt,[rn,#imm12] [111110001010] rt[15..12] rn[19..16] imm12[11..0].
-  kThumb2StrbRRI12,  // strb rt,[rn,#imm12] [111110001000] rt[15..12] rn[19..16] imm12[11..0].
-  kThumb2Pop,        // pop   [1110100010111101] list[15-0].
-  kThumb2Push,       // push  [1110100100101101] list[15-0].
-  kThumb2CmpRI8M,    // cmp rn, #<const> [11110] i [011011] rn[19-16] [0] imm3 [1111] imm8[7..0].
-  kThumb2CmnRI8M,    // cmn rn, #<const> [11110] i [010001] rn[19-16] [0] imm3 [1111] imm8[7..0].
-  kThumb2AdcRRR,     // adc [111010110101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
-  kThumb2AndRRR,     // and [111010100000] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
-  kThumb2BicRRR,     // bic [111010100010] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
-  kThumb2CmnRR,      // cmn [111010110001] rn[19..16] [0000] [1111] [0000] rm[3..0].
-  kThumb2EorRRR,     // eor [111010101000] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
-  kThumb2MulRRR,     // mul [111110110000] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
-  kThumb2SdivRRR,    // sdiv [111110111001] rn[19..16] [1111] rd[11..8] [1111] rm[3..0].
-  kThumb2UdivRRR,    // udiv [111110111011] rn[19..16] [1111] rd[11..8] [1111] rm[3..0].
-  kThumb2MnvRR,      // mvn [11101010011011110] rd[11-8] [0000] rm[3..0].
-  kThumb2RsubRRI8M,  // rsb rd, rn, #<const> [11110] i [011101] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
-  kThumb2NegRR,      // actually rsub rd, rn, #0.
-  kThumb2OrrRRR,     // orr [111010100100] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
-  kThumb2TstRR,      // tst [111010100001] rn[19..16] [0000] [1111] [0000] rm[3..0].
-  kThumb2LslRRR,     // lsl [111110100000] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
-  kThumb2LsrRRR,     // lsr [111110100010] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
-  kThumb2AsrRRR,     // asr [111110100100] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
-  kThumb2RorRRR,     // ror [111110100110] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
-  kThumb2LslRRI5,    // lsl [11101010010011110] imm3[14..12] rd[11..8] imm2[7..6] [00] rm[3..0].
-  kThumb2LsrRRI5,    // lsr [11101010010011110] imm3[14..12] rd[11..8] imm2[7..6] [01] rm[3..0].
-  kThumb2AsrRRI5,    // asr [11101010010011110] imm3[14..12] rd[11..8] imm2[7..6] [10] rm[3..0].
-  kThumb2RorRRI5,    // ror [11101010010011110] imm3[14..12] rd[11..8] imm2[7..6] [11] rm[3..0].
-  kThumb2BicRRI8M,   // bic rd, rn, #<const> [11110] i [000010] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
-  kThumb2AndRRI8M,   // and rd, rn, #<const> [11110] i [000000] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
-  kThumb2OrrRRI8M,   // orr rd, rn, #<const> [11110] i [000100] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
-  kThumb2OrnRRI8M,   // orn rd, rn, #<const> [11110] i [000110] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
-  kThumb2EorRRI8M,   // eor rd, rn, #<const> [11110] i [001000] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
-  kThumb2AddRRI8M,   // add rd, rn, #<const> [11110] i [010001] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
-  kThumb2AdcRRI8M,   // adc rd, rn, #<const> [11110] i [010101] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
-  kThumb2SubRRI8M,   // sub rd, rn, #<const> [11110] i [011011] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
-  kThumb2SbcRRI8M,   // sbc rd, rn, #<const> [11110] i [010111] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
-  kThumb2RevRR,      // rev [111110101001] rm[19..16] [1111] rd[11..8] 1000 rm[3..0].
-  kThumb2RevshRR,    // revsh [111110101001] rm[19..16] [1111] rd[11..8] 1011 rm[3..0].
-  kThumb2It,         // it [10111111] firstcond[7-4] mask[3-0].
-  kThumb2Fmstat,     // fmstat [11101110111100011111101000010000].
-  kThumb2Vcmpd,      // vcmp [111011101] D [11011] rd[15-12] [1011] E [1] M [0] rm[3-0].
-  kThumb2Vcmps,      // vcmp [111011101] D [11010] rd[15-12] [1011] E [1] M [0] rm[3-0].
-  kThumb2LdrPcRel12,  // ldr rd,[pc,#imm12] [1111100011011111] rt[15-12] imm12[11-0].
-  kThumb2BCond,      // b<c> [11110] S cond[25-22] imm6[21-16] [10] J1 [0] J2 imm11[10..0].
-  kThumb2Fmrs,       // vmov [111011100000] vn[19-16] rt[15-12] [1010] N [0010000].
-  kThumb2Fmsr,       // vmov [111011100001] vn[19-16] rt[15-12] [1010] N [0010000].
-  kThumb2Fmrrd,      // vmov [111011000100] rt2[19-16] rt[15-12] [101100] M [1] vm[3-0].
-  kThumb2Fmdrr,      // vmov [111011000101] rt2[19-16] rt[15-12] [101100] M [1] vm[3-0].
-  kThumb2Vabsd,      // vabs.f64 [111011101] D [110000] rd[15-12] [1011110] M [0] vm[3-0].
-  kThumb2Vabss,      // vabs.f32 [111011101] D [110000] rd[15-12] [1010110] M [0] vm[3-0].
-  kThumb2Vnegd,      // vneg.f64 [111011101] D [110000] rd[15-12] [1011110] M [0] vm[3-0].
-  kThumb2Vnegs,      // vneg.f32 [111011101] D [110000] rd[15-12] [1010110] M [0] vm[3-0].
-  kThumb2Vmovs_IMM8,  // vmov.f32 [111011101] D [11] imm4h[19-16] vd[15-12] [10100000] imm4l[3-0].
-  kThumb2Vmovd_IMM8,  // vmov.f64 [111011101] D [11] imm4h[19-16] vd[15-12] [10110000] imm4l[3-0].
-  kThumb2Mla,        // mla [111110110000] rn[19-16] ra[15-12] rd[11-8] [0000] rm[3-0].
-  kThumb2Mls,        // mls [111110110000] rn[19-16] ra[15-12] rd[11-8] [0001] rm[3-0].
-  kThumb2Umull,      // umull [111110111010] rn[19-16], rdlo[15-12] rdhi[11-8] [0000] rm[3-0].
-  kThumb2Ldrex,      // ldrex [111010000101] rn[19-16] rt[15-12] [1111] imm8[7-0].
-  kThumb2Ldrexd,     // ldrexd [111010001101] rn[19-16] rt[15-12] rt2[11-8] [11111111].
-  kThumb2Strex,      // strex [111010000100] rn[19-16] rt[15-12] rd[11-8] imm8[7-0].
-  kThumb2Strexd,     // strexd [111010001100] rn[19-16] rt[15-12] rt2[11-8] [0111] Rd[3-0].
-  kThumb2Clrex,      // clrex [11110011101111111000111100101111].
-  kThumb2Bfi,        // bfi [111100110110] rn[19-16] [0] imm3[14-12] rd[11-8] imm2[7-6] [0] msb[4-0].
-  kThumb2Bfc,        // bfc [11110011011011110] [0] imm3[14-12] rd[11-8] imm2[7-6] [0] msb[4-0].
-  kThumb2Dmb,        // dmb [1111001110111111100011110101] option[3-0].
-  kThumb2LdrPcReln12,  // ldr rd,[pc,-#imm12] [1111100011011111] rt[15-12] imm12[11-0].
-  kThumb2Stm,        // stm <list> [111010010000] rn[19-16] 000 rl[12-0].
-  kThumbUndefined,   // undefined [11011110xxxxxxxx].
-  kThumb2VPopCS,     // vpop <list of callee-save fp singles (s16+)>.
-  kThumb2VPushCS,    // vpush <list of callee-save fp singles (s16+)>.
-  kThumb2Vldms,      // vldms rd, <list>.
-  kThumb2Vstms,      // vstms rd, <list>.
-  kThumb2BUncond,    // b <label>.
-  kThumb2Bl,         // bl with linker fixup. [11110] S imm10 [11] J1 [1] J2 imm11.
-  kThumb2MovImm16H,  // similar to kThumb2MovImm16, but target high hw.
-  kThumb2AddPCR,     // Thumb2 2-operand add with hard-coded PC target.
-  kThumb2Adr,        // Special purpose encoding of ADR for switch tables.
-  kThumb2MovImm16LST,  // Special purpose version for switch table use.
-  kThumb2MovImm16HST,  // Special purpose version for switch table use.
-  kThumb2LdmiaWB,    // ldmia  [111010011001] rn[19..16] mask[15..0].
-  kThumb2OrrRRRs,    // orrs [111010100101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
-  kThumb2Push1,      // t3 encoding of push.
-  kThumb2Pop1,       // t3 encoding of pop.
-  kThumb2RsubRRR,    // rsb [111010111101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
-  kThumb2Smull,      // smull [111110111000] rn[19-16], rdlo[15-12] rdhi[11-8] [0000] rm[3-0].
-  kThumb2LdrdPcRel8,  // ldrd rt, rt2, pc +-/1024.
-  kThumb2LdrdI8,     // ldrd rt, rt2, [rn +-/1024].
-  kThumb2StrdI8,     // strd rt, rt2, [rn +-/1024].
-  kArmLast,
-};
-std::ostream& operator<<(std::ostream& os, const ArmOpcode& rhs);
-
-enum ArmOpDmbOptions {
-  kSY = 0xf,
-  kST = 0xe,
-  kISH = 0xb,
-  kISHST = 0xa,
-  kNSH = 0x7,
-  kNSHST = 0x6
-};
-
-// Instruction assembly field_loc kind.
-enum ArmEncodingKind {
-  kFmtUnused,      // Unused field and marks end of formats.
-  kFmtBitBlt,      // Bit string using end/start.
-  kFmtLdmRegList,  // Load multiple register list using [15,14,12..0].
-  kFmtStmRegList,  // Store multiple register list using [14,12..0].
-  kFmtDfp,         // Double FP reg.
-  kFmtSfp,         // Single FP reg.
-  kFmtModImm,      // Shifted 8-bit immed using [26,14..12,7..0].
-  kFmtImm16,       // Zero-extended immed using [26,19..16,14..12,7..0].
-  kFmtImm6,        // Encoded branch target using [9,7..3]0.
-  kFmtImm12,       // Zero-extended immediate using [26,14..12,7..0].
-  kFmtShift,       // Shift descriptor, [14..12,7..4].
-  kFmtLsb,         // least significant bit using [14..12][7..6].
-  kFmtBWidth,      // bit-field width, encoded as width-1.
-  kFmtShift5,      // Shift count, [14..12,7..6].
-  kFmtBrOffset,    // Signed extended [26,11,13,21-16,10-0]:0.
-  kFmtFPImm,       // Encoded floating point immediate.
-  kFmtOff24,       // 24-bit Thumb2 unconditional branch encoding.
-  kFmtSkip,        // Unused field, but continue to next.
-};
-std::ostream& operator<<(std::ostream& os, const ArmEncodingKind& rhs);
-
-// Struct used to define the snippet positions for each Thumb opcode.
-struct ArmEncodingMap {
-  uint32_t skeleton;
-  struct {
-    ArmEncodingKind kind;
-    int end;   // end for kFmtBitBlt, 1-bit slice end for FP regs.
-    int start;  // start for kFmtBitBlt, 4-bit slice end for FP regs.
-  } field_loc[4];
-  ArmOpcode opcode;
-  uint64_t flags;
-  const char* name;
-  const char* fmt;
-  int size;   // Note: size is in bytes.
-  FixupKind fixup;
-};
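
When assembling, the backend walks field_loc[] and, for kFmtBitBlt fields, deposits each
operand into the bit slice [end..start] of the skeleton. A minimal sketch of that bit-blit
step for a 16-bit Thumb encoding (handling for the other field kinds is omitted):

    #include <cstdint>

    // Insert `value` into bits [end..start] of `skeleton` (kFmtBitBlt semantics):
    // mask the value to (end - start + 1) bits and shift it into place.
    constexpr uint16_t BitBlt(uint16_t skeleton, int end, int start, uint16_t value) {
      return static_cast<uint16_t>(
          skeleton | ((value & ((1u << (end - start + 1)) - 1u)) << start));
    }

    // Example: kThumbAddRRR (skeleton 0x1800) with rd=r0, rn=r1, rm=r2:
    //   rd in bits [2..0], rn in [5..3], rm in [8..6].
    constexpr uint16_t insn =
        BitBlt(BitBlt(BitBlt(0x1800, 2, 0, 0), 5, 3, 1), 8, 6, 2);
    static_assert(insn == 0x1888, "adds r0, r1, r2 encodes as 0x1888");
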
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_ARM_ARM_LIR_H_
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
deleted file mode 100644
index 5f911db..0000000
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ /dev/null
@@ -1,1687 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_arm.h"
-
-#include "arm_lir.h"
-#include "base/logging.h"
-#include "dex/compiler_ir.h"
-#include "dex/quick/mir_to_lir-inl.h"
-
-namespace art {
-
-/*
- * opcode: ArmOpcode enum
- * skeleton: pre-designated bit-pattern for this opcode
- * k0: key to applying ds/de
- * ds: dest start bit position
- * de: dest end bit position
- * k1: key to applying s1s/s1e
- * s1s: src1 start bit position
- * s1e: src1 end bit position
- * k2: key to applying s2s/s2e
- * s2s: src2 start bit position
- * s2e: src2 end bit position
- * operands: number of operands (for sanity check purposes)
- * name: mnemonic name
- * fmt: for pretty-printing
- */
-#define ENCODING_MAP(opcode, skeleton, k0, ds, de, k1, s1s, s1e, k2, s2s, s2e, \
-                     k3, k3s, k3e, flags, name, fmt, size, fixup) \
-        {skeleton, {{k0, ds, de}, {k1, s1s, s1e}, {k2, s2s, s2e}, \
-                    {k3, k3s, k3e}}, opcode, flags, name, fmt, size, fixup}
-
-/* Instruction dump string format keys: !pf, where "!" is the start
- * of the key, "p" is which numeric operand to use and "f" is the
- * print format.
- *
- * [p]ositions:
- *     0 -> operands[0] (dest)
- *     1 -> operands[1] (src1)
- *     2 -> operands[2] (src2)
- *     3 -> operands[3] (extra)
- *
- * [f]ormats:
- *     h -> 4-digit hex
- *     d -> decimal
- *     E -> decimal*4
- *     F -> decimal*2
- *     c -> branch condition (beq, bne, etc.)
- *     t -> pc-relative target
- *     u -> 1st half of bl[x] target
- *     v -> 2nd half of bl[x] target
- *     R -> register list
- *     s -> single precision floating point register
- *     S -> double precision floating point register
- *     m -> Thumb2 modified immediate
- *     n -> complemented Thumb2 modified immediate
- *     M -> Thumb2 16-bit zero-extended immediate
- *     b -> 4-digit binary
- *     B -> dmb option string (sy, st, ish, ishst, nsh, nshst)
- *     H -> operand shift
- *     C -> core register name
- *     P -> fp cs register list (base of s16)
- *     Q -> fp cs register list (base of s0)
- *
- *  [!] escape.  To insert "!", use "!!"
- */
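
As a toy illustration of these keys, the following interpreter expands just the `d`
(decimal) and `C` (core register) formats; the real dump code handles the full table
above:

    #include <cstdio>
    #include <string>

    // Expand "!pf" keys against an operands array: p = operand index, f = format.
    // Only 'd' (decimal) and 'C' (core register name) are handled in this sketch.
    std::string FormatInsn(const char* fmt, const int operands[4]) {
      std::string out;
      for (const char* p = fmt; *p != '\0'; ++p) {
        if (*p != '!') {
          out += *p;
          continue;
        }
        ++p;                  // Consume '!'.
        if (*p == '!') {      // "!!" escapes a literal '!'.
          out += '!';
          continue;
        }
        int op = *p++ - '0';  // Position digit selects the operand.
        char buf[16];
        switch (*p) {         // Format character.
          case 'd': snprintf(buf, sizeof(buf), "%d", operands[op]); break;
          case 'C': snprintf(buf, sizeof(buf), "r%d", operands[op]); break;
          default:  snprintf(buf, sizeof(buf), "?"); break;
        }
        out += buf;
      }
      return out;
    }

    // Example: int ops[4] = {0, 1, 7, 0};
    // FormatInsn("adds !0C, !1C, #!2d", ops) yields "adds r0, r1, #7".
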
-/* NOTE: must be kept in sync with enum ArmOpcode from arm_lir.h */
-const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
-    ENCODING_MAP(kArm16BitData,    0x0000,
-                 kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP, "data", "0x!0h(!0d)", 2, kFixupNone),
-    ENCODING_MAP(kThumbAdcRR,        0x4140,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES | USES_CCODES,
-                 "adcs", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbAddRRI3,      0x1c00,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
-                 "adds", "!0C, !1C, #!2d", 2, kFixupNone),
-    ENCODING_MAP(kThumbAddRI8,       0x3000,
-                 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE0 | SETS_CCODES,
-                 "adds", "!0C, !0C, #!1d", 2, kFixupNone),
-    ENCODING_MAP(kThumbAddRRR,       0x1800,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE12 | SETS_CCODES,
-                 "adds", "!0C, !1C, !2C", 2, kFixupNone),
-    ENCODING_MAP(kThumbAddRRLH,     0x4440,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE01,
-                 "add", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbAddRRHL,     0x4480,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE01,
-                 "add", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbAddRRHH,     0x44c0,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE01,
-                 "add", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbAddPcRel,    0xa000,
-                 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | NEEDS_FIXUP,
-                 "add", "!0C, pc, #!1E", 2, kFixupLoad),
-    ENCODING_MAP(kThumbAddSpRel,    0xa800,
-                 kFmtBitBlt, 10, 8, kFmtSkip, -1, -1, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_SP,
-                 "add", "!0C, sp, #!2E", 2, kFixupNone),
-    ENCODING_MAP(kThumbAddSpI7,      0xb000,
-                 kFmtBitBlt, 6, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF_SP | REG_USE_SP,
-                 "add", "sp, #!0d*4", 2, kFixupNone),
-    ENCODING_MAP(kThumbAndRR,        0x4000,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
-                 "ands", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbAsrRRI5,      0x1000,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
-                 "asrs", "!0C, !1C, #!2d", 2, kFixupNone),
-    ENCODING_MAP(kThumbAsrRR,        0x4100,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
-                 "asrs", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbBCond,        0xd000,
-                 kFmtBitBlt, 7, 0, kFmtBitBlt, 11, 8, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | USES_CCODES |
-                 NEEDS_FIXUP, "b!1c", "!0t", 2, kFixupCondBranch),
-    ENCODING_MAP(kThumbBUncond,      0xe000,
-                 kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP,
-                 "b", "!0t", 2, kFixupT1Branch),
-    ENCODING_MAP(kThumbBicRR,        0x4380,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
-                 "bics", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbBkpt,          0xbe00,
-                 kFmtBitBlt, 7, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH,
-                 "bkpt", "!0d", 2, kFixupNone),
-    ENCODING_MAP(kThumbBlx1,         0xf000,
-                 kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_DEF_LR |
-                 NEEDS_FIXUP, "blx_1", "!0u", 2, kFixupBlx1),
-    ENCODING_MAP(kThumbBlx2,         0xe800,
-                 kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_DEF_LR |
-                 NEEDS_FIXUP, "blx_2", "!0v", 2, kFixupLabel),
-    ENCODING_MAP(kThumbBl1,          0xf000,
-                 kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR | NEEDS_FIXUP,
-                 "bl_1", "!0u", 2, kFixupBl1),
-    ENCODING_MAP(kThumbBl2,          0xf800,
-                 kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR | NEEDS_FIXUP,
-                 "bl_2", "!0v", 2, kFixupLabel),
-    ENCODING_MAP(kThumbBlxR,         0x4780,
-                 kFmtBitBlt, 6, 3, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_UNARY_OP | REG_USE0 | IS_BRANCH | REG_DEF_LR,
-                 "blx", "!0C", 2, kFixupNone),
-    ENCODING_MAP(kThumbBx,            0x4700,
-                 kFmtBitBlt, 6, 3, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | REG_USE0 | IS_BRANCH,
-                 "bx", "!0C", 2, kFixupNone),
-    ENCODING_MAP(kThumbCmnRR,        0x42c0,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
-                 "cmn", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbCmpRI8,       0x2800,
-                 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | SETS_CCODES,
-                 "cmp", "!0C, #!1d", 2, kFixupNone),
-    ENCODING_MAP(kThumbCmpRR,        0x4280,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
-                 "cmp", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbCmpLH,        0x4540,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
-                 "cmp", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbCmpHL,        0x4580,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
-                 "cmp", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbCmpHH,        0x45c0,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
-                 "cmp", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbEorRR,        0x4040,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
-                 "eors", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbLdmia,         0xc800,
-                 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE0 | REG_DEF_LIST1 | IS_LOAD,
-                 "ldmia", "!0C!!, <!1R>", 2, kFixupNone),
-    ENCODING_MAP(kThumbLdrRRI5,      0x6800,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF4,
-                 "ldr", "!0C, [!1C, #!2E]", 2, kFixupNone),
-    ENCODING_MAP(kThumbLdrRRR,       0x5800,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
-                 "ldr", "!0C, [!1C, !2C]", 2, kFixupNone),
-    ENCODING_MAP(kThumbLdrPcRel,    0x4800,
-                 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC
-                 | IS_LOAD_OFF4 | NEEDS_FIXUP, "ldr", "!0C, [pc, #!1E]", 2, kFixupLoad),
-    ENCODING_MAP(kThumbLdrSpRel,    0x9800,
-                 kFmtBitBlt, 10, 8, kFmtSkip, -1, -1, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_SP
-                 | IS_LOAD_OFF4, "ldr", "!0C, [sp, #!2E]", 2, kFixupNone),
-    ENCODING_MAP(kThumbLdrbRRI5,     0x7800,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
-                 "ldrb", "!0C, [!1C, #2d]", 2, kFixupNone),
-    ENCODING_MAP(kThumbLdrbRRR,      0x5c00,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
-                 "ldrb", "!0C, [!1C, !2C]", 2, kFixupNone),
-    ENCODING_MAP(kThumbLdrhRRI5,     0x8800,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF2,
-                 "ldrh", "!0C, [!1C, #!2F]", 2, kFixupNone),
-    ENCODING_MAP(kThumbLdrhRRR,      0x5a00,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
-                 "ldrh", "!0C, [!1C, !2C]", 2, kFixupNone),
-    ENCODING_MAP(kThumbLdrsbRRR,     0x5600,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
-                 "ldrsb", "!0C, [!1C, !2C]", 2, kFixupNone),
-    ENCODING_MAP(kThumbLdrshRRR,     0x5e00,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
-                 "ldrsh", "!0C, [!1C, !2C]", 2, kFixupNone),
-    ENCODING_MAP(kThumbLslRRI5,      0x0000,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
-                 "lsls", "!0C, !1C, #!2d", 2, kFixupNone),
-    ENCODING_MAP(kThumbLslRR,        0x4080,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
-                 "lsls", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbLsrRRI5,      0x0800,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
-                 "lsrs", "!0C, !1C, #!2d", 2, kFixupNone),
-    ENCODING_MAP(kThumbLsrRR,        0x40c0,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
-                 "lsrs", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbMovImm,       0x2000,
-                 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0 | SETS_CCODES,
-                 "movs", "!0C, #!1d", 2, kFixupNone),
-    ENCODING_MAP(kThumbMovRR,        0x1c00,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES | IS_MOVE,
-                 "movs", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbMovRR_H2H,    0x46c0,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
-                 "mov", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbMovRR_H2L,    0x4640,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
-                 "mov", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbMovRR_L2H,    0x4680,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
-                 "mov", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbMul,           0x4340,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
-                 "muls", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbMvn,           0x43c0,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
-                 "mvns", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbNeg,           0x4240,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
-                 "negs", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbOrr,           0x4300,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
-                 "orrs", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbPop,           0xbc00,
-                 kFmtBitBlt, 8, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_DEF_LIST0
-                 | IS_LOAD, "pop", "<!0R>", 2, kFixupNone),
-    ENCODING_MAP(kThumbPush,          0xb400,
-                 kFmtBitBlt, 8, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE_LIST0
-                 | IS_STORE, "push", "<!0R>", 2, kFixupNone),
-    ENCODING_MAP(kThumbRev,           0xba00,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE1,
-                 "rev", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbRevsh,         0xbac0,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE1,
-                 "rev", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbRorRR,        0x41c0,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
-                 "rors", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbSbc,           0x4180,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE01 | USES_CCODES | SETS_CCODES,
-                 "sbcs", "!0C, !1C", 2, kFixupNone),
-    ENCODING_MAP(kThumbStmia,         0xc000,
-                 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0 | REG_USE0 | REG_USE_LIST1 | IS_STORE,
-                 "stmia", "!0C!!, <!1R>", 2, kFixupNone),
-    ENCODING_MAP(kThumbStrRRI5,      0x6000,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF4,
-                 "str", "!0C, [!1C, #!2E]", 2, kFixupNone),
-    ENCODING_MAP(kThumbStrRRR,       0x5000,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE012 | IS_STORE,
-                 "str", "!0C, [!1C, !2C]", 2, kFixupNone),
-    ENCODING_MAP(kThumbStrSpRel,    0x9000,
-                 kFmtBitBlt, 10, 8, kFmtSkip, -1, -1, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE0 | REG_USE_SP
-                 | IS_STORE_OFF4, "str", "!0C, [sp, #!2E]", 2, kFixupNone),
-    ENCODING_MAP(kThumbStrbRRI5,     0x7000,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
-                 "strb", "!0C, [!1C, #!2d]", 2, kFixupNone),
-    ENCODING_MAP(kThumbStrbRRR,      0x5400,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE012 | IS_STORE,
-                 "strb", "!0C, [!1C, !2C]", 2, kFixupNone),
-    ENCODING_MAP(kThumbStrhRRI5,     0x8000,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF2,
-                 "strh", "!0C, [!1C, #!2F]", 2, kFixupNone),
-    ENCODING_MAP(kThumbStrhRRR,      0x5200,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE012 | IS_STORE,
-                 "strh", "!0C, [!1C, !2C]", 2, kFixupNone),
-    ENCODING_MAP(kThumbSubRRI3,      0x1e00,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
-                 "subs", "!0C, !1C, #!2d", 2, kFixupNone),
-    ENCODING_MAP(kThumbSubRI8,       0x3800,
-                 kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE0 | SETS_CCODES,
-                 "subs", "!0C, #!1d", 2, kFixupNone),
-    ENCODING_MAP(kThumbSubRRR,       0x1a00,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE12 | SETS_CCODES,
-                 "subs", "!0C, !1C, !2C", 2, kFixupNone),
-    ENCODING_MAP(kThumbSubSpI7,      0xb080,
-                 kFmtBitBlt, 6, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP,
-                 "sub", "sp, #!0d*4", 2, kFixupNone),
-    ENCODING_MAP(kThumbSwi,           0xdf00,
-                 kFmtBitBlt, 7, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH,
-                 "swi", "!0d", 2, kFixupNone),
-    ENCODING_MAP(kThumbTst,           0x4200,
-                 kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | REG_USE01 | SETS_CCODES,
-                 "tst", "!0C, !1C", 2, kFixupNone),
-    /*
-     * Note: The encoding map entries for vldrd and vldrs include REG_DEF_LR, even though
-     * these instructions don't define lr.  The reason is that these instructions
-     * are used for loading values from the literal pool, and the displacement may be found
-     * to be insufficient at assembly time.  In that case, we need to materialize a new base
-     * register - and will use lr as the temp register.  This works because lr is used as
-     * a temp register in very limited situations, and never in conjunction with a floating
-     * point constant load.  However, it is possible that during instruction scheduling,
-     * another use of lr could be moved across a vldrd/vldrs.  By setting REG_DEF_LR, we
-     * prevent that from happening (see the sketch after this table).  Note that we set
-     * REG_DEF_LR on all vldrd/vldrs - even those not used in a pc-relative case.  It is
-     * really only needed on the pc-relative loads, but the case we're handling is rare
-     * enough that it seemed not worth the trouble to distinguish.
-     */
-    ENCODING_MAP(kThumb2Vldrs,       0xed900a00,
-                 kFmtSfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF4 |
-                 REG_DEF_LR | NEEDS_FIXUP, "vldr", "!0s, [!1C, #!2E]", 4, kFixupVLoad),
-    ENCODING_MAP(kThumb2Vldrd,       0xed900b00,
-                 kFmtDfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF4 |
-                 REG_DEF_LR | NEEDS_FIXUP, "vldr", "!0S, [!1C, #!2E]", 4, kFixupVLoad),
-    ENCODING_MAP(kThumb2Vmuls,        0xee200a00,
-                 kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "vmuls", "!0s, !1s, !2s", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vmuld,        0xee200b00,
-                 kFmtDfp, 22, 12, kFmtDfp, 7, 16, kFmtDfp, 5, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "vmuld", "!0S, !1S, !2S", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vstrs,       0xed800a00,
-                 kFmtSfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF4,
-                 "vstr", "!0s, [!1C, #!2E]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vstrd,       0xed800b00,
-                 kFmtDfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF4,
-                 "vstr", "!0S, [!1C, #!2E]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vsubs,        0xee300a40,
-                 kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "vsub", "!0s, !1s, !2s", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vsubd,        0xee300b40,
-                 kFmtDfp, 22, 12, kFmtDfp, 7, 16, kFmtDfp, 5, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "vsub", "!0S, !1S, !2S", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vadds,        0xee300a00,
-                 kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "vadd", "!0s, !1s, !2s", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vaddd,        0xee300b00,
-                 kFmtDfp, 22, 12, kFmtDfp, 7, 16, kFmtDfp, 5, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "vadd", "!0S, !1S, !2S", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vdivs,        0xee800a00,
-                 kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "vdivs", "!0s, !1s, !2s", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vdivd,        0xee800b00,
-                 kFmtDfp, 22, 12, kFmtDfp, 7, 16, kFmtDfp, 5, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "vdivd", "!0S, !1S, !2S", 4, kFixupNone),
-    ENCODING_MAP(kThumb2VmlaF64,     0xee000b00,
-                 kFmtDfp, 22, 12, kFmtDfp, 7, 16, kFmtDfp, 5, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE012,
-                 "vmla", "!0S, !1S, !2S", 4, kFixupNone),
-    ENCODING_MAP(kThumb2VcvtIF,       0xeeb80ac0,
-                 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "vcvt.f32.s32", "!0s, !1s", 4, kFixupNone),
-    ENCODING_MAP(kThumb2VcvtFI,       0xeebd0ac0,
-                 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "vcvt.s32.f32 ", "!0s, !1s", 4, kFixupNone),
-    ENCODING_MAP(kThumb2VcvtDI,       0xeebd0bc0,
-                 kFmtSfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "vcvt.s32.f64 ", "!0s, !1S", 4, kFixupNone),
-    ENCODING_MAP(kThumb2VcvtFd,       0xeeb70ac0,
-                 kFmtDfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "vcvt.f64.f32 ", "!0S, !1s", 4, kFixupNone),
-    ENCODING_MAP(kThumb2VcvtDF,       0xeeb70bc0,
-                 kFmtSfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "vcvt.f32.f64 ", "!0s, !1S", 4, kFixupNone),
-    ENCODING_MAP(kThumb2VcvtF64S32,   0xeeb80bc0,
-                 kFmtDfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "vcvt.f64.s32 ", "!0S, !1s", 4, kFixupNone),
-    ENCODING_MAP(kThumb2VcvtF64U32,   0xeeb80b40,
-                 kFmtDfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "vcvt.f64.u32 ", "!0S, !1s", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vsqrts,       0xeeb10ac0,
-                 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "vsqrt.f32 ", "!0s, !1s", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vsqrtd,       0xeeb10bc0,
-                 kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "vsqrt.f64 ", "!0S, !1S", 4, kFixupNone),
-    ENCODING_MAP(kThumb2MovI8M, 0xf04f0000, /* no setflags encoding */
-                 kFmtBitBlt, 11, 8, kFmtModImm, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
-                 "mov", "!0C, #!1m", 4, kFixupNone),
-    ENCODING_MAP(kThumb2MovImm16,       0xf2400000,
-                 kFmtBitBlt, 11, 8, kFmtImm16, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
-                 "mov", "!0C, #!1M", 4, kFixupNone),
-    ENCODING_MAP(kThumb2StrRRI12,       0xf8c00000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
-                 "str", "!0C, [!1C, #!2d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LdrRRI12,       0xf8d00000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
-                 "ldr", "!0C, [!1C, #!2d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2StrRRI8Predec,       0xf8400c00,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 8, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
-                 "str", "!0C, [!1C, #-!2d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LdrRRI8Predec,       0xf8500c00,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 8, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
-                 "ldr", "!0C, [!1C, #-!2d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Cbnz,       0xb900, /* Note: does not affect flags */
-                 kFmtBitBlt, 2, 0, kFmtImm6, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | IS_BRANCH |
-                 NEEDS_FIXUP, "cbnz", "!0C,!1t", 2, kFixupCBxZ),
-    ENCODING_MAP(kThumb2Cbz,       0xb100, /* Note: does not affect flags */
-                 kFmtBitBlt, 2, 0, kFmtImm6, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | IS_BRANCH |
-                 NEEDS_FIXUP, "cbz", "!0C,!1t", 2, kFixupCBxZ),
-    ENCODING_MAP(kThumb2AddRRI12,       0xf2000000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtImm12, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE1,  /* Note: doesn't affect flags */
-                 "add", "!0C,!1C,#!2d", 4, kFixupNone),
-    ENCODING_MAP(kThumb2MovRR,       0xea4f0000, /* no setflags encoding */
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
-                 "mov", "!0C, !1C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vmovs,       0xeeb00a40,
-                 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
-                 "vmov.f32 ", " !0s, !1s", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vmovd,       0xeeb00b40,
-                 kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
-                 "vmov.f64 ", " !0S, !1S", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Ldmia,         0xe8900000,
-                 kFmtBitBlt, 19, 16, kFmtLdmRegList, 15, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE0 | REG_DEF_LIST1 | IS_LOAD,
-                 "ldmia", "!0C!!, <!1R>", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Stmia,         0xe8800000,
-                 kFmtBitBlt, 19, 16, kFmtStmRegList, 15, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE0 | REG_USE_LIST1 | IS_STORE,
-                 "stmia", "!0C!!, <!1R>", 4, kFixupNone),
-    ENCODING_MAP(kThumb2AddRRR,  0xeb100000, /* setflags encoding */
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtShift, -1, -1,
-                 IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
-                 "adds", "!0C, !1C, !2C!3H", 4, kFixupNone),
-    ENCODING_MAP(kThumb2SubRRR,       0xebb00000, /* setflags encoding */
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtShift, -1, -1,
-                 IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
-                 "subs", "!0C, !1C, !2C!3H", 4, kFixupNone),
-    ENCODING_MAP(kThumb2SbcRRR,       0xeb700000, /* setflags encoding */
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtShift, -1, -1,
-                 IS_QUAD_OP | REG_DEF0_USE12 | USES_CCODES | SETS_CCODES,
-                 "sbcs", "!0C, !1C, !2C!3H", 4, kFixupNone),
-    ENCODING_MAP(kThumb2CmpRR,       0xebb00f00,
-                 kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0, kFmtShift, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_USE01 | SETS_CCODES,
-                 "cmp", "!0C, !1C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2SubRRI12,       0xf2a00000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtImm12, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE1,  /* Note: doesn't affect flags */
-                 "sub", "!0C,!1C,#!2d", 4, kFixupNone),
-    ENCODING_MAP(kThumb2MvnI8M,  0xf06f0000, /* no setflags encoding */
-                 kFmtBitBlt, 11, 8, kFmtModImm, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
-                 "mvn", "!0C, #!1n", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Sel,       0xfaa0f080,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE12 | USES_CCODES,
-                 "sel", "!0C, !1C, !2C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Ubfx,       0xf3c00000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtLsb, -1, -1,
-                 kFmtBWidth, 4, 0, IS_QUAD_OP | REG_DEF0_USE1,
-                 "ubfx", "!0C, !1C, #!2d, #!3d", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Sbfx,       0xf3400000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtLsb, -1, -1,
-                 kFmtBWidth, 4, 0, IS_QUAD_OP | REG_DEF0_USE1,
-                 "sbfx", "!0C, !1C, #!2d, #!3d", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LdrRRR,    0xf8500000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
-                 "ldr", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LdrhRRR,    0xf8300000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
-                 "ldrh", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LdrshRRR,    0xf9300000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
-                 "ldrsh", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LdrbRRR,    0xf8100000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
-                 "ldrb", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LdrsbRRR,    0xf9100000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
-                 "ldrsb", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2StrRRR,    0xf8400000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
-                 "str", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2StrhRRR,    0xf8200000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
-                 "strh", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2StrbRRR,    0xf8000000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
-                 "strb", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LdrhRRI12,       0xf8b00000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
-                 "ldrh", "!0C, [!1C, #!2d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LdrshRRI12,       0xf9b00000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
-                 "ldrsh", "!0C, [!1C, #!2d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LdrbRRI12,       0xf8900000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
-                 "ldrb", "!0C, [!1C, #!2d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LdrsbRRI12,       0xf9900000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
-                 "ldrsb", "!0C, [!1C, #!2d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2StrhRRI12,       0xf8a00000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
-                 "strh", "!0C, [!1C, #!2d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2StrbRRI12,       0xf8800000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
-                 "strb", "!0C, [!1C, #!2d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Pop,           0xe8bd0000,
-                 kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_DEF_LIST0
-                 | IS_LOAD, "pop", "<!0R>", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Push,          0xe92d0000,
-                 kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE_LIST0
-                 | IS_STORE, "push", "<!0R>", 4, kFixupNone),
-    ENCODING_MAP(kThumb2CmpRI8M, 0xf1b00f00,
-                 kFmtBitBlt, 19, 16, kFmtModImm, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_USE0 | SETS_CCODES,
-                 "cmp", "!0C, #!1m", 4, kFixupNone),
-    ENCODING_MAP(kThumb2CmnRI8M, 0xf1100f00,
-                 kFmtBitBlt, 19, 16, kFmtModImm, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_USE0 | SETS_CCODES,
-                 "cmn", "!0C, #!1m", 4, kFixupNone),
-    ENCODING_MAP(kThumb2AdcRRR,  0xeb500000, /* setflags encoding */
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtShift, -1, -1,
-                 IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES | USES_CCODES,
-                 "adcs", "!0C, !1C, !2C!3H", 4, kFixupNone),
-    ENCODING_MAP(kThumb2AndRRR,  0xea000000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
-                 "and", "!0C, !1C, !2C!3H", 4, kFixupNone),
-    ENCODING_MAP(kThumb2BicRRR,  0xea200000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
-                 "bic", "!0C, !1C, !2C!3H", 4, kFixupNone),
-    ENCODING_MAP(kThumb2CmnRR,  0xeb000000,
-                 kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0, kFmtShift, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_USE01 | SETS_CCODES,
-                 "cmn", "!0C, !1C, shift !2d", 4, kFixupNone),
-    ENCODING_MAP(kThumb2EorRRR,  0xea800000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
-                 "eor", "!0C, !1C, !2C!3H", 4, kFixupNone),
-    ENCODING_MAP(kThumb2MulRRR,  0xfb00f000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "mul", "!0C, !1C, !2C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2SdivRRR,  0xfb90f0f0,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "sdiv", "!0C, !1C, !2C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2UdivRRR,  0xfbb0f0f0,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "udiv", "!0C, !1C, !2C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2MnvRR,  0xea6f0000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "mvn", "!0C, !1C, shift !2d", 4, kFixupNone),
-    ENCODING_MAP(kThumb2RsubRRI8M,       0xf1d00000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
-                 "rsbs", "!0C,!1C,#!2m", 4, kFixupNone),
-    ENCODING_MAP(kThumb2NegRR,       0xf1d00000, /* instance of rsub */
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
-                 "neg", "!0C,!1C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2OrrRRR,  0xea400000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
-                 "orr", "!0C, !1C, !2C!3H", 4, kFixupNone),
-    ENCODING_MAP(kThumb2TstRR,       0xea100f00,
-                 kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0, kFmtShift, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_USE01 | SETS_CCODES,
-                 "tst", "!0C, !1C, shift !2d", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LslRRR,  0xfa00f000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "lsl", "!0C, !1C, !2C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LsrRRR,  0xfa20f000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "lsr", "!0C, !1C, !2C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2AsrRRR,  0xfa40f000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "asr", "!0C, !1C, !2C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2RorRRR,  0xfa60f000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "ror", "!0C, !1C, !2C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LslRRI5,  0xea4f0000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift5, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "lsl", "!0C, !1C, #!2d", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LsrRRI5,  0xea4f0010,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift5, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "lsr", "!0C, !1C, #!2d", 4, kFixupNone),
-    ENCODING_MAP(kThumb2AsrRRI5,  0xea4f0020,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift5, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "asr", "!0C, !1C, #!2d", 4, kFixupNone),
-    ENCODING_MAP(kThumb2RorRRI5,  0xea4f0030,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift5, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "ror", "!0C, !1C, #!2d", 4, kFixupNone),
-    ENCODING_MAP(kThumb2BicRRI8M,  0xf0200000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "bic", "!0C, !1C, #!2m", 4, kFixupNone),
-    ENCODING_MAP(kThumb2AndRRI8M,  0xf0000000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "and", "!0C, !1C, #!2m", 4, kFixupNone),
-    ENCODING_MAP(kThumb2OrrRRI8M,  0xf0400000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "orr", "!0C, !1C, #!2m", 4, kFixupNone),
-    ENCODING_MAP(kThumb2OrnRRI8M,  0xf0600000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "orn", "!0C, !1C, #!2m", 4, kFixupNone),
-    ENCODING_MAP(kThumb2EorRRI8M,  0xf0800000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "eor", "!0C, !1C, #!2m", 4, kFixupNone),
-    ENCODING_MAP(kThumb2AddRRI8M,  0xf1100000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
-                 "adds", "!0C, !1C, #!2m", 4, kFixupNone),
-    ENCODING_MAP(kThumb2AdcRRI8M,  0xf1500000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES | USES_CCODES,
-                 "adcs", "!0C, !1C, #!2m", 4, kFixupNone),
-    ENCODING_MAP(kThumb2SubRRI8M,  0xf1b00000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
-                 "subs", "!0C, !1C, #!2m", 4, kFixupNone),
-    ENCODING_MAP(kThumb2SbcRRI8M,  0xf1700000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES | USES_CCODES,
-                 "sbcs", "!0C, !1C, #!2m", 4, kFixupNone),
-    ENCODING_MAP(kThumb2RevRR, 0xfa90f080,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE12,  // Binary, but rm is stored twice.
-                 "rev", "!0C, !1C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2RevshRR, 0xfa90f0b0,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0_USE12,  // Binary, but rm is stored twice.
-                 "revsh", "!0C, !1C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2It,  0xbf00,
-                 kFmtBitBlt, 7, 4, kFmtBitBlt, 3, 0, kFmtModImm, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_IT | USES_CCODES,
-                 "it:!1b", "!0c", 2, kFixupNone),
-    ENCODING_MAP(kThumb2Fmstat,  0xeef1fa10,
-                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, NO_OPERAND | SETS_CCODES | USES_CCODES,
-                 "fmstat", "", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vcmpd,        0xeeb40b40,
-                 kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
-                 "vcmp.f64", "!0S, !1S", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vcmps,        0xeeb40a40,
-                 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
-                 "vcmp.f32", "!0s, !1s", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LdrPcRel12,       0xf8df0000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD_OFF | NEEDS_FIXUP,
-                 "ldr", "!0C, [r15pc, #!1d]", 4, kFixupLoad),
-    ENCODING_MAP(kThumb2BCond,        0xf0008000,
-                 kFmtBrOffset, -1, -1, kFmtBitBlt, 25, 22, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | IS_BRANCH | USES_CCODES | NEEDS_FIXUP,
-                 "b!1c", "!0t", 4, kFixupCondBranch),
-    ENCODING_MAP(kThumb2Fmrs,       0xee100a10,
-                 kFmtBitBlt, 15, 12, kFmtSfp, 7, 16, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "fmrs", "!0C, !1s", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Fmsr,       0xee000a10,
-                 kFmtSfp, 7, 16, kFmtBitBlt, 15, 12, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "fmsr", "!0s, !1C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Fmrrd,       0xec500b10,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtDfp, 5, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF01_USE2,
-                 "fmrrd", "!0C, !1C, !2S", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Fmdrr,       0xec400b10,
-                 kFmtDfp, 5, 0, kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "fmdrr", "!0S, !1C, !2C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vabsd,       0xeeb00bc0,
-                 kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "vabs.f64", "!0S, !1S", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vabss,       0xeeb00ac0,
-                 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "vabs.f32", "!0s, !1s", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vnegd,       0xeeb10b40,
-                 kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "vneg.f64", "!0S, !1S", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vnegs,       0xeeb10a40,
-                 kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "vneg.f32", "!0s, !1s", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vmovs_IMM8,       0xeeb00a00,
-                 kFmtSfp, 22, 12, kFmtFPImm, 16, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
-                 "vmov.f32", "!0s, #0x!1h", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vmovd_IMM8,       0xeeb00b00,
-                 kFmtDfp, 22, 12, kFmtFPImm, 16, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
-                 "vmov.f64", "!0S, #0x!1h", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Mla,  0xfb000000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 15, 12, IS_QUAD_OP | REG_DEF0_USE123,
-                 "mla", "!0C, !1C, !2C, !3C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Mls,  0xfb000010,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtBitBlt, 15, 12, IS_QUAD_OP | REG_DEF0_USE123,
-                 "mls", "!0C, !1C, !2C, !3C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Umull,  0xfba00000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
-                 kFmtBitBlt, 3, 0,
-                 IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | REG_USE3,
-                 "umull", "!0C, !1C, !2C, !3C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Ldrex,       0xe8500f00,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOADX,
-                 "ldrex", "!0C, [!1C, #!2E]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Ldrexd,      0xe8d0007f,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF01_USE2 | IS_LOADX,
-                 "ldrexd", "!0C, !1C, [!2C]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Strex,       0xe8400000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16,
-                 kFmtBitBlt, 7, 0, IS_QUAD_OP | REG_DEF0_USE12 | IS_STOREX,
-                 "strex", "!0C, !1C, [!2C, #!2E]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Strexd,      0xe8c00070,
-                 kFmtBitBlt, 3, 0, kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8,
-                 kFmtBitBlt, 19, 16, IS_QUAD_OP | REG_DEF0_USE123 | IS_STOREX,
-                 "strexd", "!0C, !1C, !2C, [!3C]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Clrex,       0xf3bf8f2f,
-                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, NO_OPERAND,
-                 "clrex", "", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Bfi,         0xf3600000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtShift5, -1, -1,
-                 kFmtBitBlt, 4, 0, IS_QUAD_OP | REG_DEF0_USE1,
-                 "bfi", "!0C,!1C,#!2d,#!3d", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Bfc,         0xf36f0000,
-                 kFmtBitBlt, 11, 8, kFmtShift5, -1, -1, kFmtBitBlt, 4, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0,
-                 "bfc", "!0C,#!1d,#!2d", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Dmb,         0xf3bf8f50,
-                 kFmtBitBlt, 3, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_VOLATILE,
-                 "dmb", "#!0B", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LdrPcReln12,       0xf85f0000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD_OFF,
-                 "ldr", "!0C, [r15pc, -#!1d]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Stm,          0xe9000000,
-                 kFmtBitBlt, 19, 16, kFmtStmRegList, 15, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_USE0 | REG_USE_LIST1 | IS_STORE,
-                 "stm", "!0C, <!1R>", 4, kFixupNone),
-    ENCODING_MAP(kThumbUndefined,       0xde00,
-                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, NO_OPERAND,
-                 "undefined", "", 2, kFixupNone),
-    // NOTE: vpop, vpush hard-encoded for s16+ reg list
-    ENCODING_MAP(kThumb2VPopCS,       0xecbd8a00,
-                 kFmtBitBlt, 7, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_DEF_FPCS_LIST0
-                 | IS_LOAD, "vpop", "<!0P>", 4, kFixupNone),
-    ENCODING_MAP(kThumb2VPushCS,      0xed2d8a00,
-                 kFmtBitBlt, 7, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE_FPCS_LIST0
-                 | IS_STORE, "vpush", "<!0P>", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vldms,        0xec900a00,
-                 kFmtBitBlt, 19, 16, kFmtSfp, 22, 12, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_USE0 | REG_DEF_FPCS_LIST2
-                 | IS_LOAD, "vldms", "!0C, <!2Q>", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Vstms,        0xec800a00,
-                 kFmtBitBlt, 19, 16, kFmtSfp, 22, 12, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_USE0 | REG_USE_FPCS_LIST2
-                 | IS_STORE, "vstms", "!0C, <!2Q>", 4, kFixupNone),
-    ENCODING_MAP(kThumb2BUncond,      0xf0009000,
-                 kFmtOff24, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH,
-                 "b", "!0t", 4, kFixupT2Branch),
-    ENCODING_MAP(kThumb2Bl,           0xf000d000,
-                 kFmtOff24, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR | NEEDS_FIXUP,
-                 "bl", "!0T", 4, kFixupLabel),
-    ENCODING_MAP(kThumb2MovImm16H,       0xf2c00000,
-                 kFmtBitBlt, 11, 8, kFmtImm16, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0 | REG_USE0,
-                 "movt", "!0C, #!1M", 4, kFixupNone),
-    ENCODING_MAP(kThumb2AddPCR,      0x4487,
-                 kFmtBitBlt, 6, 3, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_UNARY_OP | REG_USE0 | IS_BRANCH | NEEDS_FIXUP,
-                 "add", "rPC, !0C", 2, kFixupLabel),
-    ENCODING_MAP(kThumb2Adr,         0xf20f0000,
-                 kFmtBitBlt, 11, 8, kFmtImm12, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 /* Note: doesn't affect flags */
-                 IS_TERTIARY_OP | REG_DEF0 | NEEDS_FIXUP,
-                 "adr", "!0C,#!1d", 4, kFixupAdr),
-    ENCODING_MAP(kThumb2MovImm16LST,     0xf2400000,
-                 kFmtBitBlt, 11, 8, kFmtImm16, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0 | NEEDS_FIXUP,
-                 "mov", "!0C, #!1M", 4, kFixupMovImmLST),
-    ENCODING_MAP(kThumb2MovImm16HST,     0xf2c00000,
-                 kFmtBitBlt, 11, 8, kFmtImm16, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0 | REG_USE0 | NEEDS_FIXUP,
-                 "movt", "!0C, #!1M", 4, kFixupMovImmHST),
-    ENCODING_MAP(kThumb2LdmiaWB,         0xe8b00000,
-                 kFmtBitBlt, 19, 16, kFmtLdmRegList, 15, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0_USE0 | REG_DEF_LIST1 | IS_LOAD,
-                 "ldmia", "!0C!!, <!1R>", 4, kFixupNone),
-    ENCODING_MAP(kThumb2OrrRRRs,  0xea500000,
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
-                 "orrs", "!0C, !1C, !2C!3H", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Push1,    0xf84d0d04,
-                 kFmtBitBlt, 15, 12, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE0
-                 | IS_STORE, "push1", "!0C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Pop1,    0xf85d0b04,
-                 kFmtBitBlt, 15, 12, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_DEF0
-                 | IS_LOAD, "pop1", "!0C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2RsubRRR,  0xebd00000, /* setflags encoding */
-                 kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
-                 kFmtShift, -1, -1,
-                 IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
-                 "rsbs", "!0C, !1C, !2C!3H", 4, kFixupNone),
-    ENCODING_MAP(kThumb2Smull,  0xfb800000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
-                 kFmtBitBlt, 3, 0,
-                 IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | REG_USE3,
-                 "smull", "!0C, !1C, !2C, !3C", 4, kFixupNone),
-    ENCODING_MAP(kThumb2LdrdPcRel8,  0xe9df0000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_DEF0 | REG_DEF1 | REG_USE_PC | IS_LOAD_OFF4 | NEEDS_FIXUP,
-                 "ldrd", "!0C, !1C, [pc, #!2E]", 4, kFixupLoad),
-    ENCODING_MAP(kThumb2LdrdI8, 0xe9d00000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
-                 kFmtBitBlt, 7, 0,
-                 IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | IS_LOAD_OFF4,
-                 "ldrd", "!0C, !1C, [!2C, #!3E]", 4, kFixupNone),
-    ENCODING_MAP(kThumb2StrdI8, 0xe9c00000,
-                 kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
-                 kFmtBitBlt, 7, 0,
-                 IS_QUAD_OP | REG_USE0 | REG_USE1 | REG_USE2 | IS_STORE_OFF4,
-                 "strd", "!0C, !1C, [!2C, #!3E]", 4, kFixupNone),
-};
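
Why the spurious def described in the comment above the vldrs/vldrd entries is enough to freeze the scheduler, in a minimal standalone sketch (the masks and types below are assumptions for illustration, not ART code): two LIRs may only be reordered when neither one's defs overlap the other's defs or uses, so listing lr among a vldr's defs pins every other lr access on its side of the load.

```cpp
#include <cstdint>
#include <cstdio>

// One bit per machine resource, as in the LIR def/use masks.
constexpr uint64_t kRegR0 = UINT64_C(1) << 0;
constexpr uint64_t kRegLR = UINT64_C(1) << 14;

struct Insn {
  uint64_t def_mask;
  uint64_t use_mask;
};

// Two instructions may swap places only if neither one's defs overlap the
// other's defs or uses.
bool CanSwap(const Insn& a, const Insn& b) {
  return (a.def_mask & (b.def_mask | b.use_mask)) == 0 &&
         (b.def_mask & (a.def_mask | a.use_mask)) == 0;
}

int main() {
  // vldr from the literal pool: the spurious REG_DEF_LR puts lr in its defs
  // (real masks would also carry the fp destination and the base register).
  Insn vldr = {kRegLR, 0};
  // mov lr, r0: a genuine def of lr.
  Insn mov_lr = {kRegLR, kRegR0};
  printf("can swap: %s\n", CanSwap(vldr, mov_lr) ? "yes" : "no");  // "no"
  return 0;
}
```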
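For a concrete sense of how these rows drive EncodeLIRs() further down, here is a minimal sketch (illustrative only, not ART code) of the common kFmtBitBlt path: each field record names the high and low bit of a slice in the instruction skeleton, and the operand is shifted to the slice's low bit, masked, and OR'd in. Applied to the kThumbMovImm row above (skeleton 0x2000, rd in bits 10..8, imm8 in bits 7..0), movs r3, #42 assembles to 0x2000 | (3 << 8) | 42 = 0x232a.

```cpp
#include <cstdint>
#include <cstdio>

struct FieldLoc { int end; int start; };  // High bit / low bit of the slice.

// The common kFmtBitBlt path of EncodeLIRs(): shift the operand to the
// slice's low bit, mask to bits [end:0], and OR into the skeleton.
uint32_t EncodeBitBlt(uint32_t skeleton, FieldLoc loc, uint32_t operand) {
  return skeleton | ((operand << loc.start) & ((1u << (loc.end + 1)) - 1u));
}

int main() {
  // kThumbMovImm: skeleton 0x2000, rd in bits 10..8, imm8 in bits 7..0.
  uint32_t insn = 0x2000;
  insn = EncodeBitBlt(insn, {10, 8}, 3);   // rd = r3
  insn = EncodeBitBlt(insn, {7, 0}, 42);   // #42
  printf("movs r3, #42 -> 0x%04x\n", (unsigned)insn);  // Prints 0x232a.
  return 0;
}
```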
-
-// new_lir replaces orig_lir in the pcrel_fixup list.
-void ArmMir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
-  new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
-  if (UNLIKELY(prev_lir == nullptr)) {
-    first_fixup_ = new_lir;
-  } else {
-    prev_lir->u.a.pcrel_next = new_lir;
-  }
-  orig_lir->flags.fixup = kFixupNone;
-}
-
-// new_lir is inserted before orig_lir in the pcrel_fixup list.
-void ArmMir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
-  new_lir->u.a.pcrel_next = orig_lir;
-  if (UNLIKELY(prev_lir == nullptr)) {
-    first_fixup_ = new_lir;
-  } else {
-    DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
-    prev_lir->u.a.pcrel_next = new_lir;
-  }
-}
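
Both helpers above maintain the singly linked fixup chain threaded through the LIR nodes via u.a.pcrel_next, with first_fixup_ as the head. A toy model of the two relink operations (simplified assumed types, not ART code):

```cpp
#include <cassert>
#include <cstddef>

struct Node {
  Node* next = nullptr;  // Stands in for LIR::u.a.pcrel_next.
};

Node* head = nullptr;  // Stands in for first_fixup_.

// Mirrors ReplaceFixup(): new_node takes orig's place in the chain.  (The
// real version also downgrades orig's fixup kind to kFixupNone.)
void Replace(Node* prev, Node* orig, Node* new_node) {
  new_node->next = orig->next;
  if (prev == nullptr) {
    head = new_node;  // orig was the first fixup.
  } else {
    prev->next = new_node;
  }
}

// Mirrors InsertFixupBefore(): new_node is linked in ahead of orig.
void InsertBefore(Node* prev, Node* orig, Node* new_node) {
  new_node->next = orig;
  if (prev == nullptr) {
    head = new_node;
  } else {
    assert(prev->next == orig);  // Same invariant as the DCHECK above.
    prev->next = new_node;
  }
}

int main() {
  Node a, b, c, d;
  head = &a;
  a.next = &b;               // a -> b
  InsertBefore(&a, &b, &c);  // a -> c -> b
  assert(a.next == &c && c.next == &b);
  Replace(&a, &c, &d);       // a -> d -> b
  assert(a.next == &d && d.next == &b);
  return 0;
}
```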
-
-/*
- * The fake NOP of moving r0 to r0 can actually incur data stalls if r0 is
- * not ready.  Since r5FP is not updated often, it is less likely to
- * generate unnecessary stall cycles.
- * TUNING: No longer true - find a new NOP pattern.
- */
-#define PADDING_MOV_R5_R5               0x1C2D
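
As a sanity check, 0x1C2D is just the kThumbMovRR row from the table applied to r5 twice: skeleton 0x1c00 with rm in bits 5..3 and rd in bits 2..0. A compile-time verification, illustrative only:

```cpp
#include <cstdint>

int main() {
  // kThumbMovRR: skeleton 0x1c00, rm in bits 5..3, rd in bits 2..0.
  constexpr uint16_t kSkeleton = 0x1c00;
  constexpr uint16_t kPadding = kSkeleton | (5u << 3) | 5u;  // movs r5, r5
  static_assert(kPadding == 0x1C2D, "matches PADDING_MOV_R5_R5");
  return 0;
}
```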
-
-uint8_t* ArmMir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
-  uint8_t* const write_buffer = write_pos;
-  for (; lir != nullptr; lir = NEXT_LIR(lir)) {
-    lir->offset = (write_pos - write_buffer);
-    if (!lir->flags.is_nop) {
-      int opcode = lir->opcode;
-      if (IsPseudoLirOp(opcode)) {
-        if (UNLIKELY(opcode == kPseudoPseudoAlign4)) {
-          // Note: size for this opcode will be either 0 or 2 depending on final alignment.
-          if (lir->offset & 0x2) {
-            write_pos[0] = (PADDING_MOV_R5_R5 & 0xff);
-            write_pos[1] = ((PADDING_MOV_R5_R5 >> 8) & 0xff);
-            write_pos += 2;
-          }
-        }
-      } else {  // Real opcode: the outer check already ruled out nops.
-        const ArmEncodingMap *encoder = &EncodingMap[lir->opcode];
-        uint32_t bits = encoder->skeleton;
-        for (int i = 0; i < 4; i++) {
-          uint32_t operand;
-          uint32_t value;
-          operand = lir->operands[i];
-          ArmEncodingKind kind = encoder->field_loc[i].kind;
-          if (LIKELY(kind == kFmtBitBlt)) {
-            value = (operand << encoder->field_loc[i].start) &
-                ((1 << (encoder->field_loc[i].end + 1)) - 1);
-            bits |= value;
-          } else {
-            switch (encoder->field_loc[i].kind) {
-              case kFmtLdmRegList:
-                value = (operand << encoder->field_loc[i].start) &
-                    ((1 << (encoder->field_loc[i].end + 1)) - 1);
-                bits |= value;
-                DCHECK_EQ((bits & (1 << 13)), 0u);
-                break;
-              case kFmtStmRegList:
-                value = (operand << encoder->field_loc[i].start) &
-                    ((1 << (encoder->field_loc[i].end + 1)) - 1);
-                bits |= value;
-                DCHECK_EQ((bits & (1 << 13)), 0u);
-                DCHECK_EQ((bits & (1 << 15)), 0u);
-                break;
-              case kFmtSkip:
-                break;  // Nothing to do, but continue to next.
-              case kFmtUnused:
-                i = 4;  // Done, break out of the enclosing loop.
-                break;
-              case kFmtFPImm:
-                value = ((operand & 0xF0) >> 4) << encoder->field_loc[i].end;
-                value |= (operand & 0x0F) << encoder->field_loc[i].start;
-                bits |= value;
-                break;
-              case kFmtBrOffset:
-                value = ((operand  & 0x80000) >> 19) << 26;
-                value |= ((operand & 0x40000) >> 18) << 11;
-                value |= ((operand & 0x20000) >> 17) << 13;
-                value |= ((operand & 0x1f800) >> 11) << 16;
-                value |= (operand  & 0x007ff);
-                bits |= value;
-                break;
-              case kFmtShift5:
-                value = ((operand & 0x1c) >> 2) << 12;
-                value |= (operand & 0x03) << 6;
-                bits |= value;
-                break;
-              case kFmtShift:
-                value = ((operand & 0x70) >> 4) << 12;
-                value |= (operand & 0x0f) << 4;
-                bits |= value;
-                break;
-              case kFmtBWidth:
-                value = operand - 1;
-                bits |= value;
-                break;
-              case kFmtLsb:
-                value = ((operand & 0x1c) >> 2) << 12;
-                value |= (operand & 0x03) << 6;
-                bits |= value;
-                break;
-              case kFmtImm6:
-                value = ((operand & 0x20) >> 5) << 9;
-                value |= (operand & 0x1f) << 3;
-                bits |= value;
-                break;
-              case kFmtDfp: {
-                DCHECK(RegStorage::IsDouble(operand)) << ", Operand = 0x" << std::hex << operand;
-                uint32_t reg_num = RegStorage::RegNum(operand);
-                /* Snag the 1-bit slice and position it */
-                value = ((reg_num & 0x10) >> 4) << encoder->field_loc[i].end;
-                /* Extract and position the 4-bit slice */
-                value |= (reg_num & 0x0f) << encoder->field_loc[i].start;
-                bits |= value;
-                break;
-              }
-              case kFmtSfp: {
-                DCHECK(RegStorage::IsSingle(operand)) << ", Operand = 0x" << std::hex << operand;
-                uint32_t reg_num = RegStorage::RegNum(operand);
-                /* Snag the 1-bit slice and position it */
-                value = (reg_num & 0x1) << encoder->field_loc[i].end;
-                /* Extract and position the 4-bit slice */
-                value |= ((reg_num & 0x1e) >> 1) << encoder->field_loc[i].start;
-                bits |= value;
-                break;
-              }
-              case kFmtImm12:
-              case kFmtModImm:
-                value = ((operand & 0x800) >> 11) << 26;
-                value |= ((operand & 0x700) >> 8) << 12;
-                value |= operand & 0x0ff;
-                bits |= value;
-                break;
-              case kFmtImm16:
-                value = ((operand & 0x0800) >> 11) << 26;
-                value |= ((operand & 0xf000) >> 12) << 16;
-                value |= ((operand & 0x0700) >> 8) << 12;
-                value |= operand & 0x0ff;
-                bits |= value;
-                break;
-              case kFmtOff24: {
-                uint32_t signbit = (operand >> 31) & 0x1;
-                uint32_t i1 = (operand >> 22) & 0x1;
-                uint32_t i2 = (operand >> 21) & 0x1;
-                uint32_t imm10 = (operand >> 11) & 0x03ff;
-                uint32_t imm11 = operand & 0x07ff;
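-                // Thumb2 T4 branch encoding: J1 = NOT(I1 EOR S), J2 = NOT(I2 EOR S).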
-                uint32_t j1 = (i1 ^ signbit) ? 0 : 1;
-                uint32_t j2 = (i2 ^ signbit) ? 0 : 1;
-                value = (signbit << 26) | (j1 << 13) | (j2 << 11) | (imm10 << 16) |
-                    imm11;
-                bits |= value;
-                }
-                break;
-              default:
-                LOG(FATAL) << "Bad fmt:" << encoder->field_loc[i].kind;
-            }
-          }
-        }
-        if (encoder->size == 4) {
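-          // Emit 32-bit instructions as two little-endian halfwords, most-significant halfword first.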
-          write_pos[0] = ((bits >> 16) & 0xff);
-          write_pos[1] = ((bits >> 24) & 0xff);
-          write_pos[2] = (bits & 0xff);
-          write_pos[3] = ((bits >> 8) & 0xff);
-          write_pos += 4;
-        } else {
-          DCHECK_EQ(encoder->size, 2);
-          write_pos[0] = (bits & 0xff);
-          write_pos[1] = ((bits >> 8) & 0xff);
-          write_pos += 2;
-        }
-      }
-    }
-  }
-  return write_pos;
-}
-
-// Assemble the LIR into binary instruction format.
-void ArmMir2Lir::AssembleLIR() {
-  LIR* lir;
-  LIR* prev_lir;
-  cu_->NewTimingSplit("Assemble");
-  int assembler_retries = 0;
-  CodeOffset starting_offset = LinkFixupInsns(first_lir_insn_, last_lir_insn_, 0);
-  data_offset_ = RoundUp(starting_offset, 4);
-  int32_t offset_adjustment;
-  AssignDataOffsets();
-
-  /*
-   * Note: generation must be 1 on first pass (to distinguish from initialized state of 0 for
- * non-visited nodes).  Start at zero here; the bit will be flipped to 1 on entry to the loop.
-   */
-  int generation = 0;
-  while (true) {
-    offset_adjustment = 0;
-    AssemblerStatus res = kSuccess;  // Assume success
-    generation ^= 1;
-    // Note: nodes requiring possible fixup are linked in ascending order.
-    lir = first_fixup_;
-    prev_lir = nullptr;
-    while (lir != nullptr) {
-      /*
-       * NOTE: the lir being considered here will be encoded following the switch (so long as
-       * we're not in a retry situation).  However, any new non-pc_rel instructions inserted
-       * due to retry must be explicitly encoded at the time of insertion.  Note that
-       * inserted instructions don't need use/def flags, but do need size and pc-rel status
-       * properly updated.
-       */
-      lir->offset += offset_adjustment;
-      // During the pass, this tells us whether a node has had offset_adjustment applied yet.
-      lir->flags.generation = generation;
-      switch (static_cast<FixupKind>(lir->flags.fixup)) {
-        case kFixupLabel:
-        case kFixupNone:
-          break;
-        case kFixupVLoad:
-          if (lir->operands[1] != rs_r15pc.GetReg()) {
-            break;
-          }
-          FALLTHROUGH_INTENDED;
-        case kFixupLoad: {
-          /*
-           * PC-relative loads are mostly used to load immediates
-           * that are too large to materialize directly in one shot.
-           * However, if the load displacement exceeds the limit,
-           * we revert to a multiple-instruction materialization sequence.
-           */
-          LIR *lir_target = lir->target;
-          CodeOffset pc = (lir->offset + 4) & ~3;
-          CodeOffset target = lir_target->offset +
-              ((lir_target->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
-          int32_t delta = target - pc;
-          if (res != kSuccess) {
-            /*
-             * In this case, we're just estimating and will do it again for real.  Ensure offset
-             * is legal.
-             */
-            delta &= ~0x3;
-          }
-          DCHECK_ALIGNED(delta, 4);
-          // First, a sanity check for cases we shouldn't see now
-          if (kIsDebugBuild && (((lir->opcode == kThumbAddPcRel) && (delta > 1020)) ||
-              ((lir->opcode == kThumbLdrPcRel) && (delta > 1020)))) {
-            // Shouldn't happen in current codegen.
-            LOG(FATAL) << "Unexpected pc-rel offset " << delta;
-          }
-          // Now, check for the difficult cases
-          if (((lir->opcode == kThumb2LdrPcRel12) && (delta > 4091)) ||
-              ((lir->opcode == kThumb2LdrdPcRel8) && (delta > 1020)) ||
-              ((lir->opcode == kThumb2Vldrs) && (delta > 1020)) ||
-              ((lir->opcode == kThumb2Vldrd) && (delta > 1020))) {
-            /*
-             * Note: The reason vldrs/vldrd include rARM_LR in their use/def masks is that we
-             * sometimes have to use it to fix up out-of-range accesses.  This is where that
-             * happens.
-             */
-            int base_reg = ((lir->opcode == kThumb2LdrdPcRel8) ||
-                            (lir->opcode == kThumb2LdrPcRel12)) ?  lir->operands[0] :
-                            rs_rARM_LR.GetReg();
-
-            // Add new Adr to generate the address.
-            LIR* new_adr = RawLIR(lir->dalvik_offset, kThumb2Adr,
-                       base_reg, 0, 0, 0, 0, lir->target);
-            new_adr->offset = lir->offset;
-            new_adr->flags.fixup = kFixupAdr;
-            new_adr->flags.size = EncodingMap[kThumb2Adr].size;
-            InsertLIRBefore(lir, new_adr);
-            lir->offset += new_adr->flags.size;
-            offset_adjustment += new_adr->flags.size;
-
-            // lir no longer pcrel, unlink and link in new_adr.
-            ReplaceFixup(prev_lir, lir, new_adr);
-
-            // Convert to normal load.
-            offset_adjustment -= lir->flags.size;
-            if (lir->opcode == kThumb2LdrPcRel12) {
-              lir->opcode = kThumb2LdrRRI12;
-            } else if (lir->opcode == kThumb2LdrdPcRel8) {
-              lir->opcode = kThumb2LdrdI8;
-            }
-            lir->flags.size = EncodingMap[lir->opcode].size;
-            offset_adjustment += lir->flags.size;
-            // Change the load to be relative to the new Adr base.
-            if (lir->opcode == kThumb2LdrdI8) {
-              lir->operands[3] = 0;
-              lir->operands[2] = base_reg;
-            } else {
-              lir->operands[2] = 0;
-              lir->operands[1] = base_reg;
-            }
-            prev_lir = new_adr;  // Continue scan with new_adr;
-            lir = new_adr->u.a.pcrel_next;
-            res = kRetryAll;
-            continue;
-          } else {
-            if ((lir->opcode == kThumb2Vldrs) ||
-                (lir->opcode == kThumb2Vldrd) ||
-                (lir->opcode == kThumb2LdrdPcRel8)) {
-              lir->operands[2] = delta >> 2;
-            } else {
-              lir->operands[1] = (lir->opcode == kThumb2LdrPcRel12) ?  delta :
-                  delta >> 2;
-            }
-          }
-          break;
-        }
-        case kFixupCBxZ: {
-          LIR *target_lir = lir->target;
-          CodeOffset pc = lir->offset + 4;
-          CodeOffset target = target_lir->offset +
-              ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
-          int32_t delta = target - pc;
-          if (delta > 126 || delta < 0) {
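-            // cb{n}z can only branch forward by 0..126 bytes, hence the expansion below.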
-            /*
-             * Convert to cmp rx,#0 / b[eq/ne] tgt pair
-             * Make new branch instruction and insert after
-             */
-            LIR* new_inst =
-              RawLIR(lir->dalvik_offset, kThumbBCond, 0,
-                     (lir->opcode == kThumb2Cbz) ? kArmCondEq : kArmCondNe,
-                     0, 0, 0, lir->target);
-            InsertLIRAfter(lir, new_inst);
-
-            /* Convert the cb[n]z to a cmp rx, #0 */
-            // Subtract the old size.
-            offset_adjustment -= lir->flags.size;
-            lir->opcode = kThumbCmpRI8;
-            /* operand[0] is src1 in both cb[n]z & CmpRI8 */
-            lir->operands[1] = 0;
-            lir->target = 0;
-            lir->flags.size = EncodingMap[lir->opcode].size;
-            // Add back the new size.
-            offset_adjustment += lir->flags.size;
-            // Set up the new following inst.
-            new_inst->offset = lir->offset + lir->flags.size;
-            new_inst->flags.fixup = kFixupCondBranch;
-            new_inst->flags.size = EncodingMap[new_inst->opcode].size;
-            offset_adjustment += new_inst->flags.size;
-
-            // lir no longer pcrel, unlink and link in new_inst.
-            ReplaceFixup(prev_lir, lir, new_inst);
-            prev_lir = new_inst;  // Continue with the new instruction.
-            lir = new_inst->u.a.pcrel_next;
-            res = kRetryAll;
-            continue;
-          } else {
-            lir->operands[1] = delta >> 1;
-          }
-          break;
-        }
-        case kFixupCondBranch: {
-          LIR *target_lir = lir->target;
-          int32_t delta = 0;
-          DCHECK(target_lir);
-          CodeOffset pc = lir->offset + 4;
-          CodeOffset target = target_lir->offset +
-              ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
-          delta = target - pc;
-          if ((lir->opcode == kThumbBCond) && (delta > 254 || delta < -256)) {
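-            // 16-bit conditional branch reaches only -256..+254 bytes; widen to the 32-bit form.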
-            offset_adjustment -= lir->flags.size;
-            lir->opcode = kThumb2BCond;
-            lir->flags.size = EncodingMap[lir->opcode].size;
-            // Fixup kind remains the same.
-            offset_adjustment += lir->flags.size;
-            res = kRetryAll;
-          }
-          lir->operands[0] = delta >> 1;
-          break;
-        }
-        case kFixupT2Branch: {
-          LIR *target_lir = lir->target;
-          CodeOffset pc = lir->offset + 4;
-          CodeOffset target = target_lir->offset +
-              ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
-          int32_t delta = target - pc;
-          lir->operands[0] = delta >> 1;
-          if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && lir->operands[0] == 0) {
-            // Useless branch
-            offset_adjustment -= lir->flags.size;
-            lir->flags.is_nop = true;
-            // Don't unlink - just set to do-nothing.
-            lir->flags.fixup = kFixupNone;
-            res = kRetryAll;
-          }
-          break;
-        }
-        case kFixupT1Branch: {
-          LIR *target_lir = lir->target;
-          CodeOffset pc = lir->offset + 4;
-          CodeOffset target = target_lir->offset +
-              ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
-          int32_t delta = target - pc;
-          if (delta > 2046 || delta < -2048) {
-            // Out of 16-bit B range (-2048..+2046); convert to a Thumb2 unconditional branch.
-            offset_adjustment -= lir->flags.size;
-            lir->opcode = kThumb2BUncond;
-            lir->operands[0] = 0;
-            lir->flags.size = EncodingMap[lir->opcode].size;
-            lir->flags.fixup = kFixupT2Branch;
-            offset_adjustment += lir->flags.size;
-            res = kRetryAll;
-          } else {
-            lir->operands[0] = delta >> 1;
-            if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && lir->operands[0] == -1) {
-              // Useless branch
-              offset_adjustment -= lir->flags.size;
-              lir->flags.is_nop = true;
-              // Don't unlink - just set to do-nothing.
-              lir->flags.fixup = kFixupNone;
-              res = kRetryAll;
-            }
-          }
-          break;
-        }
-        case kFixupBlx1: {
-          DCHECK(NEXT_LIR(lir)->opcode == kThumbBlx2);
-          /* cur_pc is Thumb */
-          CodeOffset cur_pc = (lir->offset + 4) & ~3;
-          CodeOffset target = lir->operands[1];
-
-          /* Match bit[1] in target with base */
-          if (cur_pc & 0x2) {
-            target |= 0x2;
-          }
-          int32_t delta = target - cur_pc;
-          DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
-
-          lir->operands[0] = (delta >> 12) & 0x7ff;
-          NEXT_LIR(lir)->operands[0] = (delta >> 1) & 0x7ff;
-          break;
-        }
-        case kFixupBl1: {
-          DCHECK(NEXT_LIR(lir)->opcode == kThumbBl2);
-          /* Both cur_pc and target are Thumb */
-          CodeOffset cur_pc = lir->offset + 4;
-          CodeOffset target = lir->operands[1];
-
-          int32_t delta = target - cur_pc;
-          DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
-
-          lir->operands[0] = (delta >> 12) & 0x7ff;
-          NEXT_LIR(lir)->operands[0] = (delta >> 1) & 0x7ff;
-          break;
-        }
-        case kFixupAdr: {
-          const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[2]);
-          LIR* target = lir->target;
-          int32_t target_disp = (tab_rec != nullptr) ?  tab_rec->offset + offset_adjustment
-              : target->offset + ((target->flags.generation == lir->flags.generation) ? 0 :
-              offset_adjustment);
-          int32_t disp = target_disp - ((lir->offset + 4) & ~3);
-          if (disp < 4096) {
-            lir->operands[1] = disp;
-          } else {
-            // convert to ldimm16l, ldimm16h, add tgt, pc, operands[0]
-            // TUNING: if this case fires often, it can be improved.  Not expected to be common.
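-            // Resulting sequence: movw rX, #lo16(disp); movt rX, #hi16(disp); add rX, pc,
-            // with disp taken relative to the add.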
-            LIR *new_mov16L =
-                RawLIR(lir->dalvik_offset, kThumb2MovImm16LST, lir->operands[0], 0,
-                       WrapPointer(lir), WrapPointer(tab_rec), 0, lir->target);
-            new_mov16L->flags.size = EncodingMap[new_mov16L->opcode].size;
-            new_mov16L->flags.fixup = kFixupMovImmLST;
-            new_mov16L->offset = lir->offset;
-            // Link the new instruction, retaining lir.
-            InsertLIRBefore(lir, new_mov16L);
-            lir->offset += new_mov16L->flags.size;
-            offset_adjustment += new_mov16L->flags.size;
-            InsertFixupBefore(prev_lir, lir, new_mov16L);
-            prev_lir = new_mov16L;   // Now we've got a new prev.
-            LIR *new_mov16H =
-                RawLIR(lir->dalvik_offset, kThumb2MovImm16HST, lir->operands[0], 0,
-                       WrapPointer(lir), WrapPointer(tab_rec), 0, lir->target);
-            new_mov16H->flags.size = EncodingMap[new_mov16H->opcode].size;
-            new_mov16H->flags.fixup = kFixupMovImmHST;
-            new_mov16H->offset = lir->offset;
-            // Link the new instruction, retaining lir.
-            InsertLIRBefore(lir, new_mov16H);
-            lir->offset += new_mov16H->flags.size;
-            offset_adjustment += new_mov16H->flags.size;
-            InsertFixupBefore(prev_lir, lir, new_mov16H);
-            prev_lir = new_mov16H;  // Now we've got a new prev.
-
-            offset_adjustment -= lir->flags.size;
-            if (RegStorage::RegNum(lir->operands[0]) < 8) {
-              lir->opcode = kThumbAddRRLH;
-            } else {
-              lir->opcode = kThumbAddRRHH;
-            }
-            lir->operands[1] = rs_rARM_PC.GetReg();
-            lir->flags.size = EncodingMap[lir->opcode].size;
-            offset_adjustment += lir->flags.size;
-            // Must stay in fixup list and have offset updated; will be used by the LST/HST pair.
-            lir->flags.fixup = kFixupNone;
-            res = kRetryAll;
-          }
-          break;
-        }
-        case kFixupMovImmLST: {
-          // operands[1] should hold disp, [2] has add, [3] has tab_rec
-          const LIR* addPCInst = UnwrapPointer<LIR>(lir->operands[2]);
-          const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[3]);
-          // If tab_rec is null, this is a literal load. Use target
-          LIR* target = lir->target;
-          int32_t target_disp = tab_rec ? tab_rec->offset : target->offset;
-          lir->operands[1] = (target_disp - (addPCInst->offset + 4)) & 0xffff;
-          break;
-        }
-        case kFixupMovImmHST: {
-          // operands[1] should hold disp, [2] has add, [3] has tab_rec
-          const LIR* addPCInst = UnwrapPointer<LIR>(lir->operands[2]);
-          const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[3]);
-          // If tab_rec is null, this is a literal load. Use target
-          LIR* target = lir->target;
-          int32_t target_disp = tab_rec ? tab_rec->offset : target->offset;
-          lir->operands[1] =
-              ((target_disp - (addPCInst->offset + 4)) >> 16) & 0xffff;
-          break;
-        }
-        case kFixupAlign4: {
-          int32_t required_size = lir->offset & 0x2;
-          if (lir->flags.size != required_size) {
-            offset_adjustment += required_size - lir->flags.size;
-            lir->flags.size = required_size;
-            res = kRetryAll;
-          }
-          break;
-        }
-        default:
-          LOG(FATAL) << "Unexpected case " << lir->flags.fixup;
-      }
-      prev_lir = lir;
-      lir = lir->u.a.pcrel_next;
-    }
-
-    if (res == kSuccess) {
-      break;
-    } else {
-      assembler_retries++;
-      if (assembler_retries > MAX_ASSEMBLER_RETRIES) {
-        CodegenDump();
-        LOG(FATAL) << "Assembler error - too many retries";
-      }
-      starting_offset += offset_adjustment;
-      data_offset_ = RoundUp(starting_offset, 4);
-      AssignDataOffsets();
-    }
-  }
-
-  // Build the CodeBuffer.
-  DCHECK_LE(data_offset_, total_size_);
-  code_buffer_.reserve(total_size_);
-  code_buffer_.resize(starting_offset);
-  uint8_t* write_pos = &code_buffer_[0];
-  write_pos = EncodeLIRs(write_pos, first_lir_insn_);
-  DCHECK_EQ(static_cast<CodeOffset>(write_pos - &code_buffer_[0]), starting_offset);
-
-  DCHECK_EQ(data_offset_, RoundUp(code_buffer_.size(), 4));
-
-  // Install literals
-  InstallLiteralPools();
-
-  // Install switch tables
-  InstallSwitchTables();
-
-  // Install fill array data
-  InstallFillArrayData();
-
-  // Create the mapping table and native offset to reference map.
-  cu_->NewTimingSplit("PcMappingTable");
-  CreateMappingTables();
-
-  cu_->NewTimingSplit("GcMap");
-  CreateNativeGcMap();
-}
-
-size_t ArmMir2Lir::GetInsnSize(LIR* lir) {
-  DCHECK(!IsPseudoLirOp(lir->opcode));
-  return EncodingMap[lir->opcode].size;
-}
-
-// Assign offsets and sizes, and link any instructions needing fixup into the pcrel fixup list.
-uint32_t ArmMir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
-  LIR* end_lir = tail_lir->next;
-
-  LIR* last_fixup = nullptr;
-  for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
-    if (!lir->flags.is_nop) {
-      if (lir->flags.fixup != kFixupNone) {
-        if (!IsPseudoLirOp(lir->opcode)) {
-          lir->flags.size = EncodingMap[lir->opcode].size;
-          lir->flags.fixup = EncodingMap[lir->opcode].fixup;
-        } else if (UNLIKELY(lir->opcode == kPseudoPseudoAlign4)) {
-          lir->flags.size = (offset & 0x2);
-          lir->flags.fixup = kFixupAlign4;
-        } else {
-          lir->flags.size = 0;
-          lir->flags.fixup = kFixupLabel;
-        }
-        // Link into the fixup chain.
-        lir->flags.use_def_invalid = true;
-        lir->u.a.pcrel_next = nullptr;
-        if (first_fixup_ == nullptr) {
-          first_fixup_ = lir;
-        } else {
-          last_fixup->u.a.pcrel_next = lir;
-        }
-        last_fixup = lir;
-        lir->offset = offset;
-      }
-      offset += lir->flags.size;
-    }
-  }
-  return offset;
-}
-
-void ArmMir2Lir::AssignDataOffsets() {
-  /* Set up offsets for literals */
-  CodeOffset offset = data_offset_;
-
-  offset = AssignLiteralOffset(offset);
-
-  offset = AssignSwitchTablesOffset(offset);
-
-  total_size_ = AssignFillArrayDataOffset(offset);
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/arm/backend_arm.h b/compiler/dex/quick/arm/backend_arm.h
deleted file mode 100644
index 42a9bca..0000000
--- a/compiler/dex/quick/arm/backend_arm.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_ARM_BACKEND_ARM_H_
-#define ART_COMPILER_DEX_QUICK_ARM_BACKEND_ARM_H_
-
-namespace art {
-
-struct CompilationUnit;
-class Mir2Lir;
-class MIRGraph;
-class ArenaAllocator;
-
-Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
-                          ArenaAllocator* const arena);
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_ARM_BACKEND_ARM_H_
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
deleted file mode 100644
index 868d9a4..0000000
--- a/compiler/dex/quick/arm/call_arm.cc
+++ /dev/null
@@ -1,763 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This file contains codegen for the Thumb2 ISA. */
-
-#include "codegen_arm.h"
-
-#include "arm_lir.h"
-#include "art_method.h"
-#include "base/bit_utils.h"
-#include "base/logging.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
-#include "gc/accounting/card_table.h"
-#include "mirror/object_array-inl.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "utils/dex_cache_arrays_layout-inl.h"
-
-namespace art {
-
-/*
- * The sparse table in the literal pool is an array of <key,displacement>
- * pairs.  For each set, we'll load them as a pair using ldmia.
- * This means that the register number of the temp we use for the key
- * must be lower than the reg for the displacement.
- *
- * The test loop will look something like:
- *
- *   adr   r_base, <table>
- *   ldr   r_val, [rARM_SP, v_reg_off]
- *   mov   r_idx, #table_size
- * lp:
- *   ldmia r_base!, {r_key, r_disp}
- *   sub   r_idx, #1
- *   cmp   r_val, r_key
- *   ifeq
- *   add   rARM_PC, r_disp   ; This is the branch from which we compute displacement
- *   cbnz  r_idx, lp
- */
-void ArmMir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
-  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  // Add the table to the list - we'll process it later
-  SwitchTable *tab_rec =
-      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
-  tab_rec->switch_mir = mir;
-  tab_rec->table = table;
-  tab_rec->vaddr = current_dalvik_offset_;
-  uint32_t size = table[1];
-  switch_tables_.push_back(tab_rec);
-
-  // Get the switch value
-  rl_src = LoadValue(rl_src, kCoreReg);
-  RegStorage r_base = AllocTemp();
-  /* Allocate key and disp temps */
-  RegStorage r_key = AllocTemp();
-  RegStorage r_disp = AllocTemp();
-  // Make sure r_key's register number is less than r_disp's number for ldmia
-  if (r_key.GetReg() > r_disp.GetReg()) {
-    RegStorage tmp = r_disp;
-    r_disp = r_key;
-    r_key = tmp;
-  }
-  // Materialize a pointer to the switch table
-  NewLIR3(kThumb2Adr, r_base.GetReg(), 0, WrapPointer(tab_rec));
-  // Set up r_idx
-  RegStorage r_idx = AllocTemp();
-  LoadConstant(r_idx, size);
-  // Establish loop branch target
-  LIR* target = NewLIR0(kPseudoTargetLabel);
-  // Load next key/disp
-  NewLIR2(kThumb2LdmiaWB, r_base.GetReg(), (1 << r_key.GetRegNum()) | (1 << r_disp.GetRegNum()));
-  OpRegReg(kOpCmp, r_key, rl_src.reg);
-  // Go if match. NOTE: No instruction set switch here - must stay Thumb2
-  LIR* it = OpIT(kCondEq, "");
-  LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp.GetReg());
-  OpEndIT(it);
-  tab_rec->anchor = switch_branch;
-  // Needs to use setflags encoding here
-  OpRegRegImm(kOpSub, r_idx, r_idx, 1);  // For value == 1, this should set flags.
-  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
-  OpCondBranch(kCondNe, target);
-}
-
-
-void ArmMir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
-  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  // Add the table to the list - we'll process it later
-  SwitchTable *tab_rec =
-      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
-  tab_rec->switch_mir = mir;
-  tab_rec->table = table;
-  tab_rec->vaddr = current_dalvik_offset_;
-  uint32_t size = table[1];
-  switch_tables_.push_back(tab_rec);
-
-  // Get the switch value
-  rl_src = LoadValue(rl_src, kCoreReg);
-  RegStorage table_base = AllocTemp();
-  // Materialize a pointer to the switch table
-  NewLIR3(kThumb2Adr, table_base.GetReg(), 0, WrapPointer(tab_rec));
-  int low_key = s4FromSwitchData(&table[2]);
-  RegStorage keyReg;
-  // Remove the bias, if necessary
-  if (low_key == 0) {
-    keyReg = rl_src.reg;
-  } else {
-    keyReg = AllocTemp();
-    OpRegRegImm(kOpSub, keyReg, rl_src.reg, low_key);
-  }
-  // Bounds check: if < 0 or >= size, execution continues after the switch.
-  OpRegImm(kOpCmp, keyReg, size-1);
-  LIR* branch_over = OpCondBranch(kCondHi, nullptr);
-
-  // Load the displacement from the switch table
-  RegStorage disp_reg = AllocTemp();
-  LoadBaseIndexed(table_base, keyReg, disp_reg, 2, k32);
-
-  // ..and go! NOTE: No instruction set switch here - must stay Thumb2
-  LIR* switch_branch = NewLIR1(kThumb2AddPCR, disp_reg.GetReg());
-  tab_rec->anchor = switch_branch;
-
-  /* branch_over target here */
-  LIR* target = NewLIR0(kPseudoTargetLabel);
-  branch_over->target = target;
-}
-
-/*
- * Handle unlocked -> thin locked transition inline or else call out to quick entrypoint. For more
- * details see monitor.cc.
- */
-void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
-  FlushAllRegs();
-  // FIXME: need separate LoadValues for object references.
-  LoadValueDirectFixed(rl_src, rs_r0);  // Get obj
-  LockCallTemps();  // Prepare for explicit register usage
-  constexpr bool kArchVariantHasGoodBranchPredictor = false;  // TODO: true if cortex-A15.
-  if (kArchVariantHasGoodBranchPredictor) {
-    LIR* null_check_branch = nullptr;
-    if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
-      null_check_branch = nullptr;  // No null check.
-    } else {
-      // If the null-check fails, it's handled by the slow path to reduce exception-related metadata.
-      if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
-      }
-    }
-    Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
-    NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
-        mirror::Object::MonitorOffset().Int32Value() >> 2);
-    MarkPossibleNullPointerException(opt_flags);
-    // Zero out the read barrier bits.
-    OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
-    LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, nullptr);
-    // r1 is zero except for the rb bits here. Copy the read barrier bits into r2.
-    OpRegRegReg(kOpOr, rs_r2, rs_r2, rs_r1);
-    NewLIR4(kThumb2Strex, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
-        mirror::Object::MonitorOffset().Int32Value() >> 2);
-    LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, nullptr);
-
-
-    LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
-    not_unlocked_branch->target = slow_path_target;
-    if (null_check_branch != nullptr) {
-      null_check_branch->target = slow_path_target;
-    }
-    // TODO: move to a slow path.
-    // Go expensive route - artLockObjectFromCode(obj);
-    LoadWordDisp(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pLockObject).Int32Value(), rs_rARM_LR);
-    ClobberCallerSave();
-    LIR* call_inst = OpReg(kOpBlx, rs_rARM_LR);
-    MarkSafepointPC(call_inst);
-
-    LIR* success_target = NewLIR0(kPseudoTargetLabel);
-    lock_success_branch->target = success_target;
-    GenMemBarrier(kLoadAny);
-  } else {
-    // Explicit null-check as slow-path is entered using an IT.
-    GenNullCheck(rs_r0, opt_flags);
-    Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
-    NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
-        mirror::Object::MonitorOffset().Int32Value() >> 2);
-    MarkPossibleNullPointerException(opt_flags);
-    // Zero out the read barrier bits.
-    OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
-    // If the cmp/IT below takes the eq path (lock unlocked), r1 is zero
-    // except for the read barrier bits and r2 is what gets stored, so copy
-    // the read barrier bits into r2.
-    OpRegRegReg(kOpOr, rs_r2, rs_r2, rs_r1);
-    OpRegImm(kOpCmp, rs_r3, 0);
-
-    LIR* it = OpIT(kCondEq, "");
-    NewLIR4(kThumb2Strex/*eq*/, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
-        mirror::Object::MonitorOffset().Int32Value() >> 2);
-    OpEndIT(it);
-    OpRegImm(kOpCmp, rs_r1, 0);
-    it = OpIT(kCondNe, "T");
-    // Go expensive route - artLockObjectFromCode(self, obj);
-    LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pLockObject).Int32Value(),
-                       rs_rARM_LR);
-    ClobberCallerSave();
-    LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
-    OpEndIT(it);
-    MarkSafepointPC(call_inst);
-    GenMemBarrier(kLoadAny);
-  }
-}
-
-/*
- * Handle thin locked -> unlocked transition inline or else call out to quick entrypoint. For more
- * details see monitor.cc. Note the code below doesn't use ldrex/strex as the code holds the lock
- * and can only give away ownership if it is suspended.
- */
-void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
-  FlushAllRegs();
-  LoadValueDirectFixed(rl_src, rs_r0);  // Get obj
-  LockCallTemps();  // Prepare for explicit register usage
-  LIR* null_check_branch = nullptr;
-  Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
-  constexpr bool kArchVariantHasGoodBranchPredictor = false;  // TODO: true if cortex-A15.
-  if (kArchVariantHasGoodBranchPredictor) {
-    if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
-      null_check_branch = nullptr;  // No null check.
-    } else {
-      // If the null-check fails, it's handled by the slow path to reduce exception-related metadata.
-      if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
-      }
-    }
-    if (!kUseReadBarrier) {
-      Load32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);  // Get lock
-    } else {
-      NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
-              mirror::Object::MonitorOffset().Int32Value() >> 2);
-    }
-    MarkPossibleNullPointerException(opt_flags);
-    // Zero out the read barrier bits.
-    OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
-    // Zero out except the read barrier bits.
-    OpRegRegImm(kOpAnd, rs_r1, rs_r1, LockWord::kReadBarrierStateMaskShifted);
-    LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, nullptr);
-    GenMemBarrier(kAnyStore);
-    LIR* unlock_success_branch;
-    if (!kUseReadBarrier) {
-      Store32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
-      unlock_success_branch = OpUnconditionalBranch(nullptr);
-    } else {
-      NewLIR4(kThumb2Strex, rs_r2.GetReg(), rs_r1.GetReg(), rs_r0.GetReg(),
-              mirror::Object::MonitorOffset().Int32Value() >> 2);
-      unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, nullptr);
-    }
-    LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
-    slow_unlock_branch->target = slow_path_target;
-    if (null_check_branch != nullptr) {
-      null_check_branch->target = slow_path_target;
-    }
-    // TODO: move to a slow path.
-    // Go expensive route - artUnlockObjectFromCode(obj);
-    LoadWordDisp(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject).Int32Value(), rs_rARM_LR);
-    ClobberCallerSave();
-    LIR* call_inst = OpReg(kOpBlx, rs_rARM_LR);
-    MarkSafepointPC(call_inst);
-
-    LIR* success_target = NewLIR0(kPseudoTargetLabel);
-    unlock_success_branch->target = success_target;
-  } else {
-    // Explicit null-check as slow-path is entered using an IT.
-    GenNullCheck(rs_r0, opt_flags);
-    if (!kUseReadBarrier) {
-      Load32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);  // Get lock
-    } else {
-      // If we use read barriers, we need to use atomic instructions.
-      NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
-              mirror::Object::MonitorOffset().Int32Value() >> 2);
-    }
-    MarkPossibleNullPointerException(opt_flags);
-    Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
-    // Zero out the read barrier bits.
-    OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
-    // Zero out except the read barrier bits.
-    OpRegRegImm(kOpAnd, rs_r1, rs_r1, LockWord::kReadBarrierStateMaskShifted);
-    // Is the lock held by us (lock word without read barrier bits == thread_id)?
-    OpRegReg(kOpCmp, rs_r3, rs_r2);
-    if (!kUseReadBarrier) {
-      LIR* it = OpIT(kCondEq, "EE");
-      if (GenMemBarrier(kAnyStore)) {
-        UpdateIT(it, "TEE");
-      }
-      Store32Disp/*eq*/(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
-      // Go expensive route - UnlockObjectFromCode(obj);
-      LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject).Int32Value(),
-                         rs_rARM_LR);
-      ClobberCallerSave();
-      LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
-      OpEndIT(it);
-      MarkSafepointPC(call_inst);
-    } else {
-      // If we use read barriers, we need to use atomic instructions.
-      LIR* it = OpIT(kCondEq, "");
-      if (GenMemBarrier(kAnyStore)) {
-        UpdateIT(it, "T");
-      }
-      NewLIR4/*eq*/(kThumb2Strex, rs_r2.GetReg(), rs_r1.GetReg(), rs_r0.GetReg(),
-                    mirror::Object::MonitorOffset().Int32Value() >> 2);
-      OpEndIT(it);
-      // Since we know r2 wasn't zero before the above it instruction,
-      // if r2 is zero here, we know r3 was equal to r2 and the strex
-      // succeeded (we're done). Otherwise (either r3 wasn't equal to r2
-      // or the strex failed), call the entrypoint.
-      OpRegImm(kOpCmp, rs_r2, 0);
-      LIR* it2 = OpIT(kCondNe, "T");
-      // Go expensive route - UnlockObjectFromCode(obj);
-      LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject).Int32Value(),
-                         rs_rARM_LR);
-      ClobberCallerSave();
-      LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
-      OpEndIT(it2);
-      MarkSafepointPC(call_inst);
-    }
-  }
-}
-
-void ArmMir2Lir::GenMoveException(RegLocation rl_dest) {
-  int ex_offset = Thread::ExceptionOffset<4>().Int32Value();
-  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
-  RegStorage reset_reg = AllocTempRef();
-  LoadRefDisp(rs_rARM_SELF, ex_offset, rl_result.reg, kNotVolatile);
-  LoadConstant(reset_reg, 0);
-  StoreRefDisp(rs_rARM_SELF, ex_offset, reset_reg, kNotVolatile);
-  FreeTemp(reset_reg);
-  StoreValue(rl_dest, rl_result);
-}
-
-void ArmMir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
-  RegStorage reg_card_base = AllocTemp();
-  RegStorage reg_card_no = AllocTemp();
-  LoadWordDisp(rs_rARM_SELF, Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
-  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
-  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
-  FreeTemp(reg_card_base);
-  FreeTemp(reg_card_no);
-}
-
-static dwarf::Reg DwarfCoreReg(int num) {
-  return dwarf::Reg::ArmCore(num);
-}
-
-static dwarf::Reg DwarfFpReg(int num) {
-  return dwarf::Reg::ArmFp(num);
-}
-
-void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
-  DCHECK_EQ(cfi_.GetCurrentCFAOffset(), 0);  // empty stack.
-  int spill_count = num_core_spills_ + num_fp_spills_;
-  /*
-   * On entry, r0, r1, r2 & r3 are live.  Let the register allocation
-   * mechanism know so it doesn't try to use any of them when
-   * expanding the frame or flushing.  This leaves the utility
-   * code with a single temp: r12.  This should be enough.
-   */
-  LockTemp(rs_r0);
-  LockTemp(rs_r1);
-  LockTemp(rs_r2);
-  LockTemp(rs_r3);
-
-  /*
-   * We can safely skip the stack overflow check if we're
-   * a leaf *and* our frame size < fudge factor.
-   */
-  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, kArm);
-  const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm);
-  bool large_frame = (static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes);
-  bool generate_explicit_stack_overflow_check = large_frame ||
-    !cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks();
-  if (!skip_overflow_check) {
-    if (generate_explicit_stack_overflow_check) {
-      if (!large_frame) {
-        /* Load stack limit */
-        LockTemp(rs_r12);
-        Load32Disp(rs_rARM_SELF, Thread::StackEndOffset<4>().Int32Value(), rs_r12);
-      }
-    } else {
-      // Implicit stack overflow check.
-      // Generate a load from [sp, #-overflowsize].  If this is in the stack
-      // redzone we will get a segmentation fault.
-      //
-      // Caveat coder: if someone changes the kStackOverflowReservedBytes value
-      // we need to make sure that it's loadable in an immediate field of
-      // a sub instruction.  Otherwise we will get a temp allocation and the
-      // code size will increase.
-      //
-      // This is done before the callee save instructions to avoid any possibility
-      // of these overflowing.  This uses r12 and that's never saved in a callee
-      // save.
-      OpRegRegImm(kOpSub, rs_r12, rs_rARM_SP, GetStackOverflowReservedBytes(kArm));
-      Load32Disp(rs_r12, 0, rs_r12);
-      MarkPossibleStackOverflowException();
-    }
-  }
-  /* Spill core callee saves */
-  if (core_spill_mask_ != 0u) {
-    if ((core_spill_mask_ & ~(0xffu | (1u << rs_rARM_LR.GetRegNum()))) == 0u) {
-      // Spilling only low regs and/or LR, use 16-bit PUSH.
-      constexpr int lr_bit_shift = rs_rARM_LR.GetRegNum() - 8;
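-      // The 16-bit PUSH encoding carries LR in bit 8, so shift LR's mask bit (bit 14) down to bit 8.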
-      NewLIR1(kThumbPush,
-              (core_spill_mask_ & ~(1u << rs_rARM_LR.GetRegNum())) |
-              ((core_spill_mask_ & (1u << rs_rARM_LR.GetRegNum())) >> lr_bit_shift));
-    } else if (IsPowerOfTwo(core_spill_mask_)) {
-      // kThumb2Push cannot be used to spill a single register.
-      NewLIR1(kThumb2Push1, CTZ(core_spill_mask_));
-    } else {
-      NewLIR1(kThumb2Push, core_spill_mask_);
-    }
-    cfi_.AdjustCFAOffset(num_core_spills_ * kArmPointerSize);
-    cfi_.RelOffsetForMany(DwarfCoreReg(0), 0, core_spill_mask_, kArmPointerSize);
-  }
-  /* Need to spill any FP regs? */
-  if (num_fp_spills_ != 0u) {
-    /*
-     * NOTE: fp spills are a little different from core spills in that
-     * they are pushed as a contiguous block.  When promoting from
-     * the fp set, we must allocate all singles from s16..highest-promoted
-     */
-    NewLIR1(kThumb2VPushCS, num_fp_spills_);
-    cfi_.AdjustCFAOffset(num_fp_spills_ * kArmPointerSize);
-    cfi_.RelOffsetForMany(DwarfFpReg(0), 0, fp_spill_mask_, kArmPointerSize);
-  }
-
-  const int spill_size = spill_count * 4;
-  const int frame_size_without_spills = frame_size_ - spill_size;
-  if (!skip_overflow_check) {
-    if (generate_explicit_stack_overflow_check) {
-      class StackOverflowSlowPath : public LIRSlowPath {
-       public:
-        StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, bool restore_lr, size_t sp_displace)
-            : LIRSlowPath(m2l, branch), restore_lr_(restore_lr),
-              sp_displace_(sp_displace) {
-        }
-        void Compile() OVERRIDE {
-          m2l_->ResetRegPool();
-          m2l_->ResetDefTracking();
-          GenerateTargetLabel(kPseudoThrowTarget);
-          if (restore_lr_) {
-            m2l_->LoadWordDisp(rs_rARM_SP, sp_displace_ - 4, rs_rARM_LR);
-          }
-          m2l_->OpRegImm(kOpAdd, rs_rARM_SP, sp_displace_);
-          m2l_->cfi().AdjustCFAOffset(-sp_displace_);
-          m2l_->ClobberCallerSave();
-          ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowStackOverflow);
-          // Load the entrypoint directly into the pc instead of doing a load + branch. Assumes
-          // codegen and target are in thumb2 mode.
-          // NOTE: native pointer.
-          m2l_->LoadWordDisp(rs_rARM_SELF, func_offset.Int32Value(), rs_rARM_PC);
-          m2l_->cfi().AdjustCFAOffset(sp_displace_);
-        }
-
-       private:
-        const bool restore_lr_;
-        const size_t sp_displace_;
-      };
-      if (large_frame) {
-        // Note: may need a temp reg, and we only have r12 free at this point.
-        OpRegRegImm(kOpSub, rs_rARM_LR, rs_rARM_SP, frame_size_without_spills);
-        Load32Disp(rs_rARM_SELF, Thread::StackEndOffset<4>().Int32Value(), rs_r12);
-        LIR* branch = OpCmpBranch(kCondUlt, rs_rARM_LR, rs_r12, nullptr);
-        // Need to restore LR since we used it as a temp.
-        AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, true, spill_size));
-        OpRegCopy(rs_rARM_SP, rs_rARM_LR);     // Establish stack
-        cfi_.AdjustCFAOffset(frame_size_without_spills);
-      } else {
-        /*
-         * If the frame is small enough we are guaranteed to have enough space that remains to
-         * handle signals on the user stack.  However, we may not have any free temp
-         * registers at this point, so we'll temporarily add LR to the temp pool.
-         */
-        DCHECK(!GetRegInfo(rs_rARM_LR)->IsTemp());
-        MarkTemp(rs_rARM_LR);
-        FreeTemp(rs_rARM_LR);
-        OpRegRegImm(kOpSub, rs_rARM_SP, rs_rARM_SP, frame_size_without_spills);
-        cfi_.AdjustCFAOffset(frame_size_without_spills);
-        Clobber(rs_rARM_LR);
-        UnmarkTemp(rs_rARM_LR);
-        LIR* branch = OpCmpBranch(kCondUlt, rs_rARM_SP, rs_r12, nullptr);
-        AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, false, frame_size_));
-      }
-    } else {
-      // Implicit stack overflow check has already been done.  Just make room on the
-      // stack for the frame now.
-      OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills);
-      cfi_.AdjustCFAOffset(frame_size_without_spills);
-    }
-  } else {
-    OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills);
-    cfi_.AdjustCFAOffset(frame_size_without_spills);
-  }
-
-  FlushIns(ArgLocs, rl_method);
-
-  // We can promote a PC-relative reference to dex cache arrays to a register
-  // if it's used at least twice. Without investigating where we should lazily
-  // load the reference, we conveniently load it after flushing inputs.
-  if (dex_cache_arrays_base_reg_.Valid()) {
-    OpPcRelDexCacheArrayAddr(cu_->dex_file, dex_cache_arrays_min_offset_,
-                             dex_cache_arrays_base_reg_);
-  }
-
-  FreeTemp(rs_r0);
-  FreeTemp(rs_r1);
-  FreeTemp(rs_r2);
-  FreeTemp(rs_r3);
-  FreeTemp(rs_r12);
-}
-
-void ArmMir2Lir::GenExitSequence() {
-  cfi_.RememberState();
-  int spill_count = num_core_spills_ + num_fp_spills_;
-
-  /*
-   * In the exit path, r0/r1 are live - make sure they aren't
-   * allocated by the register utilities as temps.
-   */
-  LockTemp(rs_r0);
-  LockTemp(rs_r1);
-
-  int adjust = frame_size_ - (spill_count * kArmPointerSize);
-  OpRegImm(kOpAdd, rs_rARM_SP, adjust);
-  cfi_.AdjustCFAOffset(-adjust);
-  /* Need to restore any FP callee saves? */
-  if (num_fp_spills_) {
-    NewLIR1(kThumb2VPopCS, num_fp_spills_);
-    cfi_.AdjustCFAOffset(-num_fp_spills_ * kArmPointerSize);
-    cfi_.RestoreMany(DwarfFpReg(0), fp_spill_mask_);
-  }
-  bool unspill_LR_to_PC = (core_spill_mask_ & (1 << rs_rARM_LR.GetRegNum())) != 0;
-  uint32_t core_unspill_mask = core_spill_mask_;
-  if (unspill_LR_to_PC) {
-    core_unspill_mask &= ~(1 << rs_rARM_LR.GetRegNum());
-    core_unspill_mask |= (1 << rs_rARM_PC.GetRegNum());
-  }
-  if (core_unspill_mask != 0u) {
-    if ((core_unspill_mask & ~(0xffu | (1u << rs_rARM_PC.GetRegNum()))) == 0u) {
-      // Unspilling only low regs and/or PC, use 16-bit POP.
-      constexpr int pc_bit_shift = rs_rARM_PC.GetRegNum() - 8;
-      NewLIR1(kThumbPop,
-              (core_unspill_mask & ~(1u << rs_rARM_PC.GetRegNum())) |
-              ((core_unspill_mask & (1u << rs_rARM_PC.GetRegNum())) >> pc_bit_shift));
-    } else if (IsPowerOfTwo(core_unspill_mask)) {
-      // kThumb2Pop cannot be used to unspill a single register.
-      NewLIR1(kThumb2Pop1, CTZ(core_unspill_mask));
-    } else {
-      NewLIR1(kThumb2Pop, core_unspill_mask);
-    }
-    // If we pop to PC, there is no further epilogue code.
-    if (!unspill_LR_to_PC) {
-      cfi_.AdjustCFAOffset(-num_core_spills_ * kArmPointerSize);
-      cfi_.RestoreMany(DwarfCoreReg(0), core_unspill_mask);
-      DCHECK_EQ(cfi_.GetCurrentCFAOffset(), 0);  // empty stack.
-    }
-  }
-  if (!unspill_LR_to_PC) {
-    /* We didn't pop to rARM_PC, so must do a bx rARM_LR */
-    NewLIR1(kThumbBx, rs_rARM_LR.GetReg());
-  }
-  // The CFI should be restored for any code that follows the exit block.
-  cfi_.RestoreState();
-  cfi_.DefCFAOffset(frame_size_);
-}
-
-void ArmMir2Lir::GenSpecialExitSequence() {
-  NewLIR1(kThumbBx, rs_rARM_LR.GetReg());
-}
-
-void ArmMir2Lir::GenSpecialEntryForSuspend() {
-  // Keep 16-byte stack alignment - push r0, i.e. ArtMethod*, r5, r6, lr.
-  DCHECK(!IsTemp(rs_r5));
-  DCHECK(!IsTemp(rs_r6));
-  core_spill_mask_ =
-      (1u << rs_r5.GetRegNum()) | (1u << rs_r6.GetRegNum()) | (1u << rs_rARM_LR.GetRegNum());
-  num_core_spills_ = 3u;
-  fp_spill_mask_ = 0u;
-  num_fp_spills_ = 0u;
-  frame_size_ = 16u;
-  core_vmap_table_.clear();
-  fp_vmap_table_.clear();
-  NewLIR1(kThumbPush, (1u << rs_r0.GetRegNum()) |                 // ArtMethod*
-          (core_spill_mask_ & ~(1u << rs_rARM_LR.GetRegNum())) |  // Spills other than LR.
-          (1u << 8));                                             // LR encoded for 16-bit push.
-  cfi_.AdjustCFAOffset(frame_size_);
-  // Do not generate CFI for scratch register r0.
-  cfi_.RelOffsetForMany(DwarfCoreReg(0), 4, core_spill_mask_, kArmPointerSize);
-}
-
-void ArmMir2Lir::GenSpecialExitForSuspend() {
-  // Pop the frame. (ArtMethod* no longer needed but restore it anyway.)
-  NewLIR1(kThumb2Pop, (1u << rs_r0.GetRegNum()) | core_spill_mask_);  // 32-bit because of LR.
-  cfi_.AdjustCFAOffset(-frame_size_);
-  cfi_.RestoreMany(DwarfCoreReg(0), core_spill_mask_);
-}
-
-static bool ArmUseRelativeCall(CompilationUnit* cu, const MethodReference& target_method) {
-  // Emit relative calls only within a dex file due to the limited range of the BL insn.
-  return cu->dex_file == target_method.dex_file;
-}
-
-/*
- * Bit of a hack here - in the absence of a real scheduling pass,
- * emit the next instruction in static & direct invoke sequences.
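- * Each call emits code for the given state and returns state + 1; -1 ends the sequence.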
- */
-int ArmMir2Lir::ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info,
-                                  int state, const MethodReference& target_method,
-                                  uint32_t unused_idx ATTRIBUTE_UNUSED,
-                                  uintptr_t direct_code, uintptr_t direct_method,
-                                  InvokeType type) {
-  ArmMir2Lir* cg = static_cast<ArmMir2Lir*>(cu->cg.get());
-  if (info->string_init_offset != 0) {
-    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
-    switch (state) {
-    case 0: {  // Grab target method* from thread pointer
-      cg->LoadRefDisp(rs_rARM_SELF, info->string_init_offset, arg0_ref, kNotVolatile);
-      break;
-    }
-    case 1:  // Grab the code from the method*
-      if (direct_code == 0) {
-        // kInvokeTgt := arg0_ref->entrypoint
-        cg->LoadWordDisp(arg0_ref,
-                         ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-                             kArmPointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
-      }
-      break;
-    default:
-      return -1;
-    }
-  } else if (direct_code != 0 && direct_method != 0) {
-    switch (state) {
-    case 0:  // Get the current Method* [sets kArg0]
-      if (direct_code != static_cast<uintptr_t>(-1)) {
-        cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
-      } else if (ArmUseRelativeCall(cu, target_method)) {
-        // Defer to linker patch.
-      } else {
-        cg->LoadCodeAddress(target_method, type, kInvokeTgt);
-      }
-      if (direct_method != static_cast<uintptr_t>(-1)) {
-        cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
-      } else {
-        cg->LoadMethodAddress(target_method, type, kArg0);
-      }
-      break;
-    default:
-      return -1;
-    }
-  } else {
-    bool use_pc_rel = cg->CanUseOpPcRelDexCacheArrayLoad();
-    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
-    switch (state) {
-    case 0:  // Get the current Method* [sets kArg0]
-      // TUNING: we can save a reg copy if Method* has been promoted.
-      if (!use_pc_rel) {
-        cg->LoadCurrMethodDirect(arg0_ref);
-        break;
-      }
-      ++state;
-      FALLTHROUGH_INTENDED;
-    case 1:  // Get method->dex_cache_resolved_methods_
-      if (!use_pc_rel) {
-        cg->LoadBaseDisp(arg0_ref,
-                         ArtMethod::DexCacheResolvedMethodsOffset(kArmPointerSize).Int32Value(),
-                         arg0_ref,
-                         k32,
-                         kNotVolatile);
-      }
-      // Set up direct code if known.
-      if (direct_code != 0) {
-        if (direct_code != static_cast<uintptr_t>(-1)) {
-          cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
-        } else if (ArmUseRelativeCall(cu, target_method)) {
-          // Defer to linker patch.
-        } else {
-          CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
-          cg->LoadCodeAddress(target_method, type, kInvokeTgt);
-        }
-      }
-      if (!use_pc_rel || direct_code != 0) {
-        break;
-      }
-      ++state;
-      FALLTHROUGH_INTENDED;
-    case 2:  // Grab target method*
-      CHECK_EQ(cu->dex_file, target_method.dex_file);
-      if (!use_pc_rel) {
-        cg->LoadRefDisp(arg0_ref,
-                        cg->GetCachePointerOffset(target_method.dex_method_index,
-                                                  kArmPointerSize),
-                        arg0_ref,
-                        kNotVolatile);
-      } else {
-        size_t offset = cg->dex_cache_arrays_layout_.MethodOffset(target_method.dex_method_index);
-        cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, arg0_ref, false);
-      }
-      break;
-    case 3:  // Grab the code from the method*
-      if (direct_code == 0) {
-        // kInvokeTgt := arg0_ref->entrypoint
-        cg->LoadWordDisp(arg0_ref,
-                         ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-                             kArmPointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
-      }
-      break;
-    default:
-      return -1;
-    }
-  }
-  return state + 1;
-}
-
-NextCallInsn ArmMir2Lir::GetNextSDCallInsn() {
-  return ArmNextSDCallInsn;
-}
-
-LIR* ArmMir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
-  // For ARM, just generate a relative BL instruction that will be filled in at 'link time'.
-  // If the target turns out to be too far, the linker will generate a thunk for dispatch.
-  int target_method_idx = target_method.dex_method_index;
-  const DexFile* target_dex_file = target_method.dex_file;
-
-  // Generate the call instruction and save index, dex_file, and type.
-  // NOTE: Method deduplication takes linker patches into account, so we can just pass 0
-  // as a placeholder for the offset.
-  LIR* call = RawLIR(current_dalvik_offset_, kThumb2Bl, 0,
-                     target_method_idx, WrapPointer(target_dex_file), type);
-  AppendLIR(call);
-  call_method_insns_.push_back(call);
-  return call;
-}
-
-LIR* ArmMir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info) {
-  LIR* call_insn;
-  if (method_info.FastPath() && ArmUseRelativeCall(cu_, method_info.GetTargetMethod()) &&
-      (method_info.GetSharpType() == kDirect || method_info.GetSharpType() == kStatic) &&
-      method_info.DirectCode() == static_cast<uintptr_t>(-1)) {
-    call_insn = CallWithLinkerFixup(method_info.GetTargetMethod(), method_info.GetSharpType());
-  } else {
-    call_insn = OpReg(kOpBlx, TargetPtrReg(kInvokeTgt));
-  }
-  return call_insn;
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
deleted file mode 100644
index b94e707..0000000
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_ARM_CODEGEN_ARM_H_
-#define ART_COMPILER_DEX_QUICK_ARM_CODEGEN_ARM_H_
-
-#include "arm_lir.h"
-#include "base/arena_containers.h"
-#include "base/logging.h"
-#include "dex/quick/mir_to_lir.h"
-
-namespace art {
-
-struct CompilationUnit;
-
-class ArmMir2Lir FINAL : public Mir2Lir {
- protected:
-  // Inherited class for the ARM backend.
-  class InToRegStorageArmMapper FINAL : public InToRegStorageMapper {
-   public:
-    InToRegStorageArmMapper()
-        : cur_core_reg_(0), cur_fp_reg_(0), cur_fp_double_reg_(0) {
-    }
-
-    RegStorage GetNextReg(ShortyArg arg) OVERRIDE;
-
-    void Reset() OVERRIDE {
-      cur_core_reg_ = 0;
-      cur_fp_reg_ = 0;
-      cur_fp_double_reg_ = 0;
-    }
-
-   private:
-    size_t cur_core_reg_;
-    size_t cur_fp_reg_;
-    size_t cur_fp_double_reg_;
-  };
-
-  InToRegStorageArmMapper in_to_reg_storage_arm_mapper_;
-  InToRegStorageMapper* GetResetedInToRegStorageMapper() OVERRIDE {
-    in_to_reg_storage_arm_mapper_.Reset();
-    return &in_to_reg_storage_arm_mapper_;
-  }
-
-  public:
-    ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
-
-    // Required for target - codegen helpers.
-    bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
-                            RegLocation rl_dest, int lit);
-    bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
-    void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
-                                    int32_t constant) OVERRIDE;
-    void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
-                                     int64_t constant) OVERRIDE;
-    LIR* CheckSuspendUsingLoad() OVERRIDE;
-    RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
-    LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
-                      OpSize size, VolatileKind is_volatile) OVERRIDE;
-    LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
-                         OpSize size) OVERRIDE;
-    LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
-    LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
-    LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
-                       OpSize size, VolatileKind is_volatile) OVERRIDE;
-    LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
-                          OpSize size) OVERRIDE;
-
-    /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
-    void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
-
-    bool CanUseOpPcRelDexCacheArrayLoad() const OVERRIDE;
-    void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest,
-                                  bool wide) OVERRIDE;
-
-    // Required for target - register utilities.
-    RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
-    RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) OVERRIDE {
-      if (wide_kind == kWide) {
-        DCHECK((kArg0 <= reg && reg < kArg3) || (kFArg0 <= reg && reg < kFArg15) || (kRet0 == reg));
-        RegStorage ret_reg = RegStorage::MakeRegPair(TargetReg(reg),
-            TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
-        if (ret_reg.IsFloat()) {
-          // Represent the pair as a single double register, consistent with register allocation.
-          ret_reg = As64BitFloatReg(ret_reg);
-        }
-        return ret_reg;
-      } else {
-        return TargetReg(reg);
-      }
-    }
-
-    RegLocation GetReturnAlt() OVERRIDE;
-    RegLocation GetReturnWideAlt() OVERRIDE;
-    RegLocation LocCReturn() OVERRIDE;
-    RegLocation LocCReturnRef() OVERRIDE;
-    RegLocation LocCReturnDouble() OVERRIDE;
-    RegLocation LocCReturnFloat() OVERRIDE;
-    RegLocation LocCReturnWide() OVERRIDE;
-    ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
-    void AdjustSpillMask();
-    void ClobberCallerSave();
-    void FreeCallTemps();
-    void LockCallTemps();
-    void MarkPreservedSingle(int v_reg, RegStorage reg);
-    void MarkPreservedDouble(int v_reg, RegStorage reg);
-    void CompilerInitializeRegAlloc();
-
-    // Required for target - miscellaneous.
-    void AssembleLIR();
-    uint32_t LinkFixupInsns(LIR* head_lir, LIR* tail_lir, CodeOffset offset);
-    int AssignInsnOffsets();
-    void AssignOffsets();
-    static uint8_t* EncodeLIRs(uint8_t* write_pos, LIR* lir);
-    void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
-    void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
-                                  ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
-    const char* GetTargetInstFmt(int opcode);
-    const char* GetTargetInstName(int opcode);
-    std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
-    ResourceMask GetPCUseDefEncoding() const OVERRIDE;
-    uint64_t GetTargetInstFlags(int opcode);
-    size_t GetInsnSize(LIR* lir) OVERRIDE;
-    bool IsUnconditionalBranch(LIR* lir);
-
-    // Get the register class for load/store of a field.
-    RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
-
-    // Required for target - Dalvik-level generators.
-    void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                        RegLocation rl_src2, int flags) OVERRIDE;
-    void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                           RegLocation rl_src1, RegLocation rl_src2, int flags);
-    void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
-                     RegLocation rl_index, RegLocation rl_dest, int scale);
-    void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
-                     RegLocation rl_src, int scale, bool card_mark);
-    void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                           RegLocation rl_src1, RegLocation rl_shift, int flags);
-    void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                          RegLocation rl_src2);
-    void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                         RegLocation rl_src2);
-    void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                  RegLocation rl_src2);
-    void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
-    bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
-    bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
-    bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
-    bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
-    bool GenInlinedSqrt(CallInfo* info);
-    bool GenInlinedPeek(CallInfo* info, OpSize size);
-    bool GenInlinedPoke(CallInfo* info, OpSize size);
-    bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
-    RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
-    RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
-    void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-    void GenDivZeroCheckWide(RegStorage reg);
-    void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
-    void GenExitSequence();
-    void GenSpecialExitSequence() OVERRIDE;
-    void GenSpecialEntryForSuspend() OVERRIDE;
-    void GenSpecialExitForSuspend() OVERRIDE;
-    void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
-    void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
-    void GenSelect(BasicBlock* bb, MIR* mir);
-    void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
-                          int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                          RegisterClass dest_reg_class) OVERRIDE;
-    bool GenMemBarrier(MemBarrierKind barrier_kind);
-    void GenMonitorEnter(int opt_flags, RegLocation rl_src);
-    void GenMonitorExit(int opt_flags, RegLocation rl_src);
-    void GenMoveException(RegLocation rl_dest);
-    void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
-                                       int first_bit, int second_bit);
-    void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
-    void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
-    void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
-    void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
-    void GenMaddMsubInt(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
-                        RegLocation rl_src3, bool is_sub);
-
-    // Required for target - single operation generators.
-    LIR* OpUnconditionalBranch(LIR* target);
-    LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
-    LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
-    LIR* OpCondBranch(ConditionCode cc, LIR* target);
-    LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
-    LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
-    LIR* OpIT(ConditionCode cond, const char* guide);
-    void UpdateIT(LIR* it, const char* new_guide);
-    void OpEndIT(LIR* it);
-    LIR* OpMem(OpKind op, RegStorage r_base, int disp);
-    void OpPcRelLoad(RegStorage reg, LIR* target);
-    LIR* OpReg(OpKind op, RegStorage r_dest_src);
-    void OpRegCopy(RegStorage r_dest, RegStorage r_src);
-    LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
-    LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
-    LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
-    LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
-    LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
-    LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
-    LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
-    LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
-    LIR* OpTestSuspend(LIR* target);
-    LIR* OpVldm(RegStorage r_base, int count);
-    LIR* OpVstm(RegStorage r_base, int count);
-    void OpRegCopyWide(RegStorage dest, RegStorage src);
-
-    LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
-    LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
-    LIR* OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
-                          int shift);
-    LIR* OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
-    static const ArmEncodingMap EncodingMap[kArmLast];
-    int EncodeShift(int code, int amount);
-    int ModifiedImmediate(uint32_t value);
-    ArmConditionCode ArmConditionEncoding(ConditionCode code);
-    bool InexpensiveConstantInt(int32_t value) OVERRIDE;
-    bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) OVERRIDE;
-    bool InexpensiveConstantFloat(int32_t value) OVERRIDE;
-    bool InexpensiveConstantLong(int64_t value) OVERRIDE;
-    bool InexpensiveConstantDouble(int64_t value) OVERRIDE;
-    RegStorage AllocPreservedDouble(int s_reg);
-    RegStorage AllocPreservedSingle(int s_reg);
-
-    bool WideGPRsAreAliases() const OVERRIDE {
-      return false;  // Wide GPRs are formed by pairing.
-    }
-    bool WideFPRsAreAliases() const OVERRIDE {
-      return false;  // Wide FPRs are formed by pairing.
-    }
-
-    NextCallInsn GetNextSDCallInsn() OVERRIDE;
-
-    /*
-     * @brief Generate a relative call to the method that will be patched at link time.
-     * @param target_method The MethodReference of the method to be invoked.
-     * @param type How the method will be invoked.
-     * @returns The call instruction.
-     */
-    LIR* CallWithLinkerFixup(const MethodReference& target_method, InvokeType type);
-
-    /*
-     * @brief Generate the actual call insn based on the method info.
-     * @param method_info the lowering info for the method call.
-     * @returns The call instruction.
-     */
-    LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
-
-    void CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs) OVERRIDE;
-    void DoPromotion() OVERRIDE;
-
-    /*
-     * @brief Handle ARM-specific literals.
-     */
-    void InstallLiteralPools() OVERRIDE;
-
-    LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
-    size_t GetInstructionOffset(LIR* lir);
-
-    void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) OVERRIDE;
-
-    bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
-                          RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
-
-  private:
-    void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
-    void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, int64_t val,
-                                  ConditionCode ccode);
-    LIR* LoadFPConstantValue(int r_dest, int value);
-    LIR* LoadStoreUsingInsnWithOffsetImm8Shl2(ArmOpcode opcode, RegStorage r_base,
-                                              int displacement, RegStorage r_src_dest,
-                                              RegStorage r_work = RegStorage::InvalidReg());
-    void ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
-    void InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
-    void AssignDataOffsets();
-    RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
-                          bool is_div, int flags) OVERRIDE;
-    RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) OVERRIDE;
-    struct EasyMultiplyOp {
-      OpKind op;
-      uint32_t shift;
-    };
-    bool GetEasyMultiplyOp(int lit, EasyMultiplyOp* op);
-    bool GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops);
-    void GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, EasyMultiplyOp* ops);
-
-    static constexpr ResourceMask GetRegMaskArm(RegStorage reg);
-    static constexpr ResourceMask EncodeArmRegList(int reg_list);
-    static constexpr ResourceMask EncodeArmRegFpcsList(int reg_list);
-
-    ArenaVector<LIR*> call_method_insns_;
-
-    // Instructions needing patching with PC relative code addresses.
-    ArenaVector<LIR*> dex_cache_access_insns_;
-
-    // Register with a reference to the dex cache arrays at dex_cache_arrays_min_offset_,
-    // if promoted.
-    RegStorage dex_cache_arrays_base_reg_;
-
-    /**
-     * @brief Given float register pair, returns Solo64 float register.
-     * @param reg #RegStorage containing a float register pair (e.g. @c s2 and @c s3).
-     * @return A Solo64 float mapping to the register pair (e.g. @c d1).
-     */
-    static RegStorage As64BitFloatReg(RegStorage reg) {
-      DCHECK(reg.IsFloat());
-
-      RegStorage low = reg.GetLow();
-      RegStorage high = reg.GetHigh();
-      DCHECK((low.GetRegNum() % 2 == 0) && (low.GetRegNum() + 1 == high.GetRegNum()));
-
-      return RegStorage::FloatSolo64(low.GetRegNum() / 2);
-    }
-
-    /**
-     * @brief Given Solo64 float register, returns float register pair.
-     * @param reg #RegStorage containing a Solo64 float register (e.g. @c d1).
-     * @return A float register pair mapping to the Solo64 float register (e.g. @c s2 and @c s3).
-     */
-    static RegStorage As64BitFloatRegPair(RegStorage reg) {
-      DCHECK(reg.IsDouble() && reg.Is64BitSolo());
-
-      int reg_num = reg.GetRegNum();
-      return RegStorage::MakeRegPair(RegStorage::FloatSolo32(reg_num * 2),
-                                     RegStorage::FloatSolo32(reg_num * 2 + 1));
-    }
-
-    int GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) OVERRIDE;
-
-    static int ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED,
-                                 int state, const MethodReference& target_method,
-                                 uint32_t unused_idx ATTRIBUTE_UNUSED,
-                                 uintptr_t direct_code, uintptr_t direct_method,
-                                 InvokeType type);
-
-    void OpPcRelDexCacheArrayAddr(const DexFile* dex_file, int offset, RegStorage r_dest);
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_ARM_CODEGEN_ARM_H_
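
As64BitFloatReg and As64BitFloatRegPair above depend on the VFP aliasing rule that dN overlaps s(2N) and s(2N+1). A standalone sketch of that index arithmetic, with plain ints standing in for RegStorage:

#include <cassert>
#include <utility>

// {s(2N), s(2N+1)} -> dN, e.g. {s2, s3} -> d1.
inline int SinglePairToDouble(int s_low) {
  assert(s_low % 2 == 0);  // The pair must start on an even single register.
  return s_low / 2;
}

// dN -> {s(2N), s(2N+1)}, e.g. d1 -> {s2, s3}.
inline std::pair<int, int> DoubleToSinglePair(int d) {
  return std::make_pair(d * 2, d * 2 + 1);
}
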
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
deleted file mode 100644
index 1a5c108..0000000
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ /dev/null
@@ -1,423 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_arm.h"
-
-#include "arm_lir.h"
-#include "base/logging.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir-inl.h"
-
-namespace art {
-
-void ArmMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
-                                 RegLocation rl_src1, RegLocation rl_src2) {
-  int op = kThumbBkpt;
-  RegLocation rl_result;
-
-  /*
-   * Don't attempt to optimize register usage since these opcodes call out to
-   * the handlers.
-   */
-  switch (opcode) {
-    case Instruction::ADD_FLOAT_2ADDR:
-    case Instruction::ADD_FLOAT:
-      op = kThumb2Vadds;
-      break;
-    case Instruction::SUB_FLOAT_2ADDR:
-    case Instruction::SUB_FLOAT:
-      op = kThumb2Vsubs;
-      break;
-    case Instruction::DIV_FLOAT_2ADDR:
-    case Instruction::DIV_FLOAT:
-      op = kThumb2Vdivs;
-      break;
-    case Instruction::MUL_FLOAT_2ADDR:
-    case Instruction::MUL_FLOAT:
-      op = kThumb2Vmuls;
-      break;
-    case Instruction::REM_FLOAT_2ADDR:
-    case Instruction::REM_FLOAT:
-      FlushAllRegs();   // Send everything to home location
-      CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
-      rl_result = GetReturn(kFPReg);
-      StoreValue(rl_dest, rl_result);
-      return;
-    case Instruction::NEG_FLOAT:
-      GenNegFloat(rl_dest, rl_src1);
-      return;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
-  }
-  rl_src1 = LoadValue(rl_src1, kFPReg);
-  rl_src2 = LoadValue(rl_src2, kFPReg);
-  rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  StoreValue(rl_dest, rl_result);
-}
-
-void ArmMir2Lir::GenArithOpDouble(Instruction::Code opcode,
-                                  RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
-  int op = kThumbBkpt;
-  RegLocation rl_result;
-
-  switch (opcode) {
-    case Instruction::ADD_DOUBLE_2ADDR:
-    case Instruction::ADD_DOUBLE:
-      op = kThumb2Vaddd;
-      break;
-    case Instruction::SUB_DOUBLE_2ADDR:
-    case Instruction::SUB_DOUBLE:
-      op = kThumb2Vsubd;
-      break;
-    case Instruction::DIV_DOUBLE_2ADDR:
-    case Instruction::DIV_DOUBLE:
-      op = kThumb2Vdivd;
-      break;
-    case Instruction::MUL_DOUBLE_2ADDR:
-    case Instruction::MUL_DOUBLE:
-      op = kThumb2Vmuld;
-      break;
-    case Instruction::REM_DOUBLE_2ADDR:
-    case Instruction::REM_DOUBLE:
-      FlushAllRegs();   // Send everything to home location
-      CallRuntimeHelperRegLocationRegLocation(kQuickFmod, rl_src1, rl_src2, false);
-      rl_result = GetReturnWide(kFPReg);
-      StoreValueWide(rl_dest, rl_result);
-      return;
-    case Instruction::NEG_DOUBLE:
-      GenNegDouble(rl_dest, rl_src1);
-      return;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
-  }
-
-  rl_src1 = LoadValueWide(rl_src1, kFPReg);
-  DCHECK(rl_src1.wide);
-  rl_src2 = LoadValueWide(rl_src2, kFPReg);
-  DCHECK(rl_src2.wide);
-  rl_result = EvalLoc(rl_dest, kFPReg, true);
-  DCHECK(rl_dest.wide);
-  DCHECK(rl_result.wide);
-  NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void ArmMir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
-                                            int32_t constant) {
-  RegLocation rl_result;
-  RegStorage r_tmp = AllocTempSingle();
-  LoadConstantNoClobber(r_tmp, constant);
-  rl_src1 = LoadValue(rl_src1, kFPReg);
-  rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR3(kThumb2Vmuls, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), r_tmp.GetReg());
-  StoreValue(rl_dest, rl_result);
-}
-
-void ArmMir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
-                                             int64_t constant) {
-  RegLocation rl_result;
-  RegStorage r_tmp = AllocTempDouble();
-  DCHECK(r_tmp.IsDouble());
-  LoadConstantWide(r_tmp, constant);
-  rl_src1 = LoadValueWide(rl_src1, kFPReg);
-  DCHECK(rl_src1.wide);
-  rl_result = EvalLocWide(rl_dest, kFPReg, true);
-  DCHECK(rl_dest.wide);
-  DCHECK(rl_result.wide);
-  NewLIR3(kThumb2Vmuld, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), r_tmp.GetReg());
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void ArmMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src) {
-  int op = kThumbBkpt;
-  int src_reg;
-  RegLocation rl_result;
-
-  switch (opcode) {
-    case Instruction::INT_TO_FLOAT:
-      op = kThumb2VcvtIF;
-      break;
-    case Instruction::FLOAT_TO_INT:
-      op = kThumb2VcvtFI;
-      break;
-    case Instruction::DOUBLE_TO_FLOAT:
-      op = kThumb2VcvtDF;
-      break;
-    case Instruction::FLOAT_TO_DOUBLE:
-      op = kThumb2VcvtFd;
-      break;
-    case Instruction::INT_TO_DOUBLE:
-      op = kThumb2VcvtF64S32;
-      break;
-    case Instruction::DOUBLE_TO_INT:
-      op = kThumb2VcvtDI;
-      break;
-    case Instruction::LONG_TO_DOUBLE: {
-      rl_src = LoadValueWide(rl_src, kFPReg);
-      RegisterInfo* info = GetRegInfo(rl_src.reg);
-      RegStorage src_low = info->FindMatchingView(RegisterInfo::kLowSingleStorageMask)->GetReg();
-      DCHECK(src_low.Valid());
-      RegStorage src_high = info->FindMatchingView(RegisterInfo::kHighSingleStorageMask)->GetReg();
-      DCHECK(src_high.Valid());
-      rl_result = EvalLoc(rl_dest, kFPReg, true);
-      RegStorage tmp1 = AllocTempDouble();
-      RegStorage tmp2 = AllocTempDouble();
-
-      NewLIR2(kThumb2VcvtF64S32, tmp1.GetReg(), src_high.GetReg());
-      NewLIR2(kThumb2VcvtF64U32, rl_result.reg.GetReg(), src_low.GetReg());
-      LoadConstantWide(tmp2, 0x41f0000000000000LL);
-      NewLIR3(kThumb2VmlaF64, rl_result.reg.GetReg(), tmp1.GetReg(), tmp2.GetReg());
-      FreeTemp(tmp1);
-      FreeTemp(tmp2);
-      StoreValueWide(rl_dest, rl_result);
-      return;
-    }
-    case Instruction::FLOAT_TO_LONG:
-      CheckEntrypointTypes<kQuickF2l, int64_t, float>();  // int64_t -> kCoreReg
-      GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
-      return;
-    case Instruction::LONG_TO_FLOAT: {
-      CheckEntrypointTypes<kQuickL2f, float, int64_t>();  // float -> kFPReg
-      GenConversionCall(kQuickL2f, rl_dest, rl_src, kFPReg);
-      return;
-    }
-    case Instruction::DOUBLE_TO_LONG:
-      CheckEntrypointTypes<kQuickD2l, int64_t, double>();  // int64_t -> kCoreReg
-      GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
-      return;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
-  }
-  if (rl_src.wide) {
-    rl_src = LoadValueWide(rl_src, kFPReg);
-    src_reg = rl_src.reg.GetReg();
-  } else {
-    rl_src = LoadValue(rl_src, kFPReg);
-    src_reg = rl_src.reg.GetReg();
-  }
-  if (rl_dest.wide) {
-    rl_result = EvalLoc(rl_dest, kFPReg, true);
-    NewLIR2(op, rl_result.reg.GetReg(), src_reg);
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    rl_result = EvalLoc(rl_dest, kFPReg, true);
-    NewLIR2(op, rl_result.reg.GetReg(), src_reg);
-    StoreValue(rl_dest, rl_result);
-  }
-}
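
The LONG_TO_DOUBLE case above computes (double)(int32_t)hi * 2^32 + (double)(uint32_t)lo; the constant 0x41f0000000000000 loaded for the VMLA is simply the IEEE-754 bit pattern of 2^32. A scalar sketch of the same identity (exact, since both partial conversions are exact and the final add rounds once):

#include <cstdint>

inline double LongToDouble(int64_t v) {
  int32_t hi = static_cast<int32_t>(v >> 32);    // Signed high word.
  uint32_t lo = static_cast<uint32_t>(v);        // Unsigned low word.
  return static_cast<double>(hi) * 4294967296.0  // 2^32
       + static_cast<double>(lo);
}

// Usage: LongToDouble(-123456789012345LL) == (double)-123456789012345LL.
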
-
-void ArmMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
-                                     bool is_double) {
-  LIR* target = &block_label_list_[bb->taken];
-  RegLocation rl_src1;
-  RegLocation rl_src2;
-  if (is_double) {
-    rl_src1 = mir_graph_->GetSrcWide(mir, 0);
-    rl_src2 = mir_graph_->GetSrcWide(mir, 2);
-    rl_src1 = LoadValueWide(rl_src1, kFPReg);
-    rl_src2 = LoadValueWide(rl_src2, kFPReg);
-    NewLIR2(kThumb2Vcmpd, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  } else {
-    rl_src1 = mir_graph_->GetSrc(mir, 0);
-    rl_src2 = mir_graph_->GetSrc(mir, 1);
-    rl_src1 = LoadValue(rl_src1, kFPReg);
-    rl_src2 = LoadValue(rl_src2, kFPReg);
-    NewLIR2(kThumb2Vcmps, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  }
-  NewLIR0(kThumb2Fmstat);
-  ConditionCode ccode = mir->meta.ccode;
-  switch (ccode) {
-    case kCondEq:
-    case kCondNe:
-      break;
-    case kCondLt:
-      if (gt_bias) {
-        ccode = kCondMi;
-      }
-      break;
-    case kCondLe:
-      if (gt_bias) {
-        ccode = kCondLs;
-      }
-      break;
-    case kCondGt:
-      if (gt_bias) {
-        ccode = kCondHi;
-      }
-      break;
-    case kCondGe:
-      if (gt_bias) {
-        ccode = kCondUge;
-      }
-      break;
-    default:
-      LOG(FATAL) << "Unexpected ccode: " << ccode;
-  }
-  OpCondBranch(ccode, target);
-}
-
-
-void ArmMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
-                          RegLocation rl_src1, RegLocation rl_src2) {
-  bool is_double = false;
-  int default_result = -1;
-  RegLocation rl_result;
-
-  switch (opcode) {
-    case Instruction::CMPL_FLOAT:
-      is_double = false;
-      default_result = -1;
-      break;
-    case Instruction::CMPG_FLOAT:
-      is_double = false;
-      default_result = 1;
-      break;
-    case Instruction::CMPL_DOUBLE:
-      is_double = true;
-      default_result = -1;
-      break;
-    case Instruction::CMPG_DOUBLE:
-      is_double = true;
-      default_result = 1;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
-  }
-  if (is_double) {
-    rl_src1 = LoadValueWide(rl_src1, kFPReg);
-    rl_src2 = LoadValueWide(rl_src2, kFPReg);
-    // In case result vreg is also a src vreg, break association to avoid useless copy by EvalLoc()
-    ClobberSReg(rl_dest.s_reg_low);
-    rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    LoadConstant(rl_result.reg, default_result);
-    NewLIR2(kThumb2Vcmpd, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  } else {
-    rl_src1 = LoadValue(rl_src1, kFPReg);
-    rl_src2 = LoadValue(rl_src2, kFPReg);
-    // In case result vreg is also a src vreg, break association to avoid useless copy by EvalLoc()
-    ClobberSReg(rl_dest.s_reg_low);
-    rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    LoadConstant(rl_result.reg, default_result);
-    NewLIR2(kThumb2Vcmps, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  }
-  DCHECK(!rl_result.reg.IsFloat());
-  NewLIR0(kThumb2Fmstat);
-
-  LIR* it = OpIT((default_result == -1) ? kCondGt : kCondMi, "");
-  NewLIR2(kThumb2MovI8M, rl_result.reg.GetReg(),
-          ModifiedImmediate(-default_result));  // Must not alter ccodes
-  OpEndIT(it);
-
-  it = OpIT(kCondEq, "");
-  LoadConstant(rl_result.reg, 0);
-  OpEndIT(it);
-
-  StoreValue(rl_dest, rl_result);
-}
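
GenCmpFP above implements the Dalvik cmpl/cmpg contract with one vcmp and two IT blocks: the biased default is loaded first, then overwritten for the ordered-greater (or ordered-less) and equal cases. A scalar sketch of the contract being implemented:

#include <cmath>

// cmpl returns -1 for NaN operands (gt_bias == false), cmpg returns 1.
inline int CompareFloat(float a, float b, bool gt_bias) {
  if (std::isnan(a) || std::isnan(b)) return gt_bias ? 1 : -1;
  if (a == b) return 0;
  return (a < b) ? -1 : 1;
}
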
-
-void ArmMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
-  RegLocation rl_result;
-  rl_src = LoadValue(rl_src, kFPReg);
-  rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(kThumb2Vnegs, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  StoreValue(rl_dest, rl_result);
-}
-
-void ArmMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
-  RegLocation rl_result;
-  rl_src = LoadValueWide(rl_src, kFPReg);
-  rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(kThumb2Vnegd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  StoreValueWide(rl_dest, rl_result);
-}
-
-static RegisterClass RegClassForAbsFP(RegLocation rl_src, RegLocation rl_dest) {
-  // If src is in a core reg or, unlikely, dest has been promoted to a core reg, use core reg.
-  if ((rl_src.location == kLocPhysReg && !rl_src.reg.IsFloat()) ||
-      (rl_dest.location == kLocPhysReg && !rl_dest.reg.IsFloat())) {
-    return kCoreReg;
-  }
-  // If src is in an fp reg or dest has been promoted to an fp reg, use fp reg.
-  if (rl_src.location == kLocPhysReg || rl_dest.location == kLocPhysReg) {
-    return kFPReg;
-  }
-  // With both src and dest in the stack frame we have to perform load+abs+store. Whether this
-  // is faster using a core reg or fp reg depends on the particular CPU. Without further
-  // investigation and testing we prefer core register. (If the result is subsequently used in
-  // another fp operation, the dalvik reg will probably get promoted and that should be handled
-  // by the cases above.)
-  return kCoreReg;
-}
-
-bool ArmMir2Lir::GenInlinedAbsFloat(CallInfo* info) {
-  if (info->result.location == kLocInvalid) {
-    return true;  // Result is unused: inlining successful, no code generated.
-  }
-  RegLocation rl_dest = info->result;
-  RegLocation rl_src = UpdateLoc(info->args[0]);
-  RegisterClass reg_class = RegClassForAbsFP(rl_src, rl_dest);
-  rl_src = LoadValue(rl_src, reg_class);
-  RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
-  if (reg_class == kFPReg) {
-    NewLIR2(kThumb2Vabss, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  } else {
-    OpRegRegImm(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffff);
-  }
-  StoreValue(rl_dest, rl_result);
-  return true;
-}
-
-bool ArmMir2Lir::GenInlinedAbsDouble(CallInfo* info) {
-  if (info->result.location == kLocInvalid) {
-    return true;  // Result is unused: inlining successful, no code generated.
-  }
-  RegLocation rl_dest = info->result;
-  RegLocation rl_src = UpdateLocWide(info->args[0]);
-  RegisterClass reg_class = RegClassForAbsFP(rl_src, rl_dest);
-  rl_src = LoadValueWide(rl_src, reg_class);
-  RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
-  if (reg_class == kFPReg) {
-    NewLIR2(kThumb2Vabsd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  } else if (rl_result.reg.GetLow().GetReg() != rl_src.reg.GetHigh().GetReg()) {
-    // No inconvenient overlap.
-    OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetLow());
-    OpRegRegImm(kOpAnd, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 0x7fffffff);
-  } else {
-    // Inconvenient overlap, use a temp register to preserve the high word of the source.
-    RegStorage rs_tmp = AllocTemp();
-    OpRegCopy(rs_tmp, rl_src.reg.GetHigh());
-    OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetLow());
-    OpRegRegImm(kOpAnd, rl_result.reg.GetHigh(), rs_tmp, 0x7fffffff);
-    FreeTemp(rs_tmp);
-  }
-  StoreValueWide(rl_dest, rl_result);
-  return true;
-}
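
Both inlined abs paths above just clear the IEEE-754 sign bit, whether the value sits in an FP register (vabs) or in core registers (and of the high word with 0x7fffffff). A bit-level sketch of the same operation:

#include <cstdint>
#include <cstring>

inline float AbsFloat(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  bits &= 0x7fffffffu;  // Clear the sign bit.
  std::memcpy(&f, &bits, sizeof(bits));
  return f;
}

inline double AbsDouble(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  bits &= 0x7fffffffffffffffULL;  // Clear the sign bit in the high word.
  std::memcpy(&d, &bits, sizeof(bits));
  return d;
}
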
-
-bool ArmMir2Lir::GenInlinedSqrt(CallInfo* info) {
-  DCHECK_EQ(cu_->instruction_set, kThumb2);
-  RegLocation rl_src = info->args[0];
-  RegLocation rl_dest = InlineTargetWide(info);  // double-width location for the result
-  rl_src = LoadValueWide(rl_src, kFPReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(kThumb2Vsqrtd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  StoreValueWide(rl_dest, rl_result);
-  return true;
-}
-
-
-}  // namespace art
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
deleted file mode 100644
index b2bd6fa..0000000
--- a/compiler/dex/quick/arm/int_arm.cc
+++ /dev/null
@@ -1,1736 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This file contains codegen for the Thumb2 ISA. */
-
-#include "codegen_arm.h"
-
-#include "arch/instruction_set_features.h"
-#include "arm_lir.h"
-#include "base/bit_utils.h"
-#include "base/logging.h"
-#include "dex/compiler_ir.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "dex/reg_storage_eq.h"
-#include "driver/compiler_driver.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "mirror/array-inl.h"
-
-namespace art {
-
-LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
-  OpRegReg(kOpCmp, src1, src2);
-  return OpCondBranch(cond, target);
-}
-
-/*
- * Generate a Thumb2 IT instruction, which can nullify up to
- * four subsequent instructions based on a condition and its
- * inverse.  The condition applies to the first instruction, which
- * is executed if the condition is met.  The string "guide" consists
- * of 0 to 3 chars, and applies to the 2nd through 4th instruction.
- * A "T" means the instruction is executed if the condition is
- * met, and an "E" means the instruction is executed if the condition
- * is not met.
- */
-LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide) {
-  int mask;
-  int mask3 = 0;
-  int mask2 = 0;
-  int mask1 = 0;
-  ArmConditionCode code = ArmConditionEncoding(ccode);
-  int cond_bit = code & 1;
-  int alt_bit = cond_bit ^ 1;
-
-  switch (strlen(guide)) {
-    case 3:
-      mask1 = (guide[2] == 'T') ? cond_bit : alt_bit;
-      FALLTHROUGH_INTENDED;
-    case 2:
-      mask2 = (guide[1] == 'T') ? cond_bit : alt_bit;
-      FALLTHROUGH_INTENDED;
-    case 1:
-      mask3 = (guide[0] == 'T') ? cond_bit : alt_bit;
-      break;
-    case 0:
-      break;
-    default:
-      LOG(FATAL) << "OAT: bad case in OpIT";
-      UNREACHABLE();
-  }
-  mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
-       (1 << (3 - strlen(guide)));
-  return NewLIR2(kThumb2It, code, mask);
-}
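
The mask computed above packs the guide into bits [3:0] of the IT encoding: each guide slot contributes the condition's low bit for 'T' or its complement for 'E', and a trailing 1 marks where the guide ends. The same computation as a standalone sketch:

#include <cstring>

// cond is the 4-bit ARM condition code; guide is 0 to 3 chars of 'T'/'E'.
inline int EncodeItMask(int cond, const char* guide) {
  int cond_bit = cond & 1;
  int alt_bit = cond_bit ^ 1;
  int len = static_cast<int>(std::strlen(guide));  // Must be <= 3.
  int mask = 1 << (3 - len);  // Terminating 1 bit.
  for (int i = 0; i < len; ++i) {
    int bit = (guide[i] == 'T') ? cond_bit : alt_bit;
    mask |= bit << (3 - i);
  }
  return mask;
}

// Usage: EncodeItMask(cond, "") == 0b1000, matching the zero-length case above.
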
-
-void ArmMir2Lir::UpdateIT(LIR* it, const char* new_guide) {
-  int mask;
-  int mask3 = 0;
-  int mask2 = 0;
-  int mask1 = 0;
-  ArmConditionCode code = static_cast<ArmConditionCode>(it->operands[0]);
-  int cond_bit = code & 1;
-  int alt_bit = cond_bit ^ 1;
-
-  switch (strlen(new_guide)) {
-    case 3:
-      mask1 = (new_guide[2] == 'T') ? cond_bit : alt_bit;
-      FALLTHROUGH_INTENDED;
-    case 2:
-      mask2 = (new_guide[1] == 'T') ? cond_bit : alt_bit;
-      FALLTHROUGH_INTENDED;
-    case 1:
-      mask3 = (new_guide[0] == 'T') ? cond_bit : alt_bit;
-      break;
-    case 0:
-      break;
-    default:
-      LOG(FATAL) << "OAT: bad case in UpdateIT";
-      UNREACHABLE();
-  }
-  mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
-      (1 << (3 - strlen(new_guide)));
-  it->operands[1] = mask;
-}
-
-void ArmMir2Lir::OpEndIT(LIR* it) {
-  // TODO: use the 'it' pointer to do some checks with the LIR, for example
-  //       we could check that the number of instructions matches the mask
-  //       in the IT instruction.
-  CHECK(it != nullptr);
-  GenBarrier();
-}
-
-/*
- * 64-bit 3way compare function.
- *     mov   rX, #-1
- *     cmp   op1hi, op2hi
- *     blt   done
- *     bgt   flip
- *     sub   rX, op1lo, op2lo (treat as unsigned)
- *     beq   done
- *     ite   hi
- *     mov(hi)   rX, #-1
- *     mov(!hi)  rX, #1
- * flip:
- *     neg   rX
- * done:
- */
-void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
-  LIR* target1;
-  LIR* target2;
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  RegStorage t_reg = AllocTemp();
-  LoadConstant(t_reg, -1);
-  OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
-  LIR* branch1 = OpCondBranch(kCondLt, nullptr);
-  LIR* branch2 = OpCondBranch(kCondGt, nullptr);
-  OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
-  LIR* branch3 = OpCondBranch(kCondEq, nullptr);
-
-  LIR* it = OpIT(kCondHi, "E");
-  NewLIR2(kThumb2MovI8M, t_reg.GetReg(), ModifiedImmediate(-1));
-  LoadConstant(t_reg, 1);
-  OpEndIT(it);
-
-  target2 = NewLIR0(kPseudoTargetLabel);
-  OpRegReg(kOpNeg, t_reg, t_reg);
-
-  target1 = NewLIR0(kPseudoTargetLabel);
-
-  RegLocation rl_temp = LocCReturn();  // Just using as template, will change
-  rl_temp.reg.SetReg(t_reg.GetReg());
-  StoreValue(rl_dest, rl_temp);
-  FreeTemp(t_reg);
-
-  branch1->target = target1;
-  branch2->target = target2;
-  branch3->target = branch1->target;
-}
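
The pseudocode above produces the canonical -1/0/1 three-way result. As a specification to check the branchy version against, the scalar form is just:

#include <cstdint>

inline int CmpLong(int64_t a, int64_t b) {
  return (a > b) - (a < b);  // 1 if a > b, -1 if a < b, 0 if equal.
}
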
-
-void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
-                                          int64_t val, ConditionCode ccode) {
-  int32_t val_lo = Low32Bits(val);
-  int32_t val_hi = High32Bits(val);
-  DCHECK_GE(ModifiedImmediate(val_lo), 0);
-  DCHECK_GE(ModifiedImmediate(val_hi), 0);
-  LIR* taken = &block_label_list_[bb->taken];
-  LIR* not_taken = &block_label_list_[bb->fall_through];
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  RegStorage low_reg = rl_src1.reg.GetLow();
-  RegStorage high_reg = rl_src1.reg.GetHigh();
-
-  if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
-    RegStorage t_reg = AllocTemp();
-    NewLIR4(kThumb2OrrRRRs, t_reg.GetReg(), low_reg.GetReg(), high_reg.GetReg(), 0);
-    FreeTemp(t_reg);
-    OpCondBranch(ccode, taken);
-    return;
-  }
-
-  switch (ccode) {
-    case kCondEq:
-    case kCondNe:
-      OpCmpImmBranch(kCondNe, high_reg, val_hi, (ccode == kCondEq) ? not_taken : taken);
-      break;
-    case kCondLt:
-      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
-      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
-      ccode = kCondUlt;
-      break;
-    case kCondLe:
-      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
-      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
-      ccode = kCondLs;
-      break;
-    case kCondGt:
-      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
-      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
-      ccode = kCondHi;
-      break;
-    case kCondGe:
-      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
-      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
-      ccode = kCondUge;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected ccode: " << ccode;
-  }
-  OpCmpImmBranch(ccode, low_reg, val_lo, taken);
-}
-
-void ArmMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
-                                  int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                                  RegisterClass dest_reg_class ATTRIBUTE_UNUSED) {
-  // TODO: Generalize the IT below to accept more than one-instruction loads.
-  DCHECK(InexpensiveConstantInt(true_val));
-  DCHECK(InexpensiveConstantInt(false_val));
-
-  if ((true_val == 0 && code == kCondEq) ||
-      (false_val == 0 && code == kCondNe)) {
-    OpRegRegReg(kOpSub, rs_dest, left_op, right_op);
-    DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
-    LIR* it = OpIT(kCondNe, "");
-    LoadConstant(rs_dest, code == kCondEq ? false_val : true_val);
-    OpEndIT(it);
-    return;
-  }
-
-  OpRegReg(kOpCmp, left_op, right_op);  // Same?
-  LIR* it = OpIT(code, "E");   // if-convert the test
-  LoadConstant(rs_dest, true_val);      // .eq case - load true
-  LoadConstant(rs_dest, false_val);     // .ne case - load false
-  OpEndIT(it);
-}
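
The special case above exploits that "subs dest, l, r" already leaves 0 in dest exactly when l == r, so selecting 0 on equality costs only one conditional load in the IT shadow for the other arm. A scalar sketch of those semantics:

#include <cstdint>

inline int32_t SelectEqZero(int32_t l, int32_t r, int32_t false_val) {
  int32_t dest = l - r;  // 0 iff l == r; the subs also sets the flags.
  if (dest != 0) {       // The IT(ne) shadow.
    dest = false_val;
  }
  return dest;
}
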
-
-void ArmMir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
-  RegLocation rl_result;
-  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
-  RegLocation rl_dest = mir_graph_->GetDest(mir);
-  // Avoid using float regs here.
-  RegisterClass src_reg_class = rl_src.ref ? kRefReg : kCoreReg;
-  RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;
-  rl_src = LoadValue(rl_src, src_reg_class);
-  ConditionCode ccode = mir->meta.ccode;
-  if (mir->ssa_rep->num_uses == 1) {
-    // CONST case
-    int true_val = mir->dalvikInsn.vB;
-    int false_val = mir->dalvikInsn.vC;
-    rl_result = EvalLoc(rl_dest, result_reg_class, true);
-    // Change kCondNe to kCondEq for the special cases below.
-    if (ccode == kCondNe) {
-      ccode = kCondEq;
-      std::swap(true_val, false_val);
-    }
-    bool cheap_false_val = InexpensiveConstantInt(false_val);
-    if (cheap_false_val && ccode == kCondEq && (true_val == 0 || true_val == -1)) {
-      OpRegRegImm(kOpSub, rl_result.reg, rl_src.reg, -true_val);
-      DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
-      LIR* it = OpIT(true_val == 0 ? kCondNe : kCondUge, "");
-      LoadConstant(rl_result.reg, false_val);
-      OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
-    } else if (cheap_false_val && ccode == kCondEq && true_val == 1) {
-      OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, 1);
-      DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
-      LIR* it = OpIT(kCondLs, "");
-      LoadConstant(rl_result.reg, false_val);
-      OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
-    } else if (cheap_false_val && InexpensiveConstantInt(true_val)) {
-      OpRegImm(kOpCmp, rl_src.reg, 0);
-      LIR* it = OpIT(ccode, "E");
-      LoadConstant(rl_result.reg, true_val);
-      LoadConstant(rl_result.reg, false_val);
-      OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
-    } else {
-      // Unlikely case - could be tuned.
-      RegStorage t_reg1 = AllocTypedTemp(false, result_reg_class);
-      RegStorage t_reg2 = AllocTypedTemp(false, result_reg_class);
-      LoadConstant(t_reg1, true_val);
-      LoadConstant(t_reg2, false_val);
-      OpRegImm(kOpCmp, rl_src.reg, 0);
-      LIR* it = OpIT(ccode, "E");
-      OpRegCopy(rl_result.reg, t_reg1);
-      OpRegCopy(rl_result.reg, t_reg2);
-      OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
-    }
-  } else {
-    // MOVE case
-    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
-    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
-    rl_true = LoadValue(rl_true, result_reg_class);
-    rl_false = LoadValue(rl_false, result_reg_class);
-    rl_result = EvalLoc(rl_dest, result_reg_class, true);
-    OpRegImm(kOpCmp, rl_src.reg, 0);
-    LIR* it = nullptr;
-    if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) {  // Is the "true" case already in place?
-      it = OpIT(NegateComparison(ccode), "");
-      OpRegCopy(rl_result.reg, rl_false.reg);
-    } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) {  // False case in place?
-      it = OpIT(ccode, "");
-      OpRegCopy(rl_result.reg, rl_true.reg);
-    } else {  // Normal - select between the two.
-      it = OpIT(ccode, "E");
-      OpRegCopy(rl_result.reg, rl_true.reg);
-      OpRegCopy(rl_result.reg, rl_false.reg);
-    }
-    OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
-  }
-  StoreValue(rl_dest, rl_result);
-}
-
-void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
-  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
-  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
-  // Normalize such that if either operand is constant, src2 will be constant.
-  ConditionCode ccode = mir->meta.ccode;
-  if (rl_src1.is_const) {
-    std::swap(rl_src1, rl_src2);
-    ccode = FlipComparisonOrder(ccode);
-  }
-  if (rl_src2.is_const) {
-    rl_src2 = UpdateLocWide(rl_src2);
-    // Do special compare/branch against simple const operand if not already in registers.
-    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
-    if ((rl_src2.location != kLocPhysReg) &&
-        ((ModifiedImmediate(Low32Bits(val)) >= 0) && (ModifiedImmediate(High32Bits(val)) >= 0))) {
-      GenFusedLongCmpImmBranch(bb, rl_src1, val, ccode);
-      return;
-    }
-  }
-  LIR* taken = &block_label_list_[bb->taken];
-  LIR* not_taken = &block_label_list_[bb->fall_through];
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
-  switch (ccode) {
-    case kCondEq:
-      OpCondBranch(kCondNe, not_taken);
-      break;
-    case kCondNe:
-      OpCondBranch(kCondNe, taken);
-      break;
-    case kCondLt:
-      OpCondBranch(kCondLt, taken);
-      OpCondBranch(kCondGt, not_taken);
-      ccode = kCondUlt;
-      break;
-    case kCondLe:
-      OpCondBranch(kCondLt, taken);
-      OpCondBranch(kCondGt, not_taken);
-      ccode = kCondLs;
-      break;
-    case kCondGt:
-      OpCondBranch(kCondGt, taken);
-      OpCondBranch(kCondLt, not_taken);
-      ccode = kCondHi;
-      break;
-    case kCondGe:
-      OpCondBranch(kCondGt, taken);
-      OpCondBranch(kCondLt, not_taken);
-      ccode = kCondUge;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected ccode: " << ccode;
-  }
-  OpRegReg(kOpCmp, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
-  OpCondBranch(ccode, taken);
-}
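
Both fused wide-compare paths above split a signed 64-bit relation into a signed compare of the high words plus, on a tie, an unsigned compare of the low words; that is why ccode is remapped to its unsigned counterpart before the low-word branch. A sketch of the decomposition for "less than":

#include <cstdint>

inline bool LessThan64(int64_t a, int64_t b) {
  int32_t a_hi = static_cast<int32_t>(a >> 32);
  int32_t b_hi = static_cast<int32_t>(b >> 32);
  if (a_hi != b_hi) {
    return a_hi < b_hi;  // Signed compare of the high words.
  }
  // High words tie: the low words compare as unsigned quantities.
  return static_cast<uint32_t>(a) < static_cast<uint32_t>(b);
}
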
-
-/*
- * Generate a register comparison to an immediate and branch.  Caller
- * is responsible for setting branch target field.
- */
-LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) {
-  LIR* branch = nullptr;
-  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
-  /*
-   * A common use of OpCmpImmBranch is for null checks, and using the Thumb 16-bit
-   * compare-and-branch if zero is ideal if it will reach.  However, because null checks
-   * branch forward to a slow path, they will frequently not reach - and thus have to
-   * be converted to a long form during assembly (which will trigger another assembly
-   * pass).  Here we estimate the branch distance for checks, and if large directly
-   * generate the long form in an attempt to avoid an extra assembly pass.
-   * TODO: consider interspersing slowpaths in code following unconditional branches.
-   */
-  bool skip = ((target != nullptr) && (target->opcode == kPseudoThrowTarget));
-  skip &= ((mir_graph_->GetNumDalvikInsns() - current_dalvik_offset_) > 64);
-  if (!skip && reg.Low8() && (check_value == 0)) {
-    if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
-      branch = NewLIR2((arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
-                       reg.GetReg(), 0);
-    } else if (arm_cond == kArmCondLs) {
-      // kArmCondLs is an unsigned less or equal. A comparison r <= 0 is then the same as cbz.
-      // This case happens for a bounds check of array[0].
-      branch = NewLIR2(kThumb2Cbz, reg.GetReg(), 0);
-    }
-  }
-
-  if (branch == nullptr) {
-    OpRegImm(kOpCmp, reg, check_value);
-    branch = NewLIR2(kThumbBCond, 0, arm_cond);
-  }
-
-  branch->target = target;
-  return branch;
-}
-
-LIR* ArmMir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
-  LIR* res;
-  int opcode;
-  // If src or dest is a pair, we'll be using low reg.
-  if (r_dest.IsPair()) {
-    r_dest = r_dest.GetLow();
-  }
-  if (r_src.IsPair()) {
-    r_src = r_src.GetLow();
-  }
-  if (r_dest.IsFloat() || r_src.IsFloat())
-    return OpFpRegCopy(r_dest, r_src);
-  if (r_dest.Low8() && r_src.Low8())
-    opcode = kThumbMovRR;
-  else if (!r_dest.Low8() && !r_src.Low8())
-     opcode = kThumbMovRR_H2H;
-  else if (r_dest.Low8())
-     opcode = kThumbMovRR_H2L;
-  else
-     opcode = kThumbMovRR_L2H;
-  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
-  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
-    res->flags.is_nop = true;
-  }
-  return res;
-}
-
-void ArmMir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
-  if (r_dest != r_src) {
-    LIR* res = OpRegCopyNoInsert(r_dest, r_src);
-    AppendLIR(res);
-  }
-}
-
-void ArmMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
-  if (r_dest != r_src) {
-    bool dest_fp = r_dest.IsFloat();
-    bool src_fp = r_src.IsFloat();
-    DCHECK(r_dest.Is64Bit());
-    DCHECK(r_src.Is64Bit());
-    // Note: If the register comes from the register allocator, it should never be a pair.
-    // But some functions in Mir2Lir assume 64-bit registers are 32-bit register pairs.
-    // TODO: Rework Mir2Lir::LoadArg() and Mir2Lir::LoadArgDirect().
-    if (dest_fp && r_dest.IsPair()) {
-      r_dest = As64BitFloatReg(r_dest);
-    }
-    if (src_fp && r_src.IsPair()) {
-      r_src = As64BitFloatReg(r_src);
-    }
-    if (dest_fp) {
-      if (src_fp) {
-        OpRegCopy(r_dest, r_src);
-      } else {
-        NewLIR3(kThumb2Fmdrr, r_dest.GetReg(), r_src.GetLowReg(), r_src.GetHighReg());
-      }
-    } else {
-      if (src_fp) {
-        NewLIR3(kThumb2Fmrrd, r_dest.GetLowReg(), r_dest.GetHighReg(), r_src.GetReg());
-      } else {
-        // Handle overlap
-        if (r_src.GetHighReg() != r_dest.GetLowReg()) {
-          OpRegCopy(r_dest.GetLow(), r_src.GetLow());
-          OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
-        } else if (r_src.GetLowReg() != r_dest.GetHighReg()) {
-          OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
-          OpRegCopy(r_dest.GetLow(), r_src.GetLow());
-        } else {
-          RegStorage r_tmp = AllocTemp();
-          OpRegCopy(r_tmp, r_src.GetHigh());
-          OpRegCopy(r_dest.GetLow(), r_src.GetLow());
-          OpRegCopy(r_dest.GetHigh(), r_tmp);
-          FreeTemp(r_tmp);
-        }
-      }
-    }
-  }
-}
-
-// Table of magic divisors
-struct MagicTable {
-  uint32_t magic;
-  uint32_t shift;
-  DividePattern pattern;
-};
-
-static const MagicTable magic_table[] = {
-  {0, 0, DivideNone},        // 0
-  {0, 0, DivideNone},        // 1
-  {0, 0, DivideNone},        // 2
-  {0x55555556, 0, Divide3},  // 3
-  {0, 0, DivideNone},        // 4
-  {0x66666667, 1, Divide5},  // 5
-  {0x2AAAAAAB, 0, Divide3},  // 6
-  {0x92492493, 2, Divide7},  // 7
-  {0, 0, DivideNone},        // 8
-  {0x38E38E39, 1, Divide5},  // 9
-  {0x66666667, 2, Divide5},  // 10
-  {0x2E8BA2E9, 1, Divide5},  // 11
-  {0x2AAAAAAB, 1, Divide5},  // 12
-  {0x4EC4EC4F, 2, Divide5},  // 13
-  {0x92492493, 3, Divide7},  // 14
-  {0x88888889, 3, Divide7},  // 15
-};
-
-// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
-bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED, bool is_div,
-                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
-  if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
-    return false;
-  }
-  DividePattern pattern = magic_table[lit].pattern;
-  if (pattern == DivideNone) {
-    return false;
-  }
-
-  RegStorage r_magic = AllocTemp();
-  LoadConstant(r_magic, magic_table[lit].magic);
-  rl_src = LoadValue(rl_src, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  RegStorage r_hi = AllocTemp();
-  RegStorage r_lo = AllocTemp();
-
-  // rl_dest and rl_src might overlap.
-  // Reuse r_hi to save the div result for the remainder case.
-  RegStorage r_div_result = is_div ? rl_result.reg : r_hi;
-
-  NewLIR4(kThumb2Smull, r_lo.GetReg(), r_hi.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
-  switch (pattern) {
-    case Divide3:
-      OpRegRegRegShift(kOpSub, r_div_result, r_hi, rl_src.reg, EncodeShift(kArmAsr, 31));
-      break;
-    case Divide5:
-      OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
-      OpRegRegRegShift(kOpRsub, r_div_result, r_lo, r_hi,
-                       EncodeShift(kArmAsr, magic_table[lit].shift));
-      break;
-    case Divide7:
-      OpRegReg(kOpAdd, r_hi, rl_src.reg);
-      OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
-      OpRegRegRegShift(kOpRsub, r_div_result, r_lo, r_hi,
-                       EncodeShift(kArmAsr, magic_table[lit].shift));
-      break;
-    default:
-      LOG(FATAL) << "Unexpected pattern: " << pattern;
-  }
-
-  if (!is_div) {
-    // div_result = src / lit
-    // tmp1 = div_result * lit
-    // dest = src - tmp1
-    RegStorage tmp1 = r_lo;
-    EasyMultiplyOp ops[2];
-
-    bool can_easy_multiply = GetEasyMultiplyTwoOps(lit, ops);
-    DCHECK(can_easy_multiply);
-
-    GenEasyMultiplyTwoOps(tmp1, r_div_result, ops);
-    OpRegRegReg(kOpSub, rl_result.reg, rl_src.reg, tmp1);
-  }
-
-  StoreValue(rl_dest, rl_result);
-  return true;
-}
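
A scalar sketch of the Divide3 pattern driven by the magic table: take the high 32 bits of the 64-bit product with 0x55555556, then subtract the sign word (src asr 31), exactly as the smull + sub-with-shift sequence above does. An arithmetic right shift of negative values is assumed, as on ARM.

#include <cstdint>

inline int32_t Div3(int32_t x) {
  int64_t product = 0x55555556LL * x;                // smull
  int32_t hi = static_cast<int32_t>(product >> 32);  // High 32 bits.
  return hi - (x >> 31);  // The asr-31 term corrects negative dividends.
}

// Usage: Div3(9) == 3, Div3(-7) == -2 (truncates toward zero).
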
-
-// Try to convert *lit to 1 RegRegRegShift/RegRegShift form.
-bool ArmMir2Lir::GetEasyMultiplyOp(int lit, ArmMir2Lir::EasyMultiplyOp* op) {
-  if (lit == 0) {
-    // Special case for *divide-by-zero*. The ops won't actually be used to generate code, as
-    // GenArithOpIntLit will directly generate exception-throwing code, and multiply-by-zero will
-    // have been optimized away earlier.
-    op->op = kOpInvalid;
-    op->shift = 0;
-    return true;
-  }
-
-  if (IsPowerOfTwo(lit)) {
-    op->op = kOpLsl;
-    op->shift = CTZ(lit);
-    return true;
-  }
-
-  // At this point lit != 1 (which is a power of two).
-  DCHECK_NE(lit, 1);
-  if (IsPowerOfTwo(lit - 1)) {
-    op->op = kOpAdd;
-    op->shift = CTZ(lit - 1);
-    return true;
-  }
-
-  if (lit == -1) {
-    // Can be created as neg.
-    op->op = kOpNeg;
-    op->shift = 0;
-    return true;
-  } else if (IsPowerOfTwo(lit + 1)) {
-    op->op = kOpRsub;
-    op->shift = CTZ(lit + 1);
-    return true;
-  }
-
-  op->op = kOpInvalid;
-  op->shift = 0;
-  return false;
-}
-
-// Try to convert *lit to 1~2 RegRegRegShift/RegRegShift forms.
-bool ArmMir2Lir::GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops) {
-  DCHECK_NE(lit, 1);           // A case of "1" should have been folded.
-  DCHECK_NE(lit, -1);          // A case of "-1" should have been folded.
-  if (GetEasyMultiplyOp(lit, &ops[0])) {
-    ops[1].op = kOpInvalid;
-    ops[1].shift = 0;
-    return true;
-  }
-
-  DCHECK_NE(lit, 0);           // Should be handled above.
-  DCHECK(!IsPowerOfTwo(lit));  // Same.
-
-  int lit1 = lit;              // With the DCHECKs, it's clear we don't get "0", "1" or "-1" for lit1.
-  uint32_t shift = CTZ(lit1);
-  if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
-    ops[1].op = kOpLsl;
-    ops[1].shift = shift;
-    return true;
-  }
-
-  lit1 = lit - 1;              // With the DCHECKs, it's clear we don't get "0" or "1" for lit1.
-  shift = CTZ(lit1);
-  if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
-    ops[1].op = kOpAdd;
-    ops[1].shift = shift;
-    return true;
-  }
-
-  lit1 = lit + 1;              // With the DCHECKs, it's clear we don't get "0" here.
-  shift = CTZ(lit1);
-  if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
-    ops[1].op = kOpRsub;
-    ops[1].shift = shift;
-    return true;
-  }
-
-  ops[1].op = kOpInvalid;
-  ops[1].shift = 0;
-
-  return false;
-}
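
A worked example of what GetEasyMultiplyTwoOps produces for lit = 10: the value has CTZ 1, and 10 >> 1 == 5 == 4 + 1, so the decomposition is {add-with-lsl-2, lsl-1}. A scalar sketch assuming two's-complement shifts, as the generated code does:

#include <cstdint>

inline int32_t MulBy10(int32_t x) {
  int32_t tmp1 = x + (x << 2);  // ops[0]: kOpAdd, shift 2  ->  5x
  return tmp1 << 1;             // ops[1]: kOpLsl, shift 1  -> 10x
}

// Usage: MulBy10(7) == 70.
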
-
-// Generate instructions to do multiply.
-// An additional temporary register is required
-// if we need to generate 2 instructions and src/dest overlap.
-void ArmMir2Lir::GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, EasyMultiplyOp* ops) {
-  // tmp1 = (src << shift1) + [src | -src | 0], or just -src
-  // dest = (tmp1 << shift2) + [src | -src | 0]
-
-  RegStorage r_tmp1;
-  if (ops[1].op == kOpInvalid) {
-    r_tmp1 = r_dest;
-  } else if (r_dest.GetReg() != r_src.GetReg()) {
-    r_tmp1 = r_dest;
-  } else {
-    r_tmp1 = AllocTemp();
-  }
-
-  switch (ops[0].op) {
-    case kOpLsl:
-      OpRegRegImm(kOpLsl, r_tmp1, r_src, ops[0].shift);
-      break;
-    case kOpAdd:
-      OpRegRegRegShift(kOpAdd, r_tmp1, r_src, r_src, EncodeShift(kArmLsl, ops[0].shift));
-      break;
-    case kOpRsub:
-      OpRegRegRegShift(kOpRsub, r_tmp1, r_src, r_src, EncodeShift(kArmLsl, ops[0].shift));
-      break;
-    case kOpNeg:
-      OpRegReg(kOpNeg, r_tmp1, r_src);
-      break;
-    default:
-      DCHECK_EQ(ops[0].op, kOpInvalid);
-      break;
-  }
-
-  switch (ops[1].op) {
-    case kOpInvalid:
-      return;
-    case kOpLsl:
-      OpRegRegImm(kOpLsl, r_dest, r_tmp1, ops[1].shift);
-      break;
-    case kOpAdd:
-      OpRegRegRegShift(kOpAdd, r_dest, r_src, r_tmp1, EncodeShift(kArmLsl, ops[1].shift));
-      break;
-    case kOpRsub:
-      OpRegRegRegShift(kOpRsub, r_dest, r_src, r_tmp1, EncodeShift(kArmLsl, ops[1].shift));
-      break;
-    // No negation allowed in second op.
-    default:
-      LOG(FATAL) << "Unexpected opcode passed to GenEasyMultiplyTwoOps";
-      break;
-  }
-}
-
-bool ArmMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
-  EasyMultiplyOp ops[2];
-
-  if (!GetEasyMultiplyTwoOps(lit, ops)) {
-    return false;
-  }
-
-  rl_src = LoadValue(rl_src, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-
-  GenEasyMultiplyTwoOps(rl_result.reg, rl_src.reg, ops);
-  StoreValue(rl_dest, rl_result);
-  return true;
-}
-
-RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
-                                  RegLocation rl_src1 ATTRIBUTE_UNUSED,
-                                  RegLocation rl_src2 ATTRIBUTE_UNUSED,
-                                  bool is_div ATTRIBUTE_UNUSED,
-                                  int flags ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
-  UNREACHABLE();
-}
-
-RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED,
-                                     RegLocation rl_src1 ATTRIBUTE_UNUSED,
-                                     int lit ATTRIBUTE_UNUSED,
-                                     bool is_div ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
-  UNREACHABLE();
-}
-
-RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-
-  // Put the literal in a temp.
-  RegStorage lit_temp = AllocTemp();
-  LoadConstant(lit_temp, lit);
-  // Use the generic case for div/rem with arg2 in a register.
-  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
-  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
-  FreeTemp(lit_temp);
-
-  return rl_result;
-}
-
-RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
-                                  bool is_div) {
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  if (is_div) {
-    // Simple case, use sdiv instruction.
-    OpRegRegReg(kOpDiv, rl_result.reg, reg1, reg2);
-  } else {
-    // Remainder case, use the following code:
-    // temp = reg1 / reg2      - integer division
-    // temp = temp * reg2
-    // dest = reg1 - temp
-
-    RegStorage temp = AllocTemp();
-    OpRegRegReg(kOpDiv, temp, reg1, reg2);
-    OpRegReg(kOpMul, temp, reg2);
-    OpRegRegReg(kOpSub, rl_result.reg, reg1, temp);
-    FreeTemp(temp);
-  }
-
-  return rl_result;
-}
-
-bool ArmMir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
-  DCHECK_EQ(cu_->instruction_set, kThumb2);
-  if (is_long) {
-    return false;
-  }
-  RegLocation rl_src1 = info->args[0];
-  RegLocation rl_src2 = info->args[1];
-  rl_src1 = LoadValue(rl_src1, kCoreReg);
-  rl_src2 = LoadValue(rl_src2, kCoreReg);
-  RegLocation rl_dest = InlineTarget(info);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
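-  // OpIT(cond, "E") opens a two-instruction IT block: the first mov executes when cond
-  // holds (select src2), the else slot executes otherwise (select src1).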
-  LIR* it = OpIT((is_min) ? kCondGt : kCondLt, "E");
-  OpRegReg(kOpMov, rl_result.reg, rl_src2.reg);
-  OpRegReg(kOpMov, rl_result.reg, rl_src1.reg);
-  OpEndIT(it);
-  StoreValue(rl_dest, rl_result);
-  return true;
-}
-
-bool ArmMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
-  RegLocation rl_src_address = info->args[0];  // long address
-  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
-  RegLocation rl_dest = InlineTarget(info);
-  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  if (size == k64) {
-    // Fake unaligned LDRD by two unaligned LDR instructions on ARMv7 with SCTLR.A set to 0.
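-    // Load the half that does not alias the address register first, so the address
-    // survives for the second load.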
-    if (rl_address.reg.GetReg() != rl_result.reg.GetLowReg()) {
-      Load32Disp(rl_address.reg, 0, rl_result.reg.GetLow());
-      Load32Disp(rl_address.reg, 4, rl_result.reg.GetHigh());
-    } else {
-      Load32Disp(rl_address.reg, 4, rl_result.reg.GetHigh());
-      Load32Disp(rl_address.reg, 0, rl_result.reg.GetLow());
-    }
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
-    // Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0.
-    LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
-    StoreValue(rl_dest, rl_result);
-  }
-  return true;
-}
-
-bool ArmMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
-  RegLocation rl_src_address = info->args[0];  // long address
-  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
-  RegLocation rl_src_value = info->args[2];  // [size] value
-  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
-  if (size == k64) {
-    // Fake unaligned STRD by two unaligned STR instructions on ARMv7 with SCTLR.A set to 0.
-    RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
-    StoreBaseDisp(rl_address.reg, 0, rl_value.reg.GetLow(), k32, kNotVolatile);
-    StoreBaseDisp(rl_address.reg, 4, rl_value.reg.GetHigh(), k32, kNotVolatile);
-  } else {
-    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
-    // Unaligned store with STR and STRH is allowed on ARMv7 with SCTLR.A set to 0.
-    RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
-    StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
-  }
-  return true;
-}
-
-// Generate a CAS with memory_order_seq_cst semantics.
-bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
-  DCHECK_EQ(cu_->instruction_set, kThumb2);
-  // Unused - RegLocation rl_src_unsafe = info->args[0];
-  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
-  RegLocation rl_src_offset = info->args[2];  // long low
-  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
-  RegLocation rl_src_expected = info->args[4];  // int, long or Object
-  // If is_long, high half is in info->args[5]
-  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
-  // If is_long, high half is in info->args[7]
-  RegLocation rl_dest = InlineTarget(info);  // boolean place for result
-
-  // We have only 5 temporary registers available and actually only 4 if the InlineTarget
-  // above locked one of the temps. For a straightforward CAS64 we need 7 registers:
-  // r_ptr (1), new_value (2), expected(2) and ldrexd result (2). If neither expected nor
-  // new_value is in a non-temp core register we shall reload them in the ldrex/strex loop
-  // into the same temps, reducing the number of required temps down to 5. We shall work
-  // around the potentially locked temp by using LR for r_ptr, unconditionally.
-  // TODO: Pass information about the need for more temps to the stack frame generation
-  // code so that we can rely on being able to allocate enough temps.
-  DCHECK(!GetRegInfo(rs_rARM_LR)->IsTemp());
-  MarkTemp(rs_rARM_LR);
-  FreeTemp(rs_rARM_LR);
-  LockTemp(rs_rARM_LR);
-  bool load_early = true;
-  if (is_long) {
-    RegStorage expected_reg = rl_src_expected.reg.IsPair() ? rl_src_expected.reg.GetLow() :
-        rl_src_expected.reg;
-    RegStorage new_val_reg = rl_src_new_value.reg.IsPair() ? rl_src_new_value.reg.GetLow() :
-        rl_src_new_value.reg;
-    bool expected_is_core_reg = rl_src_expected.location == kLocPhysReg && !expected_reg.IsFloat();
-    bool new_value_is_core_reg = rl_src_new_value.location == kLocPhysReg && !new_val_reg.IsFloat();
-    bool expected_is_good_reg = expected_is_core_reg && !IsTemp(expected_reg);
-    bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(new_val_reg);
-
-    if (!expected_is_good_reg && !new_value_is_good_reg) {
-      // None of expected/new_value is non-temp reg, need to load both late
-      load_early = false;
-      // Make sure they are not in the temp regs and the load will not be skipped.
-      if (expected_is_core_reg) {
-        FlushRegWide(rl_src_expected.reg);
-        ClobberSReg(rl_src_expected.s_reg_low);
-        ClobberSReg(GetSRegHi(rl_src_expected.s_reg_low));
-        rl_src_expected.location = kLocDalvikFrame;
-      }
-      if (new_value_is_core_reg) {
-        FlushRegWide(rl_src_new_value.reg);
-        ClobberSReg(rl_src_new_value.s_reg_low);
-        ClobberSReg(GetSRegHi(rl_src_new_value.s_reg_low));
-        rl_src_new_value.location = kLocDalvikFrame;
-      }
-    }
-  }
-
-  // Prevent reordering with prior memory operations.
-  GenMemBarrier(kAnyStore);
-
-  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
-  RegLocation rl_new_value;
-  if (!is_long) {
-    rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
-  } else if (load_early) {
-    rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
-  }
-
-  if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
-    // Mark card for object assuming new value is stored.
-    MarkGCCard(0, rl_new_value.reg, rl_object.reg);
-  }
-
-  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
-
-  RegStorage r_ptr = rs_rARM_LR;
-  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);
-
-  // Free now unneeded rl_object and rl_offset to give more temps.
-  ClobberSReg(rl_object.s_reg_low);
-  FreeTemp(rl_object.reg);
-  ClobberSReg(rl_offset.s_reg_low);
-  FreeTemp(rl_offset.reg);
-
-  RegLocation rl_expected;
-  if (!is_long) {
-    rl_expected = LoadValue(rl_src_expected, is_object ? kRefReg : kCoreReg);
-  } else if (load_early) {
-    rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
-  } else {
-    // NOTE: partially defined rl_expected & rl_new_value - but we just want the regs.
-    RegStorage low_reg = AllocTemp();
-    RegStorage high_reg = AllocTemp();
-    rl_new_value.reg = RegStorage::MakeRegPair(low_reg, high_reg);
-    rl_expected = rl_new_value;
-  }
-
-  // do {
-  //   tmp = [r_ptr] - expected;
-  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
-  // result = tmp != 0;
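-  //
-  // As Thumb2 for the 32-bit case (illustrative sketch):
-  //   retry: ldrex   tmp, [r_ptr]
-  //          subs    tmp, tmp, expected
-  //          itt     eq
-  //          strexeq tmp, new_value, [r_ptr]  @ tmp: 0 = stored, 1 = lost reservation
-  //          cmpeq   tmp, #1
-  //          beq     retry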
-
-  RegStorage r_tmp = AllocTemp();
-  LIR* target = NewLIR0(kPseudoTargetLabel);
-
-  LIR* it = nullptr;
-  if (is_long) {
-    RegStorage r_tmp_high = AllocTemp();
-    if (!load_early) {
-      LoadValueDirectWide(rl_src_expected, rl_expected.reg);
-    }
-    NewLIR3(kThumb2Ldrexd, r_tmp.GetReg(), r_tmp_high.GetReg(), r_ptr.GetReg());
-    OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetLow());
-    OpRegReg(kOpSub, r_tmp_high, rl_expected.reg.GetHigh());
-    if (!load_early) {
-      LoadValueDirectWide(rl_src_new_value, rl_new_value.reg);
-    }
-    // Make sure we use an ORR that sets the ccode.
-    if (r_tmp.Low8() && r_tmp_high.Low8()) {
-      NewLIR2(kThumbOrr, r_tmp.GetReg(), r_tmp_high.GetReg());
-    } else {
-      NewLIR4(kThumb2OrrRRRs, r_tmp.GetReg(), r_tmp.GetReg(), r_tmp_high.GetReg(), 0);
-    }
-    FreeTemp(r_tmp_high);  // Now unneeded
-
-    DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
-    it = OpIT(kCondEq, "T");
-    NewLIR4(kThumb2Strexd /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetLowReg(),
-            rl_new_value.reg.GetHighReg(), r_ptr.GetReg());
-
-  } else {
-    NewLIR3(kThumb2Ldrex, r_tmp.GetReg(), r_ptr.GetReg(), 0);
-    OpRegReg(kOpSub, r_tmp, rl_expected.reg);
-    DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
-    it = OpIT(kCondEq, "T");
-    NewLIR4(kThumb2Strex /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetReg(), r_ptr.GetReg(), 0);
-  }
-
-  // Still one conditional left from OpIT(kCondEq, "T") from either branch
-  OpRegImm(kOpCmp /* eq */, r_tmp, 1);
-  OpEndIT(it);
-
-  OpCondBranch(kCondEq, target);
-
-  if (!load_early) {
-    FreeTemp(rl_expected.reg);  // Now unneeded.
-  }
-
-  // Prevent reordering with subsequent memory operations.
-  GenMemBarrier(kLoadAny);
-
-  // result := (r_tmp != 0) ? 0 : 1;
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  OpRegRegImm(kOpRsub, rl_result.reg, r_tmp, 1);
-  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
-  it = OpIT(kCondUlt, "");
-  LoadConstant(rl_result.reg, 0); /* cc */
-  FreeTemp(r_tmp);  // Now unneeded.
-  OpEndIT(it);     // Barrier to terminate OpIT.
-
-  StoreValue(rl_dest, rl_result);
-
-  // Now, restore lr to its non-temp status.
-  Clobber(rs_rARM_LR);
-  UnmarkTemp(rs_rARM_LR);
-  return true;
-}
-
-bool ArmMir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
-  constexpr int kLargeArrayThreshold = 256;
-
-  RegLocation rl_src = info->args[0];
-  RegLocation rl_src_pos = info->args[1];
-  RegLocation rl_dst = info->args[2];
-  RegLocation rl_dst_pos = info->args[3];
-  RegLocation rl_length = info->args[4];
-  // Compile-time check; handle the exception in the non-inlined method to reduce related metadata.
-  if ((rl_src_pos.is_const && (mir_graph_->ConstantValue(rl_src_pos) < 0)) ||
-      (rl_dst_pos.is_const && (mir_graph_->ConstantValue(rl_dst_pos) < 0)) ||
-      (rl_length.is_const && (mir_graph_->ConstantValue(rl_length) < 0))) {
-    return false;
-  }
-
-  ClobberCallerSave();
-  LockCallTemps();  // Prepare for explicit register usage.
-  LockTemp(rs_r12);
-  RegStorage rs_src = rs_r0;
-  RegStorage rs_dst = rs_r1;
-  LoadValueDirectFixed(rl_src, rs_src);
-  LoadValueDirectFixed(rl_dst, rs_dst);
-
-  // Handle null pointer exception in slow-path.
-  LIR* src_check_branch = OpCmpImmBranch(kCondEq, rs_src, 0, nullptr);
-  LIR* dst_check_branch = OpCmpImmBranch(kCondEq, rs_dst, 0, nullptr);
-  // Handle potential overlapping in slow-path.
-  LIR* src_dst_same = OpCmpBranch(kCondEq, rs_src, rs_dst, nullptr);
-  // Handle exception or big length in slow-path.
-  RegStorage rs_length = rs_r2;
-  LoadValueDirectFixed(rl_length, rs_length);
-  LIR* len_neg_or_too_big = OpCmpImmBranch(kCondHi, rs_length, kLargeArrayThreshold, nullptr);
-  // Src bounds check.
-  RegStorage rs_pos = rs_r3;
-  RegStorage rs_arr_length = rs_r12;
-  LoadValueDirectFixed(rl_src_pos, rs_pos);
-  LIR* src_pos_negative = OpCmpImmBranch(kCondLt, rs_pos, 0, nullptr);
-  Load32Disp(rs_src, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
-  OpRegReg(kOpSub, rs_arr_length, rs_pos);
-  LIR* src_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);
-  // Dst bounds check.
-  LoadValueDirectFixed(rl_dst_pos, rs_pos);
-  LIR* dst_pos_negative = OpCmpImmBranch(kCondLt, rs_pos, 0, nullptr);
-  Load32Disp(rs_dst, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
-  OpRegReg(kOpSub, rs_arr_length, rs_pos);
-  LIR* dst_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);
-
-  // Everything is checked now.
-  OpRegImm(kOpAdd, rs_dst, mirror::Array::DataOffset(2).Int32Value());
-  OpRegReg(kOpAdd, rs_dst, rs_pos);
-  OpRegReg(kOpAdd, rs_dst, rs_pos);
-  OpRegImm(kOpAdd, rs_src, mirror::Array::DataOffset(2).Int32Value());
-  LoadValueDirectFixed(rl_src_pos, rs_pos);
-  OpRegReg(kOpAdd, rs_src, rs_pos);
-  OpRegReg(kOpAdd, rs_src, rs_pos);
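-  // rs_pos is added twice: elements are 16-bit chars, so the byte offset is 2 * pos.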
-
-  RegStorage rs_tmp = rs_pos;
-  OpRegRegImm(kOpLsl, rs_length, rs_length, 1);
-
-  // Copy one element.
-  OpRegRegImm(kOpAnd, rs_tmp, rs_length, 2);
-  LIR* jmp_to_begin_loop = OpCmpImmBranch(kCondEq, rs_tmp, 0, nullptr);
-  OpRegImm(kOpSub, rs_length, 2);
-  LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, kSignedHalf);
-  StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, kSignedHalf);
-
-  // Copy two elements.
-  LIR *begin_loop = NewLIR0(kPseudoTargetLabel);
-  LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_length, 0, nullptr);
-  OpRegImm(kOpSub, rs_length, 4);
-  LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k32);
-  StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k32);
-  OpUnconditionalBranch(begin_loop);
-
-  LIR *check_failed = NewLIR0(kPseudoTargetLabel);
-  LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
-  LIR* return_point = NewLIR0(kPseudoTargetLabel);
-
-  src_check_branch->target = check_failed;
-  dst_check_branch->target = check_failed;
-  src_dst_same->target = check_failed;
-  len_neg_or_too_big->target = check_failed;
-  src_pos_negative->target = check_failed;
-  src_bad_len->target = check_failed;
-  dst_pos_negative->target = check_failed;
-  dst_bad_len->target = check_failed;
-  jmp_to_begin_loop->target = begin_loop;
-  jmp_to_ret->target = return_point;
-
-  AddIntrinsicSlowPath(info, launchpad_branch, return_point);
-  ClobberCallerSave();  // We must clobber everything because the slow path will return here.
-
-  return true;
-}
-
-void ArmMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
-  LIR* lir = NewLIR2(kThumb2LdrPcRel12, reg.GetReg(), 0);
-  lir->target = target;
-}
-
-bool ArmMir2Lir::CanUseOpPcRelDexCacheArrayLoad() const {
-  return dex_cache_arrays_layout_.Valid();
-}
-
-void ArmMir2Lir::OpPcRelDexCacheArrayAddr(const DexFile* dex_file, int offset, RegStorage r_dest) {
-  LIR* movw = NewLIR2(kThumb2MovImm16, r_dest.GetReg(), 0);
-  LIR* movt = NewLIR2(kThumb2MovImm16H, r_dest.GetReg(), 0);
-  ArmOpcode add_pc_opcode = (r_dest.GetRegNum() < 8) ? kThumbAddRRLH : kThumbAddRRHH;
-  LIR* add_pc = NewLIR2(add_pc_opcode, r_dest.GetReg(), rs_rARM_PC.GetReg());
-  add_pc->flags.fixup = kFixupLabel;
-  movw->operands[2] = WrapPointer(dex_file);
-  movw->operands[3] = offset;
-  movw->operands[4] = WrapPointer(add_pc);
-  movt->operands[2] = movw->operands[2];
-  movt->operands[3] = movw->operands[3];
-  movt->operands[4] = movw->operands[4];
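-  // These operands are placeholders: the movw/movt pair is patched during assembly, once
-  // the PC-relative distance from the anchoring ADD PC instruction to the dex cache
-  // arrays is known.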
-  dex_cache_access_insns_.push_back(movw);
-  dex_cache_access_insns_.push_back(movt);
-}
-
-void ArmMir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest,
-                                          bool wide) {
-  DCHECK(!wide) << "Unsupported";
-  if (dex_cache_arrays_base_reg_.Valid()) {
-    LoadRefDisp(dex_cache_arrays_base_reg_, offset - dex_cache_arrays_min_offset_,
-                r_dest, kNotVolatile);
-  } else {
-    OpPcRelDexCacheArrayAddr(dex_file, offset, r_dest);
-    LoadRefDisp(r_dest, 0, r_dest, kNotVolatile);
-  }
-}
-
-LIR* ArmMir2Lir::OpVldm(RegStorage r_base, int count) {
-  return NewLIR3(kThumb2Vldms, r_base.GetReg(), rs_fr0.GetReg(), count);
-}
-
-LIR* ArmMir2Lir::OpVstm(RegStorage r_base, int count) {
-  return NewLIR3(kThumb2Vstms, r_base.GetReg(), rs_fr0.GetReg(), count);
-}
-
-void ArmMir2Lir::GenMaddMsubInt(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
-                                RegLocation rl_src3, bool is_sub) {
-  rl_src1 = LoadValue(rl_src1, kCoreReg);
-  rl_src2 = LoadValue(rl_src2, kCoreReg);
-  rl_src3 = LoadValue(rl_src3, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  NewLIR4(is_sub ? kThumb2Mls : kThumb2Mla, rl_result.reg.GetReg(), rl_src1.reg.GetReg(),
-          rl_src2.reg.GetReg(), rl_src3.reg.GetReg());
-  StoreValue(rl_dest, rl_result);
-}
-
-void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
-                                               RegLocation rl_result, int lit ATTRIBUTE_UNUSED,
-                                               int first_bit, int second_bit) {
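-  // lit == (1 << first_bit) + (1 << second_bit), so the product is
-  // (src + (src << (second_bit - first_bit))) << first_bit.
-  // E.g. lit = 10 = 2 + 8: add res, src, src, lsl #2; lsl res, res, #1.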
-  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
-                   EncodeShift(kArmLsl, second_bit - first_bit));
-  if (first_bit != 0) {
-    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
-  }
-}
-
-void ArmMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
-  DCHECK(reg.IsPair());   // TODO: support k64BitSolo.
-  RegStorage t_reg = AllocTemp();
-  NewLIR4(kThumb2OrrRRRs, t_reg.GetReg(), reg.GetLowReg(), reg.GetHighReg(), 0);
-  FreeTemp(t_reg);
-  GenDivZeroCheck(kCondEq);
-}
-
-// Test suspend flag, return target of taken suspend branch
-LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
-#ifdef ARM_R4_SUSPEND_FLAG
-  NewLIR2(kThumbSubRI8, rs_rARM_SUSPEND.GetReg(), 1);
-  return OpCondBranch((target == nullptr) ? kCondEq : kCondNe, target);
-#else
-  RegStorage t_reg = AllocTemp();
-  LoadBaseDisp(rs_rARM_SELF, Thread::ThreadFlagsOffset<4>().Int32Value(),
-    t_reg, kUnsignedHalf, kNotVolatile);
-  LIR* cmp_branch = OpCmpImmBranch((target == nullptr) ? kCondNe : kCondEq, t_reg,
-    0, target);
-  FreeTemp(t_reg);
-  return cmp_branch;
-#endif
-}
-
-// Decrement register and branch on condition
-LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
-  // Combine sub & test using sub setflags encoding here
-  OpRegRegImm(kOpSub, reg, reg, 1);  // For value == 1, this should set flags.
-  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
-  return OpCondBranch(c_code, target);
-}
-
-bool ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
-  if (!cu_->compiler_driver->GetInstructionSetFeatures()->IsSmp()) {
-    return false;
-  }
-  // Start off by using the last LIR as the barrier. If it is not sufficient, generate a new one.
-  LIR* barrier = last_lir_insn_;
-
-  int dmb_flavor;
-  // TODO: revisit Arm barrier kinds
-  switch (barrier_kind) {
-    case kAnyStore: dmb_flavor = kISH; break;
-    case kLoadAny: dmb_flavor = kISH; break;
-    case kStoreStore: dmb_flavor = kISHST; break;
-    case kAnyAny: dmb_flavor = kISH; break;
-    default:
-      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
-      dmb_flavor = kSY;  // quiet gcc.
-      break;
-  }
-
-  bool ret = false;
-
-  // If the same barrier already exists, don't generate another.
-  if (barrier == nullptr || barrier->opcode != kThumb2Dmb || barrier->operands[0] != dmb_flavor) {
-    barrier = NewLIR1(kThumb2Dmb, dmb_flavor);
-    ret = true;
-  }
-
-  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
-  DCHECK(!barrier->flags.use_def_invalid);
-  barrier->u.m.def_mask = &kEncodeAll;
-  return ret;
-}
-
-void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
-  rl_src = LoadValueWide(rl_src, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  RegStorage z_reg = AllocTemp();
-  LoadConstantNoClobber(z_reg, 0);
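-  // -x is computed as 0 - x over the pair: the low-word subtract sets the borrow that
-  // the following SBC on the high word consumes.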
-  // Check for destructive overlap
-  if (rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
-    RegStorage t_reg = AllocTemp();
-    OpRegCopy(t_reg, rl_result.reg.GetLow());
-    OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
-    OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, t_reg);
-    FreeTemp(t_reg);
-  } else {
-    OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
-    OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, rl_src.reg.GetHigh());
-  }
-  FreeTemp(z_reg);
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void ArmMir2Lir::GenMulLong(Instruction::Code opcode ATTRIBUTE_UNUSED, RegLocation rl_dest,
-                            RegLocation rl_src1, RegLocation rl_src2) {
-  /*
-   * tmp1     = src1.hi * src2.lo;  // src1.hi is no longer needed
-   * dest     = src1.lo * src2.lo;
-   * tmp1    += src1.lo * src2.hi;
-   * dest.hi += tmp1;
-   *
-   * To pull off inline multiply, we have a worst-case requirement of 7 temporary
-   * registers.  Normally for Arm, we get 5.  We can get to 6 by including
-   * lr in the temp set.  The only problematic case is all operands and result are
-   * distinct, and none have been promoted.  In that case, we can succeed by aggressively
-   * freeing operand temp registers after they are no longer needed.  All other cases
-   * can proceed normally.  We'll just punt on the case of the result having a misaligned
-   * overlap with either operand and send that case to a runtime handler.
-   */
-  RegLocation rl_result;
-  if (PartiallyIntersects(rl_src1, rl_dest) || (PartiallyIntersects(rl_src2, rl_dest))) {
-    FlushAllRegs();
-    CallRuntimeHelperRegLocationRegLocation(kQuickLmul, rl_src1, rl_src2, false);
-    rl_result = GetReturnWide(kCoreReg);
-    StoreValueWide(rl_dest, rl_result);
-    return;
-  }
-
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-
-  int reg_status = 0;
-  RegStorage res_lo;
-  RegStorage res_hi;
-  bool dest_promoted = rl_dest.location == kLocPhysReg && rl_dest.reg.Valid() &&
-      !IsTemp(rl_dest.reg.GetLow()) && !IsTemp(rl_dest.reg.GetHigh());
-  bool src1_promoted = !IsTemp(rl_src1.reg.GetLow()) && !IsTemp(rl_src1.reg.GetHigh());
-  bool src2_promoted = !IsTemp(rl_src2.reg.GetLow()) && !IsTemp(rl_src2.reg.GetHigh());
-  // Check if rl_dest is *not* either operand and we have enough temp registers.
-  if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
-      (dest_promoted || src1_promoted || src2_promoted)) {
-    // In this case, we do not need to manually allocate temp registers for result.
-    rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    res_lo = rl_result.reg.GetLow();
-    res_hi = rl_result.reg.GetHigh();
-  } else {
-    res_lo = AllocTemp();
-    if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || src1_promoted || src2_promoted) {
-      // In this case, we have enough temp registers for the result.
-      res_hi = AllocTemp();
-      reg_status = 1;
-    } else {
-      // In this case, all temps are now allocated.
-      // res_hi will be allocated after we can free src1_hi.
-      reg_status = 2;
-    }
-  }
-
-  // Temporarily add LR to the temp pool, and assign it to tmp1
-  MarkTemp(rs_rARM_LR);
-  FreeTemp(rs_rARM_LR);
-  RegStorage tmp1 = rs_rARM_LR;
-  LockTemp(rs_rARM_LR);
-
-  if (rl_src1.reg == rl_src2.reg) {
-    DCHECK(res_hi.Valid());
-    DCHECK(res_lo.Valid());
-    NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
-    NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src1.reg.GetLowReg(),
-            rl_src1.reg.GetLowReg());
-    OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
-  } else {
-    NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetHighReg());
-    if (reg_status == 2) {
-      DCHECK(!res_hi.Valid());
-      DCHECK_NE(rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
-      DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
-      // Will force free src1_hi, so must clobber.
-      Clobber(rl_src1.reg);
-      FreeTemp(rl_src1.reg.GetHigh());
-      res_hi = AllocTemp();
-    }
-    DCHECK(res_hi.Valid());
-    DCHECK(res_lo.Valid());
-    NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src2.reg.GetLowReg(),
-            rl_src1.reg.GetLowReg());
-    NewLIR4(kThumb2Mla, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg(),
-            tmp1.GetReg());
-    NewLIR4(kThumb2AddRRR, res_hi.GetReg(), tmp1.GetReg(), res_hi.GetReg(), 0);
-    if (reg_status == 2) {
-      FreeTemp(rl_src1.reg.GetLow());
-    }
-  }
-
-  if (reg_status != 0) {
-    // We had manually allocated registers for rl_result.
-    // Now construct a RegLocation.
-    rl_result = GetReturnWide(kCoreReg);  // Just using as a template.
-    rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
-  }
-
-  // Free tmp1 but keep LR as temp for StoreValueWide() if needed.
-  FreeTemp(tmp1);
-
-  StoreValueWide(rl_dest, rl_result);
-
-  // Now, restore lr to its non-temp status.
-  Clobber(rs_rARM_LR);
-  UnmarkTemp(rs_rARM_LR);
-}
-
-void ArmMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                                RegLocation rl_src2, int flags) {
-  switch (opcode) {
-    case Instruction::MUL_LONG:
-    case Instruction::MUL_LONG_2ADDR:
-      GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
-      return;
-    case Instruction::NEG_LONG:
-      GenNegLong(rl_dest, rl_src2);
-      return;
-
-    default:
-      break;
-  }
-
-  // Fallback for all other ops.
-  Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
-}
-
-/*
- * Generate array load
- */
-void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
-                             RegLocation rl_index, RegLocation rl_dest, int scale) {
-  RegisterClass reg_class = RegClassBySize(size);
-  int len_offset = mirror::Array::LengthOffset().Int32Value();
-  int data_offset;
-  RegLocation rl_result;
-  bool constant_index = rl_index.is_const;
-  rl_array = LoadValue(rl_array, kRefReg);
-  if (!constant_index) {
-    rl_index = LoadValue(rl_index, kCoreReg);
-  }
-
-  if (rl_dest.wide) {
-    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
-  } else {
-    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
-  }
-
-  // If index is constant, just fold it into the data offset
-  if (constant_index) {
-    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
-  }
-
-  /* null object? */
-  GenNullCheck(rl_array.reg, opt_flags);
-
-  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
-  RegStorage reg_len;
-  if (needs_range_check) {
-    reg_len = AllocTemp();
-    /* Get len */
-    Load32Disp(rl_array.reg, len_offset, reg_len);
-    MarkPossibleNullPointerException(opt_flags);
-  } else {
-    ForceImplicitNullCheck(rl_array.reg, opt_flags);
-  }
-  if (rl_dest.wide || rl_dest.fp || constant_index) {
-    RegStorage reg_ptr;
-    if (constant_index) {
-      reg_ptr = rl_array.reg;  // NOTE: must not alter reg_ptr in constant case.
-    } else {
-      // No special indexed operation, lea + load w/ displacement
-      reg_ptr = AllocTempRef();
-      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kArmLsl, scale));
-      FreeTemp(rl_index.reg);
-    }
-    rl_result = EvalLoc(rl_dest, reg_class, true);
-
-    if (needs_range_check) {
-      if (constant_index) {
-        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
-      } else {
-        GenArrayBoundsCheck(rl_index.reg, reg_len);
-      }
-      FreeTemp(reg_len);
-    }
-    LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, kNotVolatile);
-    if (!constant_index) {
-      FreeTemp(reg_ptr);
-    }
-    if (rl_dest.wide) {
-      StoreValueWide(rl_dest, rl_result);
-    } else {
-      StoreValue(rl_dest, rl_result);
-    }
-  } else {
-    // Offset base, then use indexed load
-    RegStorage reg_ptr = AllocTempRef();
-    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
-    FreeTemp(rl_array.reg);
-    rl_result = EvalLoc(rl_dest, reg_class, true);
-
-    if (needs_range_check) {
-      GenArrayBoundsCheck(rl_index.reg, reg_len);
-      FreeTemp(reg_len);
-    }
-    LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
-    FreeTemp(reg_ptr);
-    StoreValue(rl_dest, rl_result);
-  }
-}
-
-/*
- * Generate array store
- */
-void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
-                             RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
-  RegisterClass reg_class = RegClassBySize(size);
-  int len_offset = mirror::Array::LengthOffset().Int32Value();
-  bool constant_index = rl_index.is_const;
-
-  int data_offset;
-  if (size == k64 || size == kDouble) {
-    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
-  } else {
-    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
-  }
-
-  // If index is constant, just fold it into the data offset.
-  if (constant_index) {
-    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
-  }
-
-  rl_array = LoadValue(rl_array, kRefReg);
-  if (!constant_index) {
-    rl_index = LoadValue(rl_index, kCoreReg);
-  }
-
-  RegStorage reg_ptr;
-  bool allocated_reg_ptr_temp = false;
-  if (constant_index) {
-    reg_ptr = rl_array.reg;
-  } else if (IsTemp(rl_array.reg) && !card_mark) {
-    Clobber(rl_array.reg);
-    reg_ptr = rl_array.reg;
-  } else {
-    allocated_reg_ptr_temp = true;
-    reg_ptr = AllocTempRef();
-  }
-
-  /* null object? */
-  GenNullCheck(rl_array.reg, opt_flags);
-
-  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
-  RegStorage reg_len;
-  if (needs_range_check) {
-    reg_len = AllocTemp();
-    // NOTE: max live temps (4) here.
-    /* Get len */
-    Load32Disp(rl_array.reg, len_offset, reg_len);
-    MarkPossibleNullPointerException(opt_flags);
-  } else {
-    ForceImplicitNullCheck(rl_array.reg, opt_flags);
-  }
-  /* at this point, reg_ptr points to array, 2 live temps */
-  if (rl_src.wide || rl_src.fp || constant_index) {
-    if (rl_src.wide) {
-      rl_src = LoadValueWide(rl_src, reg_class);
-    } else {
-      rl_src = LoadValue(rl_src, reg_class);
-    }
-    if (!constant_index) {
-      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kArmLsl, scale));
-    }
-    if (needs_range_check) {
-      if (constant_index) {
-        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
-      } else {
-        GenArrayBoundsCheck(rl_index.reg, reg_len);
-      }
-      FreeTemp(reg_len);
-    }
-
-    StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
-  } else {
-    /* reg_ptr -> array data */
-    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
-    rl_src = LoadValue(rl_src, reg_class);
-    if (needs_range_check) {
-      GenArrayBoundsCheck(rl_index.reg, reg_len);
-      FreeTemp(reg_len);
-    }
-    StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
-  }
-  if (allocated_reg_ptr_temp) {
-    FreeTemp(reg_ptr);
-  }
-  if (card_mark) {
-    MarkGCCard(opt_flags, rl_src.reg, rl_array.reg);
-  }
-}
-
-void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
-                                   RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift,
-                                   int flags ATTRIBUTE_UNUSED) {
-  rl_src = LoadValueWide(rl_src, kCoreReg);
-  // Per spec, we only care about low 6 bits of shift amount.
-  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
-  if (shift_amount == 0) {
-    StoreValueWide(rl_dest, rl_src);
-    return;
-  }
-  if (PartiallyIntersects(rl_src, rl_dest)) {
-    GenShiftOpLong(opcode, rl_dest, rl_src, rl_shift);
-    return;
-  }
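-  // For 0 < n < 32 a 64-bit shift splits into two 32-bit shifts plus the bits crossing
-  // the word boundary, e.g. SHL: hi = (src_hi << n) | (src_lo >> (32 - n)), lo = src_lo << n.
-  // For n >= 32 one word is shifted and the other is filled with zeros or sign bits.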
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  switch (opcode) {
-    case Instruction::SHL_LONG:
-    case Instruction::SHL_LONG_2ADDR:
-      if (shift_amount == 1) {
-        OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), rl_src.reg.GetLow());
-        OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), rl_src.reg.GetHigh());
-      } else if (shift_amount == 32) {
-        OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg);
-        LoadConstant(rl_result.reg.GetLow(), 0);
-      } else if (shift_amount > 31) {
-        OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetLow(), shift_amount - 32);
-        LoadConstant(rl_result.reg.GetLow(), 0);
-      } else {
-        OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
-        OpRegRegRegShift(kOpOr, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), rl_src.reg.GetLow(),
-                         EncodeShift(kArmLsr, 32 - shift_amount));
-        OpRegRegImm(kOpLsl, rl_result.reg.GetLow(), rl_src.reg.GetLow(), shift_amount);
-      }
-      break;
-    case Instruction::SHR_LONG:
-    case Instruction::SHR_LONG_2ADDR:
-      if (shift_amount == 32) {
-        OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
-        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
-      } else if (shift_amount > 31) {
-        OpRegRegImm(kOpAsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
-        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
-      } else {
-        RegStorage t_reg = AllocTemp();
-        OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
-        OpRegRegRegShift(kOpOr, rl_result.reg.GetLow(), t_reg, rl_src.reg.GetHigh(),
-                         EncodeShift(kArmLsl, 32 - shift_amount));
-        FreeTemp(t_reg);
-        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
-      }
-      break;
-    case Instruction::USHR_LONG:
-    case Instruction::USHR_LONG_2ADDR:
-      if (shift_amount == 32) {
-        OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
-        LoadConstant(rl_result.reg.GetHigh(), 0);
-      } else if (shift_amount > 31) {
-        OpRegRegImm(kOpLsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
-        LoadConstant(rl_result.reg.GetHigh(), 0);
-      } else {
-        RegStorage t_reg = AllocTemp();
-        OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
-        OpRegRegRegShift(kOpOr, rl_result.reg.GetLow(), t_reg, rl_src.reg.GetHigh(),
-                         EncodeShift(kArmLsl, 32 - shift_amount));
-        FreeTemp(t_reg);
-        OpRegRegImm(kOpLsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
-      }
-      break;
-    default:
-      LOG(FATAL) << "Unexpected case";
-  }
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
-                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
-                                   int flags) {
-  if ((opcode == Instruction::SUB_LONG_2ADDR) || (opcode == Instruction::SUB_LONG)) {
-    if (!rl_src2.is_const) {
-      // Don't bother with special handling for subtract from immediate.
-      GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
-      return;
-    }
-  } else {
-    // Normalize
-    if (!rl_src2.is_const) {
-      DCHECK(rl_src1.is_const);
-      std::swap(rl_src1, rl_src2);
-    }
-  }
-  if (PartiallyIntersects(rl_src1, rl_dest)) {
-    GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
-    return;
-  }
-  DCHECK(rl_src2.is_const);
-  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
-  uint32_t val_lo = Low32Bits(val);
-  uint32_t val_hi = High32Bits(val);
-  int32_t mod_imm_lo = ModifiedImmediate(val_lo);
-  int32_t mod_imm_hi = ModifiedImmediate(val_hi);
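-  // ModifiedImmediate() yields a negative value when the constant cannot be encoded as a
-  // Thumb2 modified immediate; add/sub must then bail to the generic path below.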
-
-  // Only a subset of add/sub immediate instructions set carry - so bail if we don't fit
-  switch (opcode) {
-    case Instruction::ADD_LONG:
-    case Instruction::ADD_LONG_2ADDR:
-    case Instruction::SUB_LONG:
-    case Instruction::SUB_LONG_2ADDR:
-      if ((mod_imm_lo < 0) || (mod_imm_hi < 0)) {
-        GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
-        return;
-      }
-      break;
-    default:
-      break;
-  }
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  // NOTE: once we've done the EvalLoc on dest, we can no longer bail.
-  switch (opcode) {
-    case Instruction::ADD_LONG:
-    case Instruction::ADD_LONG_2ADDR:
-      NewLIR3(kThumb2AddRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
-      NewLIR3(kThumb2AdcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
-      break;
-    case Instruction::OR_LONG:
-    case Instruction::OR_LONG_2ADDR:
-      if ((val_lo != 0) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
-        OpRegRegImm(kOpOr, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
-      }
-      if ((val_hi != 0) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
-        OpRegRegImm(kOpOr, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
-      }
-      break;
-    case Instruction::XOR_LONG:
-    case Instruction::XOR_LONG_2ADDR:
-      OpRegRegImm(kOpXor, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
-      OpRegRegImm(kOpXor, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
-      break;
-    case Instruction::AND_LONG:
-    case Instruction::AND_LONG_2ADDR:
-      if ((val_lo != 0xffffffff) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
-        OpRegRegImm(kOpAnd, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
-      }
-      if ((val_hi != 0xffffffff) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
-        OpRegRegImm(kOpAnd, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
-      }
-      break;
-    case Instruction::SUB_LONG_2ADDR:
-    case Instruction::SUB_LONG:
-      NewLIR3(kThumb2SubRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
-      NewLIR3(kThumb2SbcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected opcode " << opcode;
-  }
-  StoreValueWide(rl_dest, rl_result);
-}
-
-bool ArmMir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
-                                  RegLocation rl_src, RegLocation rl_dest, int lit) {
-  if (lit < 2) {
-    return false;
-  }
-
-  // ARM either does not support a division instruction or it is potentially expensive. Look for
-  // more special cases.
-  if (!IsPowerOfTwo(lit)) {
-    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
-  }
-
-  return Mir2Lir::HandleEasyDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
deleted file mode 100644
index 355485e..0000000
--- a/compiler/dex/quick/arm/target_arm.cc
+++ /dev/null
@@ -1,1015 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_arm.h"
-
-#include <inttypes.h>
-
-#include <string>
-#include <sstream>
-
-#include "backend_arm.h"
-#include "base/logging.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir-inl.h"
-
-namespace art {
-
-#ifdef ARM_R4_SUSPEND_FLAG
-static constexpr RegStorage core_regs_arr[] =
-    {rs_r0, rs_r1, rs_r2, rs_r3, rs_rARM_SUSPEND, rs_r5, rs_r6, rs_r7, rs_r8, rs_rARM_SELF,
-     rs_r10, rs_r11, rs_r12, rs_rARM_SP, rs_rARM_LR, rs_rARM_PC};
-#else
-static constexpr RegStorage core_regs_arr[] =
-    {rs_r0, rs_r1, rs_r2, rs_r3, rs_r4, rs_r5, rs_r6, rs_r7, rs_r8, rs_rARM_SELF,
-     rs_r10, rs_r11, rs_r12, rs_rARM_SP, rs_rARM_LR, rs_rARM_PC};
-#endif
-static constexpr RegStorage sp_regs_arr[] =
-    {rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, rs_fr8, rs_fr9, rs_fr10,
-     rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15, rs_fr16, rs_fr17, rs_fr18, rs_fr19, rs_fr20,
-     rs_fr21, rs_fr22, rs_fr23, rs_fr24, rs_fr25, rs_fr26, rs_fr27, rs_fr28, rs_fr29, rs_fr30,
-     rs_fr31};
-static constexpr RegStorage dp_regs_arr[] =
-    {rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7, rs_dr8, rs_dr9, rs_dr10,
-     rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15};
-#ifdef ARM_R4_SUSPEND_FLAG
-static constexpr RegStorage reserved_regs_arr[] =
-    {rs_rARM_SUSPEND, rs_rARM_SELF, rs_rARM_SP, rs_rARM_LR, rs_rARM_PC};
-static constexpr RegStorage core_temps_arr[] = {rs_r0, rs_r1, rs_r2, rs_r3, rs_r12};
-#else
-static constexpr RegStorage reserved_regs_arr[] =
-    {rs_rARM_SELF, rs_rARM_SP, rs_rARM_LR, rs_rARM_PC};
-static constexpr RegStorage core_temps_arr[] = {rs_r0, rs_r1, rs_r2, rs_r3, rs_r4, rs_r12};
-#endif
-static constexpr RegStorage sp_temps_arr[] =
-    {rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7, rs_fr8, rs_fr9, rs_fr10,
-     rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15};
-static constexpr RegStorage dp_temps_arr[] =
-    {rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7};
-
-static constexpr ArrayRef<const RegStorage> empty_pool;
-static constexpr ArrayRef<const RegStorage> core_regs(core_regs_arr);
-static constexpr ArrayRef<const RegStorage> sp_regs(sp_regs_arr);
-static constexpr ArrayRef<const RegStorage> dp_regs(dp_regs_arr);
-static constexpr ArrayRef<const RegStorage> reserved_regs(reserved_regs_arr);
-static constexpr ArrayRef<const RegStorage> core_temps(core_temps_arr);
-static constexpr ArrayRef<const RegStorage> sp_temps(sp_temps_arr);
-static constexpr ArrayRef<const RegStorage> dp_temps(dp_temps_arr);
-
-RegLocation ArmMir2Lir::LocCReturn() {
-  return arm_loc_c_return;
-}
-
-RegLocation ArmMir2Lir::LocCReturnRef() {
-  return arm_loc_c_return;
-}
-
-RegLocation ArmMir2Lir::LocCReturnWide() {
-  return arm_loc_c_return_wide;
-}
-
-RegLocation ArmMir2Lir::LocCReturnFloat() {
-  return arm_loc_c_return_float;
-}
-
-RegLocation ArmMir2Lir::LocCReturnDouble() {
-  return arm_loc_c_return_double;
-}
-
-// Return a target-dependent special register.
-RegStorage ArmMir2Lir::TargetReg(SpecialTargetRegister reg) {
-  RegStorage res_reg;
-  switch (reg) {
-    case kSelf: res_reg = rs_rARM_SELF; break;
-#ifdef ARM_R4_SUSPEND_FLAG
-    case kSuspend: res_reg =  rs_rARM_SUSPEND; break;
-#else
-    case kSuspend: res_reg = RegStorage::InvalidReg(); break;
-#endif
-    case kLr: res_reg =  rs_rARM_LR; break;
-    case kPc: res_reg =  rs_rARM_PC; break;
-    case kSp: res_reg =  rs_rARM_SP; break;
-    case kArg0: res_reg = rs_r0; break;
-    case kArg1: res_reg = rs_r1; break;
-    case kArg2: res_reg = rs_r2; break;
-    case kArg3: res_reg = rs_r3; break;
-    case kFArg0: res_reg = kArm32QuickCodeUseSoftFloat ? rs_r0 : rs_fr0; break;
-    case kFArg1: res_reg = kArm32QuickCodeUseSoftFloat ? rs_r1 : rs_fr1; break;
-    case kFArg2: res_reg = kArm32QuickCodeUseSoftFloat ? rs_r2 : rs_fr2; break;
-    case kFArg3: res_reg = kArm32QuickCodeUseSoftFloat ? rs_r3 : rs_fr3; break;
-    case kFArg4: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr4; break;
-    case kFArg5: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr5; break;
-    case kFArg6: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr6; break;
-    case kFArg7: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr7; break;
-    case kFArg8: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr8; break;
-    case kFArg9: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr9; break;
-    case kFArg10: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr10; break;
-    case kFArg11: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr11; break;
-    case kFArg12: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr12; break;
-    case kFArg13: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr13; break;
-    case kFArg14: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr14; break;
-    case kFArg15: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr15; break;
-    case kRet0: res_reg = rs_r0; break;
-    case kRet1: res_reg = rs_r1; break;
-    case kInvokeTgt: res_reg = rs_rARM_LR; break;
-    case kHiddenArg: res_reg = rs_r12; break;
-    case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
-    case kCount: res_reg = RegStorage::InvalidReg(); break;
-    default: res_reg = RegStorage::InvalidReg();
-  }
-  return res_reg;
-}
-
-/*
- * Decode the register id.
- */
-ResourceMask ArmMir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
-  return GetRegMaskArm(reg);
-}
-
-constexpr ResourceMask ArmMir2Lir::GetRegMaskArm(RegStorage reg) {
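-  // E.g. d2 occupies the same two mask bits as s4/s5, so a use of d2 conflicts with a
-  // use of either half.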
-  return reg.IsDouble()
-      /* Each double register is equal to a pair of single-precision FP registers */
-      ? ResourceMask::TwoBits(reg.GetRegNum() * 2 + kArmFPReg0)
-      : ResourceMask::Bit(reg.IsSingle() ? reg.GetRegNum() + kArmFPReg0 : reg.GetRegNum());
-}
-
-constexpr ResourceMask ArmMir2Lir::EncodeArmRegList(int reg_list) {
-  return ResourceMask::RawMask(static_cast<uint64_t>(reg_list), 0u);
-}
-
-constexpr ResourceMask ArmMir2Lir::EncodeArmRegFpcsList(int reg_list) {
-  return ResourceMask::RawMask(static_cast<uint64_t>(reg_list) << kArmFPReg16, 0u);
-}
-
-ResourceMask ArmMir2Lir::GetPCUseDefEncoding() const {
-  return ResourceMask::Bit(kArmRegPC);
-}
-
-// Thumb2-specific setup.  TODO: inline?
-void ArmMir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
-                                          ResourceMask* use_mask, ResourceMask* def_mask) {
-  DCHECK_EQ(cu_->instruction_set, kThumb2);
-  DCHECK(!lir->flags.use_def_invalid);
-
-  int opcode = lir->opcode;
-
-  // These flags are somewhat uncommon - bypass if we can.
-  if ((flags & (REG_DEF_SP | REG_USE_SP | REG_DEF_LIST0 | REG_DEF_LIST1 |
-                REG_DEF_FPCS_LIST0 | REG_DEF_FPCS_LIST2 | REG_USE_PC | IS_IT | REG_USE_LIST0 |
-                REG_USE_LIST1 | REG_USE_FPCS_LIST0 | REG_USE_FPCS_LIST2 | REG_DEF_LR)) != 0) {
-    if (flags & REG_DEF_SP) {
-      def_mask->SetBit(kArmRegSP);
-    }
-
-    if (flags & REG_USE_SP) {
-      use_mask->SetBit(kArmRegSP);
-    }
-
-    if (flags & REG_DEF_LIST0) {
-      def_mask->SetBits(EncodeArmRegList(lir->operands[0]));
-    }
-
-    if (flags & REG_DEF_LIST1) {
-      def_mask->SetBits(EncodeArmRegList(lir->operands[1]));
-    }
-
-    if (flags & REG_DEF_FPCS_LIST0) {
-      def_mask->SetBits(EncodeArmRegList(lir->operands[0]));
-    }
-
-    if (flags & REG_DEF_FPCS_LIST2) {
-      for (int i = 0; i < lir->operands[2]; i++) {
-        SetupRegMask(def_mask, lir->operands[1] + i);
-      }
-    }
-
-    if (flags & REG_USE_PC) {
-      use_mask->SetBit(kArmRegPC);
-    }
-
-    /* Conservatively treat the IT block */
-    if (flags & IS_IT) {
-      *def_mask = kEncodeAll;
-    }
-
-    if (flags & REG_USE_LIST0) {
-      use_mask->SetBits(EncodeArmRegList(lir->operands[0]));
-    }
-
-    if (flags & REG_USE_LIST1) {
-      use_mask->SetBits(EncodeArmRegList(lir->operands[1]));
-    }
-
-    if (flags & REG_USE_FPCS_LIST0) {
-      use_mask->SetBits(EncodeArmRegList(lir->operands[0]));
-    }
-
-    if (flags & REG_USE_FPCS_LIST2) {
-      for (int i = 0; i < lir->operands[2]; i++) {
-        SetupRegMask(use_mask, lir->operands[1] + i);
-      }
-    }
-    /* Fixup for kThumbPush/lr and kThumbPop/pc */
-    if (opcode == kThumbPush || opcode == kThumbPop) {
-      constexpr ResourceMask r8Mask = GetRegMaskArm(rs_r8);
-      if ((opcode == kThumbPush) && (use_mask->Intersects(r8Mask))) {
-        use_mask->ClearBits(r8Mask);
-        use_mask->SetBit(kArmRegLR);
-      } else if ((opcode == kThumbPop) && (def_mask->Intersects(r8Mask))) {
-        def_mask->ClearBits(r8Mask);
-        def_mask->SetBit(kArmRegPC);
-      }
-    }
-    if (flags & REG_DEF_LR) {
-      def_mask->SetBit(kArmRegLR);
-    }
-  }
-}
-
-ArmConditionCode ArmMir2Lir::ArmConditionEncoding(ConditionCode ccode) {
-  ArmConditionCode res;
-  switch (ccode) {
-    case kCondEq: res = kArmCondEq; break;
-    case kCondNe: res = kArmCondNe; break;
-    case kCondCs: res = kArmCondCs; break;
-    case kCondCc: res = kArmCondCc; break;
-    case kCondUlt: res = kArmCondCc; break;
-    case kCondUge: res = kArmCondCs; break;
-    case kCondMi: res = kArmCondMi; break;
-    case kCondPl: res = kArmCondPl; break;
-    case kCondVs: res = kArmCondVs; break;
-    case kCondVc: res = kArmCondVc; break;
-    case kCondHi: res = kArmCondHi; break;
-    case kCondLs: res = kArmCondLs; break;
-    case kCondGe: res = kArmCondGe; break;
-    case kCondLt: res = kArmCondLt; break;
-    case kCondGt: res = kArmCondGt; break;
-    case kCondLe: res = kArmCondLe; break;
-    case kCondAl: res = kArmCondAl; break;
-    case kCondNv: res = kArmCondNv; break;
-    default:
-      LOG(FATAL) << "Bad condition code " << ccode;
-      res = static_cast<ArmConditionCode>(0);  // Quiet gcc
-  }
-  return res;
-}
-
-static const char* core_reg_names[16] = {
-  "r0",
-  "r1",
-  "r2",
-  "r3",
-  "r4",
-  "r5",
-  "r6",
-  "r7",
-  "r8",
-  "rSELF",
-  "r10",
-  "r11",
-  "r12",
-  "sp",
-  "lr",
-  "pc",
-};
-
-static const char* shift_names[4] = {
-  "lsl",
-  "lsr",
-  "asr",
-  "ror"};
-
-/* Decode and print an ARM register list */
-static char* DecodeRegList(int opcode, int vector, char* buf, size_t buf_size) {
-  int i;
-  bool printed = false;
-  buf[0] = 0;
-  for (i = 0; i < 16; i++, vector >>= 1) {
-    if (vector & 0x1) {
-      int reg_id = i;
-      if (opcode == kThumbPush && i == 8) {
-        reg_id = rs_rARM_LR.GetRegNum();
-      } else if (opcode == kThumbPop && i == 8) {
-        reg_id = rs_rARM_PC.GetRegNum();
-      }
-      if (printed) {
-        snprintf(buf + strlen(buf), buf_size - strlen(buf), ", r%d", reg_id);
-      } else {
-        printed = true;
-        snprintf(buf, buf_size, "r%d", reg_id);
-      }
-    }
-  }
-  return buf;
-}
-
-static char* DecodeFPCSRegList(int count, int base, char* buf, size_t buf_size) {
-  snprintf(buf, buf_size, "s%d", base);
-  for (int i = 1; i < count; i++) {
-    snprintf(buf + strlen(buf), buf_size - strlen(buf), ", s%d", base + i);
-  }
-  return buf;
-}
-
-static int32_t ExpandImmediate(int value) {
-  int32_t mode = (value & 0xf00) >> 8;
-  uint32_t bits = value & 0xff;
-  switch (mode) {
-    case 0:
-      return bits;
-    case 1:
-      return (bits << 16) | bits;
-    case 2:
-      return (bits << 24) | (bits << 8);
-    case 3:
-      return (bits << 24) | (bits << 16) | (bits << 8) | bits;
-    default:
-      break;
-  }
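-  // E.g. value 0x1AB is mode 1 with bits 0xAB and expands to 0x00AB00AB. Values that
-  // reach here rotate the 8-bit constant (0x80 | bits) right by (value & 0xf80) >> 7.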
-  bits = (bits | 0x80) << 24;
-  return bits >> (((value & 0xf80) >> 7) - 8);
-}
-
-const char* cc_names[] = {"eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
-                         "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"};
-/*
- * Interpret a format string and build a display string.
- * See the format key in assemble_arm.cc.
- */
-std::string ArmMir2Lir::BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) {
-  std::string buf;
-  int i;
-  const char* fmt_end = &fmt[strlen(fmt)];
-  char tbuf[256];
-  const char* name;
-  char nc;
-  while (fmt < fmt_end) {
-    int operand;
-    if (*fmt == '!') {
-      fmt++;
-      DCHECK_LT(fmt, fmt_end);
-      nc = *fmt++;
-      if (nc == '!') {
-        strcpy(tbuf, "!");
-      } else {
-         DCHECK_LT(fmt, fmt_end);
-         DCHECK_LT(static_cast<unsigned>(nc-'0'), 4U);
-         operand = lir->operands[nc-'0'];
-         switch (*fmt++) {
-           case 'H':
-             if (operand != 0) {
-               snprintf(tbuf, arraysize(tbuf), ", %s %d", shift_names[operand & 0x3], operand >> 2);
-             } else {
-               strcpy(tbuf, "");
-             }
-             break;
-           case 'B':
-             switch (operand) {
-               case kSY:
-                 name = "sy";
-                 break;
-               case kST:
-                 name = "st";
-                 break;
-               case kISH:
-                 name = "ish";
-                 break;
-               case kISHST:
-                 name = "ishst";
-                 break;
-               case kNSH:
-                 name = "nsh";
-                 break;
-               case kNSHST:
-                 name = "shst";
-                 break;
-               default:
-                 name = "DecodeError2";
-                 break;
-             }
-             strcpy(tbuf, name);
-             break;
-           case 'b':
-             strcpy(tbuf, "0000");
-             for (i = 3; i >= 0; i--) {
-               tbuf[i] += operand & 1;
-               operand >>= 1;
-             }
-             break;
-           case 'n':
-             operand = ~ExpandImmediate(operand);
-             snprintf(tbuf, arraysize(tbuf), "%d [%#x]", operand, operand);
-             break;
-           case 'm':
-             operand = ExpandImmediate(operand);
-             snprintf(tbuf, arraysize(tbuf), "%d [%#x]", operand, operand);
-             break;
-           case 's':
-             snprintf(tbuf, arraysize(tbuf), "s%d", RegStorage::RegNum(operand));
-             break;
-           case 'S':
-             snprintf(tbuf, arraysize(tbuf), "d%d", RegStorage::RegNum(operand));
-             break;
-           case 'h':
-             snprintf(tbuf, arraysize(tbuf), "%04x", operand);
-             break;
-           case 'M':
-           case 'd':
-             snprintf(tbuf, arraysize(tbuf), "%d", operand);
-             break;
-           case 'C':
-             operand = RegStorage::RegNum(operand);
-             DCHECK_LT(operand, static_cast<int>(arraysize(core_reg_names)));
-             snprintf(tbuf, arraysize(tbuf), "%s", core_reg_names[operand]);
-             break;
-           case 'E':
-             snprintf(tbuf, arraysize(tbuf), "%d", operand*4);
-             break;
-           case 'F':
-             snprintf(tbuf, arraysize(tbuf), "%d", operand*2);
-             break;
-           case 'c':
-             strcpy(tbuf, cc_names[operand]);
-             break;
-           case 't':
-             snprintf(tbuf, arraysize(tbuf), "0x%08" PRIxPTR " (L%p)",
-                 reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4 + (operand << 1),
-                 lir->target);
-             break;
-           case 'T':
-             snprintf(tbuf, arraysize(tbuf), "%s", PrettyMethod(
-                 static_cast<uint32_t>(lir->operands[1]),
-                 *UnwrapPointer<DexFile>(lir->operands[2])).c_str());
-             break;
-           case 'u': {
-             int offset_1 = lir->operands[0];
-             int offset_2 = NEXT_LIR(lir)->operands[0];
-             uintptr_t target =
-                 (((reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4) &
-                 ~3) + (offset_1 << 21 >> 9) + (offset_2 << 1)) &
-                 0xfffffffc;
-             snprintf(tbuf, arraysize(tbuf), "%p", reinterpret_cast<void *>(target));
-             break;
-          }
-
-           /* Nothing to print for BLX_2 */
-           case 'v':
-             strcpy(tbuf, "see above");
-             break;
-           case 'R':
-             DecodeRegList(lir->opcode, operand, tbuf, arraysize(tbuf));
-             break;
-           case 'P':
-             DecodeFPCSRegList(operand, 16, tbuf, arraysize(tbuf));
-             break;
-           case 'Q':
-             DecodeFPCSRegList(operand, 0, tbuf, arraysize(tbuf));
-             break;
-           default:
-             strcpy(tbuf, "DecodeError1");
-             break;
-        }
-        buf += tbuf;
-      }
-    } else {
-       buf += *fmt++;
-    }
-  }
-  // Dump thread offset.
-  std::string fmt_str = GetTargetInstFmt(lir->opcode);
-  if (std::string::npos != fmt_str.find(", [!1C, #!2") && rARM_SELF == lir->operands[1] &&
-      std::string::npos != buf.find(", [")) {
-    int offset = lir->operands[2];
-    if (std::string::npos != fmt_str.find("#!2d")) {
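-      // '#!2d' prints the raw operand, so the offset is already in bytes.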
-    } else if (std::string::npos != fmt_str.find("#!2E")) {
-      offset *= 4;
-    } else if (std::string::npos != fmt_str.find("#!2F")) {
-      offset *= 2;
-    } else {
-      LOG(FATAL) << "Should not reach here";
-    }
-    std::ostringstream tmp_stream;
-    Thread::DumpThreadOffset<4>(tmp_stream, offset);
-    buf += "  ; ";
-    buf += tmp_stream.str();
-  }
-  return buf;
-}
-
-void ArmMir2Lir::DumpResourceMask(LIR* arm_lir, const ResourceMask& mask, const char* prefix) {
-  char buf[256];
-  buf[0] = 0;
-
-  if (mask.Equals(kEncodeAll)) {
-    strcpy(buf, "all");
-  } else {
-    char num[8];
-    int i;
-
-    for (i = 0; i < kArmRegEnd; i++) {
-      if (mask.HasBit(i)) {
-        snprintf(num, arraysize(num), "%d ", i);
-        strcat(buf, num);
-      }
-    }
-
-    if (mask.HasBit(ResourceMask::kCCode)) {
-      strcat(buf, "cc ");
-    }
-    if (mask.HasBit(ResourceMask::kFPStatus)) {
-      strcat(buf, "fpcc ");
-    }
-
-    /* Memory bits */
-    if (arm_lir && (mask.HasBit(ResourceMask::kDalvikReg))) {
-      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
-               DECODE_ALIAS_INFO_REG(arm_lir->flags.alias_info),
-               DECODE_ALIAS_INFO_WIDE(arm_lir->flags.alias_info) ? "(+1)" : "");
-    }
-    if (mask.HasBit(ResourceMask::kLiteral)) {
-      strcat(buf, "lit ");
-    }
-
-    if (mask.HasBit(ResourceMask::kHeapRef)) {
-      strcat(buf, "heap ");
-    }
-    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
-      strcat(buf, "noalias ");
-    }
-  }
-  if (buf[0]) {
-    LOG(INFO) << prefix << ": " << buf;
-  }
-}
-
-bool ArmMir2Lir::IsUnconditionalBranch(LIR* lir) {
-  return ((lir->opcode == kThumbBUncond) || (lir->opcode == kThumb2BUncond));
-}
-
-RegisterClass ArmMir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
-  if (UNLIKELY(is_volatile)) {
-    // On arm, atomic 64-bit load/store requires a core register pair.
-    // Smaller aligned load/store is atomic for both core and fp registers.
-    if (size == k64 || size == kDouble) {
-      return kCoreReg;
-    }
-  }
-  return RegClassBySize(size);
-}
-
-ArmMir2Lir::ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
-    : Mir2Lir(cu, mir_graph, arena),
-      call_method_insns_(arena->Adapter()),
-      dex_cache_access_insns_(arena->Adapter()),
-      dex_cache_arrays_base_reg_(RegStorage::InvalidReg()) {
-  call_method_insns_.reserve(100);
-  // Sanity check - make sure encoding map lines up.
-  for (int i = 0; i < kArmLast; i++) {
-    DCHECK_EQ(ArmMir2Lir::EncodingMap[i].opcode, i)
-        << "Encoding order for " << ArmMir2Lir::EncodingMap[i].name
-        << " is wrong: expecting " << i << ", seeing "
-        << static_cast<int>(ArmMir2Lir::EncodingMap[i].opcode);
-  }
-}
-
-Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
-                          ArenaAllocator* const arena) {
-  return new ArmMir2Lir(cu, mir_graph, arena);
-}
-
-void ArmMir2Lir::CompilerInitializeRegAlloc() {
-  reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs, empty_pool /* core64 */,
-                                            sp_regs, dp_regs,
-                                            reserved_regs, empty_pool /* reserved64 */,
-                                            core_temps, empty_pool /* core64_temps */,
-                                            sp_temps, dp_temps));
-
-  // Target-specific adjustments.
-
-  // Alias single precision floats to appropriate half of overlapping double.
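-  // e.g. s4 and s5 are both backed by d2, with s5 in the high half.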
-  for (RegisterInfo* info : reg_pool_->sp_regs_) {
-    int sp_reg_num = info->GetReg().GetRegNum();
-    int dp_reg_num = sp_reg_num >> 1;
-    RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
-    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
-    // Double precision register's master storage should refer to itself.
-    DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
-    // Redirect the single precision register's master storage to the backing double.
-    info->SetMaster(dp_reg_info);
-    // Singles should show a single 32-bit mask bit, at first referring to the low half.
-    DCHECK_EQ(info->StorageMask(), RegisterInfo::kLowSingleStorageMask);
-    if (sp_reg_num & 1) {
-      // For odd singles, change to use the high word of the backing double.
-      info->SetStorageMask(RegisterInfo::kHighSingleStorageMask);
-    }
-  }
-
-#ifdef ARM_R4_SUSPEND_FLAG
-  // TODO: re-enable this when we can safely save r4 over the suspension code path.
-  bool no_suspend = NO_SUSPEND;  // || !Runtime::Current()->ExplicitSuspendChecks();
-  if (no_suspend) {
-    GetRegInfo(rs_rARM_SUSPEND)->MarkFree();
-  }
-#endif
-
-  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
-  // TODO: adjust when we roll to hard float calling convention.
-  reg_pool_->next_core_reg_ = 2;
-  reg_pool_->next_sp_reg_ = 0;
-  reg_pool_->next_dp_reg_ = 0;
-}
-
-/*
- * TUNING: is this a true leaf?  We can't just use METHOD_IS_LEAF to decide,
- * as some instructions might call out to C/assembly helper functions.
- * Until that machinery is in place, always spill lr.
- */
-
-void ArmMir2Lir::AdjustSpillMask() {
-  core_spill_mask_ |= (1 << rs_rARM_LR.GetRegNum());
-  num_core_spills_++;
-}
-
-/*
- * Mark a callee-save fp register as promoted.  Note that
- * vpush/vpop uses contiguous register lists so we must
- * include any holes in the mask.  Associate holes with
- * Dalvik register INVALID_VREG (0xFFFFU).
- */
-void ArmMir2Lir::MarkPreservedSingle(int v_reg, RegStorage reg) {
-  DCHECK_GE(reg.GetRegNum(), ARM_FP_CALLEE_SAVE_BASE);
-  int adjusted_reg_num = reg.GetRegNum() - ARM_FP_CALLEE_SAVE_BASE;
-  // Ensure fp_vmap_table is large enough
-  int table_size = fp_vmap_table_.size();
-  for (int i = table_size; i < (adjusted_reg_num + 1); i++) {
-    fp_vmap_table_.push_back(INVALID_VREG);
-  }
-  // Add the current mapping
-  fp_vmap_table_[adjusted_reg_num] = v_reg;
-  // The size of fp_vmap_table_ is the high-water mark; use it to set the spill mask.
-  num_fp_spills_ = fp_vmap_table_.size();
-  fp_spill_mask_ = ((1 << num_fp_spills_) - 1) << ARM_FP_CALLEE_SAVE_BASE;
-}
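-// Illustrative example: promoting s17 and then s19 (leaving s16 and s18
-// unused) fills fp_vmap_table_ with {INVALID_VREG, v1, INVALID_VREG, v2}
-// and widens fp_spill_mask_ to the contiguous range s16-s19.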
-
-void ArmMir2Lir::MarkPreservedDouble(int v_reg, RegStorage reg) {
-  // TEMP: perform as 2 singles.
-  int reg_num = reg.GetRegNum() << 1;
-  RegStorage lo = RegStorage::Solo32(RegStorage::kFloatingPoint | reg_num);
-  RegStorage hi = RegStorage::Solo32(RegStorage::kFloatingPoint | reg_num | 1);
-  MarkPreservedSingle(v_reg, lo);
-  MarkPreservedSingle(v_reg + 1, hi);
-}
-
-/* Clobber all regs that might be used by an external C call */
-void ArmMir2Lir::ClobberCallerSave() {
-  // TODO: rework this - it's gotten even more ugly.
-  Clobber(rs_r0);
-  Clobber(rs_r1);
-  Clobber(rs_r2);
-  Clobber(rs_r3);
-  Clobber(rs_r12);
-  Clobber(rs_r14lr);
-  Clobber(rs_fr0);
-  Clobber(rs_fr1);
-  Clobber(rs_fr2);
-  Clobber(rs_fr3);
-  Clobber(rs_fr4);
-  Clobber(rs_fr5);
-  Clobber(rs_fr6);
-  Clobber(rs_fr7);
-  Clobber(rs_fr8);
-  Clobber(rs_fr9);
-  Clobber(rs_fr10);
-  Clobber(rs_fr11);
-  Clobber(rs_fr12);
-  Clobber(rs_fr13);
-  Clobber(rs_fr14);
-  Clobber(rs_fr15);
-  Clobber(rs_dr0);
-  Clobber(rs_dr1);
-  Clobber(rs_dr2);
-  Clobber(rs_dr3);
-  Clobber(rs_dr4);
-  Clobber(rs_dr5);
-  Clobber(rs_dr6);
-  Clobber(rs_dr7);
-}
-
-RegLocation ArmMir2Lir::GetReturnWideAlt() {
-  RegLocation res = LocCReturnWide();
-  res.reg.SetLowReg(rs_r2.GetReg());
-  res.reg.SetHighReg(rs_r3.GetReg());
-  Clobber(rs_r2);
-  Clobber(rs_r3);
-  MarkInUse(rs_r2);
-  MarkInUse(rs_r3);
-  MarkWide(res.reg);
-  return res;
-}
-
-RegLocation ArmMir2Lir::GetReturnAlt() {
-  RegLocation res = LocCReturn();
-  res.reg.SetReg(rs_r1.GetReg());
-  Clobber(rs_r1);
-  MarkInUse(rs_r1);
-  return res;
-}
-
-/* To be used when explicitly managing register use */
-void ArmMir2Lir::LockCallTemps() {
-  LockTemp(rs_r0);
-  LockTemp(rs_r1);
-  LockTemp(rs_r2);
-  LockTemp(rs_r3);
-  if (!kArm32QuickCodeUseSoftFloat) {
-    LockTemp(rs_fr0);
-    LockTemp(rs_fr1);
-    LockTemp(rs_fr2);
-    LockTemp(rs_fr3);
-    LockTemp(rs_fr4);
-    LockTemp(rs_fr5);
-    LockTemp(rs_fr6);
-    LockTemp(rs_fr7);
-    LockTemp(rs_fr8);
-    LockTemp(rs_fr9);
-    LockTemp(rs_fr10);
-    LockTemp(rs_fr11);
-    LockTemp(rs_fr12);
-    LockTemp(rs_fr13);
-    LockTemp(rs_fr14);
-    LockTemp(rs_fr15);
-    LockTemp(rs_dr0);
-    LockTemp(rs_dr1);
-    LockTemp(rs_dr2);
-    LockTemp(rs_dr3);
-    LockTemp(rs_dr4);
-    LockTemp(rs_dr5);
-    LockTemp(rs_dr6);
-    LockTemp(rs_dr7);
-  }
-}
-
-/* To be used when explicitly managing register use */
-void ArmMir2Lir::FreeCallTemps() {
-  FreeTemp(rs_r0);
-  FreeTemp(rs_r1);
-  FreeTemp(rs_r2);
-  FreeTemp(rs_r3);
-  FreeTemp(TargetReg(kHiddenArg));
-  if (!kArm32QuickCodeUseSoftFloat) {
-    FreeTemp(rs_fr0);
-    FreeTemp(rs_fr1);
-    FreeTemp(rs_fr2);
-    FreeTemp(rs_fr3);
-    FreeTemp(rs_fr4);
-    FreeTemp(rs_fr5);
-    FreeTemp(rs_fr6);
-    FreeTemp(rs_fr7);
-    FreeTemp(rs_fr8);
-    FreeTemp(rs_fr9);
-    FreeTemp(rs_fr10);
-    FreeTemp(rs_fr11);
-    FreeTemp(rs_fr12);
-    FreeTemp(rs_fr13);
-    FreeTemp(rs_fr14);
-    FreeTemp(rs_fr15);
-    FreeTemp(rs_dr0);
-    FreeTemp(rs_dr1);
-    FreeTemp(rs_dr2);
-    FreeTemp(rs_dr3);
-    FreeTemp(rs_dr4);
-    FreeTemp(rs_dr5);
-    FreeTemp(rs_dr6);
-    FreeTemp(rs_dr7);
-  }
-}
-
-RegStorage ArmMir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
-  LoadWordDisp(rs_rARM_SELF, GetThreadOffset<4>(trampoline).Int32Value(), rs_rARM_LR);
-  return rs_rARM_LR;
-}
-
-LIR* ArmMir2Lir::CheckSuspendUsingLoad() {
-  RegStorage tmp = rs_r0;
-  Load32Disp(rs_rARM_SELF, Thread::ThreadSuspendTriggerOffset<4>().Int32Value(), tmp);
-  LIR* load2 = Load32Disp(tmp, 0, tmp);
-  return load2;
-}
-
-uint64_t ArmMir2Lir::GetTargetInstFlags(int opcode) {
-  DCHECK(!IsPseudoLirOp(opcode));
-  return ArmMir2Lir::EncodingMap[opcode].flags;
-}
-
-const char* ArmMir2Lir::GetTargetInstName(int opcode) {
-  DCHECK(!IsPseudoLirOp(opcode));
-  return ArmMir2Lir::EncodingMap[opcode].name;
-}
-
-const char* ArmMir2Lir::GetTargetInstFmt(int opcode) {
-  DCHECK(!IsPseudoLirOp(opcode));
-  return ArmMir2Lir::EncodingMap[opcode].fmt;
-}
-
-/*
- * Somewhat messy code here.  We want to allocate a pair of contiguous
- * physical single-precision floating point registers starting with
- * an even numbered reg.  It is possible that the paired s_reg (s_reg+1)
- * has already been allocated - try to fit if possible.  Fail to
- * allocate if we can't meet the requirements for the pair of
- * s_reg<=sX[even] & (s_reg+1)<= sX+1.
- */
-// TODO: needs rewrite to support non-backed 64-bit float regs.
-RegStorage ArmMir2Lir::AllocPreservedDouble(int s_reg) {
-  RegStorage res;
-  int v_reg = mir_graph_->SRegToVReg(s_reg);
-  int p_map_idx = SRegToPMap(s_reg);
-  if (promotion_map_[p_map_idx+1].fp_location == kLocPhysReg) {
-    // Upper reg is already allocated.  Can we fit?
-    int high_reg = promotion_map_[p_map_idx+1].fp_reg;
-    if ((high_reg & 1) == 0) {
-      // High reg is even - fail.
-      return res;  // Invalid.
-    }
-    // Is the low reg of the pair free?
-    // FIXME: rework.
-    RegisterInfo* p = GetRegInfo(RegStorage::FloatSolo32(high_reg - 1));
-    if (p->InUse() || p->IsTemp()) {
-      // Already allocated or not preserved - fail.
-      return res;  // Invalid.
-    }
-    // OK - good to go.
-    res = RegStorage::FloatSolo64(p->GetReg().GetRegNum() >> 1);
-    p->MarkInUse();
-    MarkPreservedSingle(v_reg, p->GetReg());
-  } else {
-    /*
-     * TODO: until runtime support is in, make sure we avoid promoting the same vreg to
-     * different underlying physical registers.
-     */
-    for (RegisterInfo* info : reg_pool_->dp_regs_) {
-      if (!info->IsTemp() && !info->InUse()) {
-        res = info->GetReg();
-        info->MarkInUse();
-        MarkPreservedDouble(v_reg, info->GetReg());
-        break;
-      }
-    }
-  }
-  if (res.Valid()) {
-    RegisterInfo* info = GetRegInfo(res);
-    promotion_map_[p_map_idx].fp_location = kLocPhysReg;
-    promotion_map_[p_map_idx].fp_reg =
-        info->FindMatchingView(RegisterInfo::kLowSingleStorageMask)->GetReg().GetReg();
-    promotion_map_[p_map_idx+1].fp_location = kLocPhysReg;
-    promotion_map_[p_map_idx+1].fp_reg =
-        info->FindMatchingView(RegisterInfo::kHighSingleStorageMask)->GetReg().GetReg();
-  }
-  return res;
-}
-
-// Reserve a callee-save sp single register.
-RegStorage ArmMir2Lir::AllocPreservedSingle(int s_reg) {
-  RegStorage res;
-  for (RegisterInfo* info : reg_pool_->sp_regs_) {
-    if (!info->IsTemp() && !info->InUse()) {
-      res = info->GetReg();
-      int p_map_idx = SRegToPMap(s_reg);
-      int v_reg = mir_graph_->SRegToVReg(s_reg);
-      GetRegInfo(res)->MarkInUse();
-      MarkPreservedSingle(v_reg, res);
-      promotion_map_[p_map_idx].fp_location = kLocPhysReg;
-      promotion_map_[p_map_idx].fp_reg = res.GetReg();
-      break;
-    }
-  }
-  return res;
-}
-
-void ArmMir2Lir::InstallLiteralPools() {
-  patches_.reserve(call_method_insns_.size() + dex_cache_access_insns_.size());
-
-  // PC-relative calls to methods.
-  for (LIR* p : call_method_insns_) {
-    DCHECK_EQ(p->opcode, kThumb2Bl);
-    uint32_t target_method_idx = p->operands[1];
-    const DexFile* target_dex_file = UnwrapPointer<DexFile>(p->operands[2]);
-    patches_.push_back(LinkerPatch::RelativeCodePatch(p->offset,
-                                                      target_dex_file, target_method_idx));
-  }
-
-  // PC-relative dex cache array accesses.
-  for (LIR* p : dex_cache_access_insns_) {
-    DCHECK(p->opcode == kThumb2MovImm16 || p->opcode == kThumb2MovImm16H);
-    const LIR* add_pc = UnwrapPointer<LIR>(p->operands[4]);
-    DCHECK(add_pc->opcode == kThumbAddRRLH || add_pc->opcode == kThumbAddRRHH);
-    const DexFile* dex_file = UnwrapPointer<DexFile>(p->operands[2]);
-    uint32_t offset = p->operands[3];
-    DCHECK(!p->flags.is_nop);
-    DCHECK(!add_pc->flags.is_nop);
-    patches_.push_back(LinkerPatch::DexCacheArrayPatch(p->offset,
-                                                       dex_file, add_pc->offset, offset));
-  }
-
-  // And do the normal processing.
-  Mir2Lir::InstallLiteralPools();
-}
-
-RegStorage ArmMir2Lir::InToRegStorageArmMapper::GetNextReg(ShortyArg arg) {
-  const RegStorage coreArgMappingToPhysicalReg[] =
-      {rs_r1, rs_r2, rs_r3};
-  const int coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
-  const RegStorage fpArgMappingToPhysicalReg[] =
-      {rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
-       rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15};
-  constexpr uint32_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
-  static_assert(fpArgMappingToPhysicalRegSize % 2 == 0, "Number of FP Arg regs is not even");
-
-  RegStorage result = RegStorage::InvalidReg();
-  // Regard double as long, float as int for kArm32QuickCodeUseSoftFloat.
-  if (arg.IsFP() && !kArm32QuickCodeUseSoftFloat) {
-    if (arg.IsWide()) {
-      cur_fp_double_reg_ = std::max(cur_fp_double_reg_, RoundUp(cur_fp_reg_, 2));
-      if (cur_fp_double_reg_ < fpArgMappingToPhysicalRegSize) {
-        result = RegStorage::MakeRegPair(fpArgMappingToPhysicalReg[cur_fp_double_reg_],
-                                         fpArgMappingToPhysicalReg[cur_fp_double_reg_ + 1]);
-        result = As64BitFloatReg(result);
-        cur_fp_double_reg_ += 2;
-      }
-    } else {
-      if (cur_fp_reg_ % 2 == 0) {
-        cur_fp_reg_ = std::max(cur_fp_double_reg_, cur_fp_reg_);
-      }
-      if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
-        result = fpArgMappingToPhysicalReg[cur_fp_reg_];
-        cur_fp_reg_++;
-      }
-    }
-  } else {
-    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
-      if (!kArm32QuickCodeUseSoftFloat && arg.IsWide() && cur_core_reg_ == 0) {
-        // Skip r1, and use r2-r3 for the register pair.
-        cur_core_reg_++;
-      }
-      result = coreArgMappingToPhysicalReg[cur_core_reg_++];
-      if (arg.IsWide() && cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
-        result = RegStorage::MakeRegPair(result, coreArgMappingToPhysicalReg[cur_core_reg_++]);
-      }
-    }
-  }
-  return result;
-}
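-// e.g. (hard-float, illustrative): a leading long ('J') skips r1 and takes
-// the r2/r3 pair, a first float ('F') maps to s0, and a first double ('D')
-// to d0 (backed by s0/s1).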
-
-int ArmMir2Lir::GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) {
-  if (kArm32QuickCodeUseSoftFloat) {
-    return Mir2Lir::GenDalvikArgsBulkCopy(info, first, count);
-  }
-  /*
-   * TODO: Improve by adding block copy for large number of arguments.  For now, just
-   * copy a Dalvik vreg at a time.
-   */
-  return count;
-}
-
-void ArmMir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
-  DCHECK(MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode));
-  RegLocation rl_src[3];
-  RegLocation rl_dest = mir_graph_->GetBadLoc();
-  rl_src[0] = rl_src[1] = rl_src[2] = mir_graph_->GetBadLoc();
-  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
-    case kMirOpMaddInt:
-      rl_dest = mir_graph_->GetDest(mir);
-      rl_src[0] = mir_graph_->GetSrc(mir, 0);
-      rl_src[1] = mir_graph_->GetSrc(mir, 1);
-      rl_src[2] = mir_graph_->GetSrc(mir, 2);
-      GenMaddMsubInt(rl_dest, rl_src[0], rl_src[1], rl_src[2], false);
-      break;
-    case kMirOpMsubInt:
-      rl_dest = mir_graph_->GetDest(mir);
-      rl_src[0] = mir_graph_->GetSrc(mir, 0);
-      rl_src[1] = mir_graph_->GetSrc(mir, 1);
-      rl_src[2] = mir_graph_->GetSrc(mir, 2);
-      GenMaddMsubInt(rl_dest, rl_src[0], rl_src[1], rl_src[2], true);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << mir->dalvikInsn.opcode;
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
deleted file mode 100644
index c31f46b..0000000
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ /dev/null
@@ -1,1314 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_arm.h"
-
-#include "arch/arm/instruction_set_features_arm.h"
-#include "arm_lir.h"
-#include "base/logging.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "dex/reg_storage_eq.h"
-#include "driver/compiler_driver.h"
-
-namespace art {
-
-/* This file contains codegen for the Thumb ISA. */
-
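-/*
- * Determine whether value (the raw bits of a float) can be encoded as a
- * Thumb2 floating point immediate.  If not, return -1.  If so, return the
- * encoded 8-bit value.  e.g. +2.0f (bits 0x40000000) encodes to 0, which
- * LoadFPConstantValue() below relies on.
- */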
-static int32_t EncodeImmSingle(int32_t value) {
-  int32_t res;
-  int32_t bit_a =  (value & 0x80000000) >> 31;
-  int32_t not_bit_b = (value & 0x40000000) >> 30;
-  int32_t bit_b =  (value & 0x20000000) >> 29;
-  int32_t b_smear =  (value & 0x3e000000) >> 25;
-  int32_t slice =   (value & 0x01f80000) >> 19;
-  int32_t zeroes =  (value & 0x0007ffff);
-  if (zeroes != 0)
-    return -1;
-  if (bit_b) {
-    if ((not_bit_b != 0) || (b_smear != 0x1f))
-      return -1;
-  } else {
-    if ((not_bit_b != 1) || (b_smear != 0x0))
-      return -1;
-  }
-  res = (bit_a << 7) | (bit_b << 6) | slice;
-  return res;
-}
-
-/*
- * Determine whether value can be encoded as a Thumb2 floating point
- * immediate.  If not, return -1.  If so return encoded 8-bit value.
- */
-static int32_t EncodeImmDouble(int64_t value) {
-  int32_t res;
-  int32_t bit_a = (value & INT64_C(0x8000000000000000)) >> 63;
-  int32_t not_bit_b = (value & INT64_C(0x4000000000000000)) >> 62;
-  int32_t bit_b = (value & INT64_C(0x2000000000000000)) >> 61;
-  int32_t b_smear = (value & INT64_C(0x3fc0000000000000)) >> 54;
-  int32_t slice =  (value & INT64_C(0x003f000000000000)) >> 48;
-  uint64_t zeroes = (value & INT64_C(0x0000ffffffffffff));
-  if (zeroes != 0ull)
-    return -1;
-  if (bit_b) {
-    if ((not_bit_b != 0) || (b_smear != 0xff))
-      return -1;
-  } else {
-    if ((not_bit_b != 1) || (b_smear != 0x0))
-      return -1;
-  }
-  res = (bit_a << 7) | (bit_b << 6) | slice;
-  return res;
-}
-
-LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) {
-  DCHECK(RegStorage::IsSingle(r_dest));
-  if (value == 0) {
-    // TODO: we need better info about the target CPU.  A vector exclusive or
-    //       would probably be better here if we could rely on its existence.
-    // Load an immediate +2.0 (which encodes to 0)
-    NewLIR2(kThumb2Vmovs_IMM8, r_dest, 0);
-    // +0.0 = +2.0 - +2.0
-    return NewLIR3(kThumb2Vsubs, r_dest, r_dest, r_dest);
-  } else {
-    int encoded_imm = EncodeImmSingle(value);
-    if (encoded_imm >= 0) {
-      return NewLIR2(kThumb2Vmovs_IMM8, r_dest, encoded_imm);
-    }
-  }
-  LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
-  if (data_target == nullptr) {
-    data_target = AddWordData(&literal_list_, value);
-  }
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
-  LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kThumb2Vldrs,
-                          r_dest, rs_r15pc.GetReg(), 0, 0, 0, data_target);
-  AppendLIR(load_pc_rel);
-  return load_pc_rel;
-}
-
-/*
- * Determine whether value can be encoded as a Thumb2 modified
- * immediate.  If not, return -1.  If so, return i:imm3:a:bcdefgh form.
- */
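-// e.g. ModifiedImmediate(0xab) == 0x0ab, ModifiedImmediate(0x00ab00ab) == 0x1ab,
-// and ModifiedImmediate(0x00ab0000) == 0x82b (rotation case; cf. ExpandImmediate
-// in target_arm.cc).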
-int ArmMir2Lir::ModifiedImmediate(uint32_t value) {
-  uint32_t b0 = value & 0xff;
-
-  /* Note: case of value==0 must use 0:000:0:0000000 encoding */
-  if (value <= 0xFF)
-    return b0;  // 0:000:a:bcdefgh
-  if (value == ((b0 << 16) | b0))
-    return (0x1 << 8) | b0; /* 0:001:a:bcdefgh */
-  if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0))
-    return (0x3 << 8) | b0; /* 0:011:a:bcdefgh */
-  b0 = (value >> 8) & 0xff;
-  if (value == ((b0 << 24) | (b0 << 8)))
-    return (0x2 << 8) | b0; /* 0:010:a:bcdefgh */
-  /* Can we do it with rotation? */
-  int z_leading = CLZ(value);
-  int z_trailing = CTZ(value);
-  /* A run of eight or fewer active bits? */
-  if ((z_leading + z_trailing) < 24)
-    return -1;  /* No - bail */
-  /* left-justify the constant, discarding msb (known to be 1) */
-  value <<= z_leading + 1;
-  /* Create bcdefgh */
-  value >>= 25;
-  /* Put it all together */
-  return value | ((0x8 + z_leading) << 7); /* [01000..11111]:bcdefgh */
-}
-
-bool ArmMir2Lir::InexpensiveConstantInt(int32_t value) {
-  return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
-}
-
-bool ArmMir2Lir::InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
-  switch (opcode) {
-    case Instruction::ADD_INT:
-    case Instruction::ADD_INT_2ADDR:
-    case Instruction::SUB_INT:
-    case Instruction::SUB_INT_2ADDR:
-      if ((value >> 12) == (value >> 31)) {  // Signed 12-bit, RRI12 versions of ADD/SUB.
-        return true;
-      }
-      FALLTHROUGH_INTENDED;
-    case Instruction::IF_EQ:
-    case Instruction::IF_NE:
-    case Instruction::IF_LT:
-    case Instruction::IF_GE:
-    case Instruction::IF_GT:
-    case Instruction::IF_LE:
-      return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(-value) >= 0);
-    case Instruction::SHL_INT:
-    case Instruction::SHL_INT_2ADDR:
-    case Instruction::SHR_INT:
-    case Instruction::SHR_INT_2ADDR:
-    case Instruction::USHR_INT:
-    case Instruction::USHR_INT_2ADDR:
-      return true;
-    case Instruction::CONST:
-    case Instruction::CONST_4:
-    case Instruction::CONST_16:
-      if ((value >> 16) == 0) {
-        return true;  // movw, 16-bit unsigned.
-      }
-      FALLTHROUGH_INTENDED;
-    case Instruction::AND_INT:
-    case Instruction::AND_INT_2ADDR:
-    case Instruction::AND_INT_LIT16:
-    case Instruction::AND_INT_LIT8:
-    case Instruction::OR_INT:
-    case Instruction::OR_INT_2ADDR:
-    case Instruction::OR_INT_LIT16:
-    case Instruction::OR_INT_LIT8:
-      return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
-    case Instruction::XOR_INT:
-    case Instruction::XOR_INT_2ADDR:
-    case Instruction::XOR_INT_LIT16:
-    case Instruction::XOR_INT_LIT8:
-      return (ModifiedImmediate(value) >= 0);
-    case Instruction::MUL_INT:
-    case Instruction::MUL_INT_2ADDR:
-    case Instruction::MUL_INT_LIT8:
-    case Instruction::MUL_INT_LIT16:
-    case Instruction::DIV_INT:
-    case Instruction::DIV_INT_2ADDR:
-    case Instruction::DIV_INT_LIT8:
-    case Instruction::DIV_INT_LIT16:
-    case Instruction::REM_INT:
-    case Instruction::REM_INT_2ADDR:
-    case Instruction::REM_INT_LIT8:
-    case Instruction::REM_INT_LIT16: {
-      EasyMultiplyOp ops[2];
-      return GetEasyMultiplyTwoOps(value, ops);
-    }
-    default:
-      return false;
-  }
-}
-
-bool ArmMir2Lir::InexpensiveConstantFloat(int32_t value) {
-  return EncodeImmSingle(value) >= 0;
-}
-
-bool ArmMir2Lir::InexpensiveConstantLong(int64_t value) {
-  return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
-}
-
-bool ArmMir2Lir::InexpensiveConstantDouble(int64_t value) {
-  return EncodeImmDouble(value) >= 0;
-}
-
-/*
- * Load an immediate using a shortcut if possible; otherwise
- * grab from the per-translation literal pool.
- *
- * No additional register clobbering operation performed. Use this version when
- * 1) r_dest is freshly returned from AllocTemp or
- * 2) The codegen is under fixed register usage
- */
-LIR* ArmMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
-  LIR* res;
-  int mod_imm;
-
-  if (r_dest.IsFloat()) {
-    return LoadFPConstantValue(r_dest.GetReg(), value);
-  }
-
-  /* See if the value can be constructed cheaply */
-  if (r_dest.Low8() && (value >= 0) && (value <= 255)) {
-    return NewLIR2(kThumbMovImm, r_dest.GetReg(), value);
-  }
-  /* Check Modified immediate special cases */
-  mod_imm = ModifiedImmediate(value);
-  if (mod_imm >= 0) {
-    res = NewLIR2(kThumb2MovI8M, r_dest.GetReg(), mod_imm);
-    return res;
-  }
-  mod_imm = ModifiedImmediate(~value);
-  if (mod_imm >= 0) {
-    res = NewLIR2(kThumb2MvnI8M, r_dest.GetReg(), mod_imm);
-    return res;
-  }
-  /* 16-bit immediate? */
-  if ((value & 0xffff) == value) {
-    res = NewLIR2(kThumb2MovImm16, r_dest.GetReg(), value);
-    return res;
-  }
-  /* Do a low/high pair */
-  res = NewLIR2(kThumb2MovImm16, r_dest.GetReg(), Low16Bits(value));
-  NewLIR2(kThumb2MovImm16H, r_dest.GetReg(), High16Bits(value));
-  return res;
-}
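-// e.g. LoadConstantNoClobber(rs_r0, 0x12345678) has no short form and emits
-// the movw/movt pair, while 0xffffff00 folds to a single kThumb2MvnI8M.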
-
-LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target) {
-  LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched during assembly */);
-  res->target = target;
-  return res;
-}
-
-LIR* ArmMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
-  LIR* branch = NewLIR2(kThumbBCond, 0 /* offset to be patched */,
-                        ArmConditionEncoding(cc));
-  branch->target = target;
-  return branch;
-}
-
-LIR* ArmMir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
-  ArmOpcode opcode = kThumbBkpt;
-  switch (op) {
-    case kOpBlx:
-      opcode = kThumbBlxR;
-      break;
-    case kOpBx:
-      opcode = kThumbBx;
-      break;
-    default:
-      LOG(FATAL) << "Bad opcode " << op;
-  }
-  return NewLIR1(opcode, r_dest_src.GetReg());
-}
-
-LIR* ArmMir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
-                               int shift) {
-  bool thumb_form =
-      ((shift == 0) && r_dest_src1.Low8() && r_src2.Low8());
-  ArmOpcode opcode = kThumbBkpt;
-  switch (op) {
-    case kOpAdc:
-      opcode = (thumb_form) ? kThumbAdcRR : kThumb2AdcRRR;
-      break;
-    case kOpAnd:
-      opcode = (thumb_form) ? kThumbAndRR : kThumb2AndRRR;
-      break;
-    case kOpBic:
-      opcode = (thumb_form) ? kThumbBicRR : kThumb2BicRRR;
-      break;
-    case kOpCmn:
-      DCHECK_EQ(shift, 0);
-      opcode = (thumb_form) ? kThumbCmnRR : kThumb2CmnRR;
-      break;
-    case kOpCmp:
-      if (thumb_form)
-        opcode = kThumbCmpRR;
-      else if ((shift == 0) && !r_dest_src1.Low8() && !r_src2.Low8())
-        opcode = kThumbCmpHH;
-      else if ((shift == 0) && r_dest_src1.Low8())
-        opcode = kThumbCmpLH;
-      else if (shift == 0)
-        opcode = kThumbCmpHL;
-      else
-        opcode = kThumb2CmpRR;
-      break;
-    case kOpXor:
-      opcode = (thumb_form) ? kThumbEorRR : kThumb2EorRRR;
-      break;
-    case kOpMov:
-      DCHECK_EQ(shift, 0);
-      if (r_dest_src1.Low8() && r_src2.Low8())
-        opcode = kThumbMovRR;
-      else if (!r_dest_src1.Low8() && !r_src2.Low8())
-        opcode = kThumbMovRR_H2H;
-      else if (r_dest_src1.Low8())
-        opcode = kThumbMovRR_H2L;
-      else
-        opcode = kThumbMovRR_L2H;
-      break;
-    case kOpMul:
-      DCHECK_EQ(shift, 0);
-      opcode = (thumb_form) ? kThumbMul : kThumb2MulRRR;
-      break;
-    case kOpMvn:
-      opcode = (thumb_form) ? kThumbMvn : kThumb2MnvRR;
-      break;
-    case kOpNeg:
-      DCHECK_EQ(shift, 0);
-      opcode = (thumb_form) ? kThumbNeg : kThumb2NegRR;
-      break;
-    case kOpOr:
-      opcode = (thumb_form) ? kThumbOrr : kThumb2OrrRRR;
-      break;
-    case kOpSbc:
-      opcode = (thumb_form) ? kThumbSbc : kThumb2SbcRRR;
-      break;
-    case kOpTst:
-      opcode = (thumb_form) ? kThumbTst : kThumb2TstRR;
-      break;
-    case kOpLsl:
-      DCHECK_EQ(shift, 0);
-      opcode = (thumb_form) ? kThumbLslRR : kThumb2LslRRR;
-      break;
-    case kOpLsr:
-      DCHECK_EQ(shift, 0);
-      opcode = (thumb_form) ? kThumbLsrRR : kThumb2LsrRRR;
-      break;
-    case kOpAsr:
-      DCHECK_EQ(shift, 0);
-      opcode = (thumb_form) ? kThumbAsrRR : kThumb2AsrRRR;
-      break;
-    case kOpRor:
-      DCHECK_EQ(shift, 0);
-      opcode = (thumb_form) ? kThumbRorRR : kThumb2RorRRR;
-      break;
-    case kOpAdd:
-      opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
-      break;
-    case kOpSub:
-      opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
-      break;
-    case kOpRev:
-      DCHECK_EQ(shift, 0);
-      if (!thumb_form) {
-        // Binary, but rm is encoded twice.
-        return NewLIR3(kThumb2RevRR, r_dest_src1.GetReg(), r_src2.GetReg(), r_src2.GetReg());
-      }
-      opcode = kThumbRev;
-      break;
-    case kOpRevsh:
-      DCHECK_EQ(shift, 0);
-      if (!thumb_form) {
-        // Binary, but rm is encoded twice.
-        return NewLIR3(kThumb2RevshRR, r_dest_src1.GetReg(), r_src2.GetReg(), r_src2.GetReg());
-      }
-      opcode = kThumbRevsh;
-      break;
-    case kOp2Byte:
-      DCHECK_EQ(shift, 0);
-      return NewLIR4(kThumb2Sbfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 8);
-    case kOp2Short:
-      DCHECK_EQ(shift, 0);
-      return NewLIR4(kThumb2Sbfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 16);
-    case kOp2Char:
-      DCHECK_EQ(shift, 0);
-      return NewLIR4(kThumb2Ubfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 16);
-    default:
-      LOG(FATAL) << "Bad opcode: " << op;
-      break;
-  }
-  DCHECK(!IsPseudoLirOp(opcode));
-  if (EncodingMap[opcode].flags & IS_BINARY_OP) {
-    return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
-  } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
-    if (EncodingMap[opcode].field_loc[2].kind == kFmtShift) {
-      return NewLIR3(opcode, r_dest_src1.GetReg(), r_src2.GetReg(), shift);
-    } else {
-      return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg());
-    }
-  } else if (EncodingMap[opcode].flags & IS_QUAD_OP) {
-    return NewLIR4(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg(), shift);
-  } else {
-    LOG(FATAL) << "Unexpected encoding operand count";
-    return nullptr;
-  }
-}
-
-LIR* ArmMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
-  return OpRegRegShift(op, r_dest_src1, r_src2, 0);
-}
-
-LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest ATTRIBUTE_UNUSED,
-                             RegStorage r_base ATTRIBUTE_UNUSED,
-                             int offset ATTRIBUTE_UNUSED,
-                             MoveType move_type ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL);
-  UNREACHABLE();
-}
-
-LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base ATTRIBUTE_UNUSED,
-                             int offset ATTRIBUTE_UNUSED,
-                             RegStorage r_src ATTRIBUTE_UNUSED,
-                             MoveType move_type ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL);
-  UNREACHABLE();
-}
-
-LIR* ArmMir2Lir::OpCondRegReg(OpKind op ATTRIBUTE_UNUSED,
-                              ConditionCode cc ATTRIBUTE_UNUSED,
-                              RegStorage r_dest ATTRIBUTE_UNUSED,
-                              RegStorage r_src ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm";
-  UNREACHABLE();
-}
-
-LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
-                                  RegStorage r_src2, int shift) {
-  ArmOpcode opcode = kThumbBkpt;
-  bool thumb_form = (shift == 0) && r_dest.Low8() && r_src1.Low8() && r_src2.Low8();
-  switch (op) {
-    case kOpAdd:
-      opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
-      break;
-    case kOpSub:
-      opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
-      break;
-    case kOpRsub:
-      opcode = kThumb2RsubRRR;
-      break;
-    case kOpAdc:
-      opcode = kThumb2AdcRRR;
-      break;
-    case kOpAnd:
-      opcode = kThumb2AndRRR;
-      break;
-    case kOpBic:
-      opcode = kThumb2BicRRR;
-      break;
-    case kOpXor:
-      opcode = kThumb2EorRRR;
-      break;
-    case kOpMul:
-      DCHECK_EQ(shift, 0);
-      opcode = kThumb2MulRRR;
-      break;
-    case kOpDiv:
-      DCHECK_EQ(shift, 0);
-      opcode = kThumb2SdivRRR;
-      break;
-    case kOpOr:
-      opcode = kThumb2OrrRRR;
-      break;
-    case kOpSbc:
-      opcode = kThumb2SbcRRR;
-      break;
-    case kOpLsl:
-      DCHECK_EQ(shift, 0);
-      opcode = kThumb2LslRRR;
-      break;
-    case kOpLsr:
-      DCHECK_EQ(shift, 0);
-      opcode = kThumb2LsrRRR;
-      break;
-    case kOpAsr:
-      DCHECK_EQ(shift, 0);
-      opcode = kThumb2AsrRRR;
-      break;
-    case kOpRor:
-      DCHECK_EQ(shift, 0);
-      opcode = kThumb2RorRRR;
-      break;
-    default:
-      LOG(FATAL) << "Bad opcode: " << op;
-      break;
-  }
-  DCHECK(!IsPseudoLirOp(opcode));
-  if (EncodingMap[opcode].flags & IS_QUAD_OP) {
-    return NewLIR4(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift);
-  } else {
-    DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
-    return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
-  }
-}
-
-LIR* ArmMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
-  return OpRegRegRegShift(op, r_dest, r_src1, r_src2, 0);
-}
-
-LIR* ArmMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
-  bool neg = (value < 0);
-  int32_t abs_value = (neg) ? -value : value;
-  ArmOpcode opcode = kThumbBkpt;
-  ArmOpcode alt_opcode = kThumbBkpt;
-  bool all_low_regs = r_dest.Low8() && r_src1.Low8();
-  int32_t mod_imm = ModifiedImmediate(value);
-
-  switch (op) {
-    case kOpLsl:
-      if (all_low_regs)
-        return NewLIR3(kThumbLslRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
-      else
-        return NewLIR3(kThumb2LslRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
-    case kOpLsr:
-      if (all_low_regs)
-        return NewLIR3(kThumbLsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
-      else
-        return NewLIR3(kThumb2LsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
-    case kOpAsr:
-      if (all_low_regs)
-        return NewLIR3(kThumbAsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
-      else
-        return NewLIR3(kThumb2AsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
-    case kOpRor:
-      return NewLIR3(kThumb2RorRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
-    case kOpAdd:
-      if (r_dest.Low8() && (r_src1 == rs_r13sp) && (value <= 1020) && ((value & 0x3) == 0)) {
-        return NewLIR3(kThumbAddSpRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
-      } else if (r_dest.Low8() && (r_src1 == rs_r15pc) &&
-          (value <= 1020) && ((value & 0x3) == 0)) {
-        return NewLIR3(kThumbAddPcRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
-      }
-      FALLTHROUGH_INTENDED;
-    case kOpSub:
-      if (all_low_regs && ((abs_value & 0x7) == abs_value)) {
-        if (op == kOpAdd)
-          opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
-        else
-          opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
-        return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), abs_value);
-      }
-      if (mod_imm < 0) {
-        mod_imm = ModifiedImmediate(-value);
-        if (mod_imm >= 0) {
-          op = (op == kOpAdd) ? kOpSub : kOpAdd;
-        }
-      }
-      if (mod_imm < 0 && (abs_value >> 12) == 0) {
-        // This is deliberately used only if the modified immediate encoding is inadequate,
-        // since we sometimes actually use the flags for small values but not necessarily low regs.
-        if (op == kOpAdd)
-          opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
-        else
-          opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
-        return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), abs_value);
-      }
-      if (op == kOpSub) {
-        opcode = kThumb2SubRRI8M;
-        alt_opcode = kThumb2SubRRR;
-      } else {
-        opcode = kThumb2AddRRI8M;
-        alt_opcode = kThumb2AddRRR;
-      }
-      break;
-    case kOpRsub:
-      opcode = kThumb2RsubRRI8M;
-      alt_opcode = kThumb2RsubRRR;
-      break;
-    case kOpAdc:
-      opcode = kThumb2AdcRRI8M;
-      alt_opcode = kThumb2AdcRRR;
-      break;
-    case kOpSbc:
-      opcode = kThumb2SbcRRI8M;
-      alt_opcode = kThumb2SbcRRR;
-      break;
-    case kOpOr:
-      opcode = kThumb2OrrRRI8M;
-      alt_opcode = kThumb2OrrRRR;
-      if (mod_imm < 0) {
-        mod_imm = ModifiedImmediate(~value);
-        if (mod_imm >= 0) {
-          opcode = kThumb2OrnRRI8M;
-        }
-      }
-      break;
-    case kOpAnd:
-      if (mod_imm < 0) {
-        mod_imm = ModifiedImmediate(~value);
-        if (mod_imm >= 0) {
-          return NewLIR3(kThumb2BicRRI8M, r_dest.GetReg(), r_src1.GetReg(), mod_imm);
-        }
-      }
-      opcode = kThumb2AndRRI8M;
-      alt_opcode = kThumb2AndRRR;
-      break;
-    case kOpXor:
-      opcode = kThumb2EorRRI8M;
-      alt_opcode = kThumb2EorRRR;
-      break;
-    case kOpMul:
-      // TUNING: power of 2, shift & add
-      mod_imm = -1;
-      alt_opcode = kThumb2MulRRR;
-      break;
-    case kOpCmp: {
-      LIR* res;
-      if (mod_imm >= 0) {
-        res = NewLIR2(kThumb2CmpRI8M, r_src1.GetReg(), mod_imm);
-      } else {
-        mod_imm = ModifiedImmediate(-value);
-        if (mod_imm >= 0) {
-          res = NewLIR2(kThumb2CmnRI8M, r_src1.GetReg(), mod_imm);
-        } else {
-          RegStorage r_tmp = AllocTemp();
-          res = LoadConstant(r_tmp, value);
-          OpRegReg(kOpCmp, r_src1, r_tmp);
-          FreeTemp(r_tmp);
-        }
-      }
-      return res;
-    }
-    default:
-      LOG(FATAL) << "Bad opcode: " << op;
-  }
-
-  if (mod_imm >= 0) {
-    return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), mod_imm);
-  } else {
-    RegStorage r_scratch = AllocTemp();
-    LoadConstant(r_scratch, value);
-    LIR* res;
-    if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
-      res = NewLIR4(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), 0);
-    else
-      res = NewLIR3(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
-    FreeTemp(r_scratch);
-    return res;
-  }
-}
-
-/* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
-LIR* ArmMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
-  bool neg = (value < 0);
-  int32_t abs_value = (neg) ? -value : value;
-  bool short_form = (((abs_value & 0xff) == abs_value) && r_dest_src1.Low8());
-  ArmOpcode opcode = kThumbBkpt;
-  switch (op) {
-    case kOpAdd:
-      if (!neg && (r_dest_src1 == rs_r13sp) && (value <= 508)) { /* sp */
-        DCHECK_EQ((value & 0x3), 0);
-        return NewLIR1(kThumbAddSpI7, value >> 2);
-      } else if (short_form) {
-        opcode = (neg) ? kThumbSubRI8 : kThumbAddRI8;
-      }
-      break;
-    case kOpSub:
-      if (!neg && (r_dest_src1 == rs_r13sp) && (value <= 508)) { /* sp */
-        DCHECK_EQ((value & 0x3), 0);
-        return NewLIR1(kThumbSubSpI7, value >> 2);
-      } else if (short_form) {
-        opcode = (neg) ? kThumbAddRI8 : kThumbSubRI8;
-      }
-      break;
-    case kOpCmp:
-      if (!neg && short_form) {
-        opcode = kThumbCmpRI8;
-      } else {
-        short_form = false;
-      }
-      break;
-    default:
-      /* Punt to OpRegRegImm - if bad case catch it there */
-      short_form = false;
-      break;
-  }
-  if (short_form) {
-    return NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
-  } else {
-    return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
-  }
-}
-
-LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
-  LIR* res = nullptr;
-  int32_t val_lo = Low32Bits(value);
-  int32_t val_hi = High32Bits(value);
-  if (r_dest.IsFloat()) {
-    DCHECK(!r_dest.IsPair());
-    if ((val_lo == 0) && (val_hi == 0)) {
-      // TODO: we need better info about the target CPU.  A vector exclusive or
-      //       would probably be better here if we could rely on its existence.
-      // Load an immediate +2.0 (which encodes to 0)
-      NewLIR2(kThumb2Vmovd_IMM8, r_dest.GetReg(), 0);
-      // +0.0 = +2.0 - +2.0
-      res = NewLIR3(kThumb2Vsubd, r_dest.GetReg(), r_dest.GetReg(), r_dest.GetReg());
-    } else {
-      int encoded_imm = EncodeImmDouble(value);
-      if (encoded_imm >= 0) {
-        res = NewLIR2(kThumb2Vmovd_IMM8, r_dest.GetReg(), encoded_imm);
-      }
-    }
-  } else {
-    // NOTE: Arm32 assumption here.
-    DCHECK(r_dest.IsPair());
-    if ((InexpensiveConstantInt(val_lo) && (InexpensiveConstantInt(val_hi)))) {
-      res = LoadConstantNoClobber(r_dest.GetLow(), val_lo);
-      LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
-    }
-  }
-  if (res == nullptr) {
-    // No short form - load from the literal pool.
-    LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
-    if (data_target == nullptr) {
-      data_target = AddWideData(&literal_list_, val_lo, val_hi);
-    }
-    ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
-    if (r_dest.IsFloat()) {
-      res = RawLIR(current_dalvik_offset_, kThumb2Vldrd,
-                   r_dest.GetReg(), rs_r15pc.GetReg(), 0, 0, 0, data_target);
-    } else {
-      DCHECK(r_dest.IsPair());
-      res = RawLIR(current_dalvik_offset_, kThumb2LdrdPcRel8,
-                   r_dest.GetLowReg(), r_dest.GetHighReg(), rs_r15pc.GetReg(), 0, 0, data_target);
-    }
-    AppendLIR(res);
-  }
-  return res;
-}
-
-int ArmMir2Lir::EncodeShift(int code, int amount) {
-  return ((amount & 0x1f) << 2) | code;
-}
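-// e.g. EncodeShift(kArmLsl, 2) packs an LSL #2 for the shifted-register
-// operands used by LoadBaseIndexed()/StoreBaseIndexed() below.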
-
-LIR* ArmMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
-                                 int scale, OpSize size) {
-  bool all_low_regs = r_base.Low8() && r_index.Low8() && r_dest.Low8();
-  LIR* load;
-  ArmOpcode opcode = kThumbBkpt;
-  bool thumb_form = (all_low_regs && (scale == 0));
-  RegStorage reg_ptr;
-
-  if (r_dest.IsFloat()) {
-    if (r_dest.IsSingle()) {
-      DCHECK((size == k32) || (size == kSingle) || (size == kReference));
-      opcode = kThumb2Vldrs;
-      size = kSingle;
-    } else {
-      DCHECK(r_dest.IsDouble());
-      DCHECK((size == k64) || (size == kDouble));
-      opcode = kThumb2Vldrd;
-      size = kDouble;
-    }
-  } else {
-    if (size == kSingle)
-      size = k32;
-  }
-
-  switch (size) {
-    case kDouble:
-    // Intentional fall-through.
-    case kSingle:
-      reg_ptr = AllocTemp();
-      if (scale) {
-        NewLIR4(kThumb2AddRRR, reg_ptr.GetReg(), r_base.GetReg(), r_index.GetReg(),
-                EncodeShift(kArmLsl, scale));
-      } else {
-        OpRegRegReg(kOpAdd, reg_ptr, r_base, r_index);
-      }
-      load = NewLIR3(opcode, r_dest.GetReg(), reg_ptr.GetReg(), 0);
-      FreeTemp(reg_ptr);
-      return load;
-    case k32:
-    // Intentional fall-through.
-    case kReference:
-      opcode = (thumb_form) ? kThumbLdrRRR : kThumb2LdrRRR;
-      break;
-    case kUnsignedHalf:
-      opcode = (thumb_form) ? kThumbLdrhRRR : kThumb2LdrhRRR;
-      break;
-    case kSignedHalf:
-      opcode = (thumb_form) ? kThumbLdrshRRR : kThumb2LdrshRRR;
-      break;
-    case kUnsignedByte:
-      opcode = (thumb_form) ? kThumbLdrbRRR : kThumb2LdrbRRR;
-      break;
-    case kSignedByte:
-      opcode = (thumb_form) ? kThumbLdrsbRRR : kThumb2LdrsbRRR;
-      break;
-    default:
-      LOG(FATAL) << "Bad size: " << size;
-  }
-  if (thumb_form)
-    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
-  else
-    load = NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);
-
-  return load;
-}
-
-LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
-                                  int scale, OpSize size) {
-  bool all_low_regs = r_base.Low8() && r_index.Low8() && r_src.Low8();
-  LIR* store = nullptr;
-  ArmOpcode opcode = kThumbBkpt;
-  bool thumb_form = (all_low_regs && (scale == 0));
-  RegStorage reg_ptr;
-
-  if (r_src.IsFloat()) {
-    if (r_src.IsSingle()) {
-      DCHECK((size == k32) || (size == kSingle) || (size == kReference));
-      opcode = kThumb2Vstrs;
-      size = kSingle;
-    } else {
-      DCHECK(r_src.IsDouble());
-      DCHECK((size == k64) || (size == kDouble));
-      DCHECK_EQ((r_src.GetReg() & 0x1), 0);
-      opcode = kThumb2Vstrd;
-      size = kDouble;
-    }
-  } else {
-    if (size == kSingle)
-      size = k32;
-  }
-
-  switch (size) {
-    case kDouble:
-    // Intentional fall-through.
-    case kSingle:
-      reg_ptr = AllocTemp();
-      if (scale) {
-        NewLIR4(kThumb2AddRRR, reg_ptr.GetReg(), r_base.GetReg(), r_index.GetReg(),
-                EncodeShift(kArmLsl, scale));
-      } else {
-        OpRegRegReg(kOpAdd, reg_ptr, r_base, r_index);
-      }
-      store = NewLIR3(opcode, r_src.GetReg(), reg_ptr.GetReg(), 0);
-      FreeTemp(reg_ptr);
-      return store;
-    case k32:
-    // Intentional fall-through.
-    case kReference:
-      opcode = (thumb_form) ? kThumbStrRRR : kThumb2StrRRR;
-      break;
-    case kUnsignedHalf:
-    // Intentional fall-through.
-    case kSignedHalf:
-      opcode = (thumb_form) ? kThumbStrhRRR : kThumb2StrhRRR;
-      break;
-    case kUnsignedByte:
-    // Intentional fall-through.
-    case kSignedByte:
-      opcode = (thumb_form) ? kThumbStrbRRR : kThumb2StrbRRR;
-      break;
-    default:
-      LOG(FATAL) << "Bad size: " << size;
-  }
-  if (thumb_form)
-    store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg());
-  else
-    store = NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);
-
-  return store;
-}
-
-// Helper function for LoadBaseDispBody()/StoreBaseDispBody().
-LIR* ArmMir2Lir::LoadStoreUsingInsnWithOffsetImm8Shl2(ArmOpcode opcode, RegStorage r_base,
-                                                      int displacement, RegStorage r_src_dest,
-                                                      RegStorage r_work) {
-  DCHECK_ALIGNED(displacement, 4);
-  constexpr int kOffsetMask = 0xff << 2;
-  int encoded_disp = (displacement & kOffsetMask) >> 2;  // Within range of the instruction.
-  RegStorage r_ptr = r_base;
-  if ((displacement & ~kOffsetMask) != 0) {
-    r_ptr = r_work.Valid() ? r_work : AllocTemp();
-    // Add displacement & ~kOffsetMask to the base; it's a single instruction for up to +-256KiB.
-    OpRegRegImm(kOpAdd, r_ptr, r_base, displacement & ~kOffsetMask);
-  }
-  LIR* lir = nullptr;
-  if (!r_src_dest.IsPair()) {
-    lir = NewLIR3(opcode, r_src_dest.GetReg(), r_ptr.GetReg(), encoded_disp);
-  } else {
-    lir = NewLIR4(opcode, r_src_dest.GetLowReg(), r_src_dest.GetHighReg(), r_ptr.GetReg(),
-                  encoded_disp);
-  }
-  if ((displacement & ~kOffsetMask) != 0 && !r_work.Valid()) {
-    FreeTemp(r_ptr);
-  }
-  return lir;
-}
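-// e.g. a displacement of 0x404 becomes an ADD of 0x400 into a temp pointer
-// plus an in-instruction encoded offset of 1 (0x4 >> 2).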
-
-/*
- * Load value from base + displacement.  Optionally perform null check
- * on base (which must have an associated s_reg and MIR).  If not
- * performing null check, incoming MIR can be null.
- */
-LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
-                                  OpSize size) {
-  LIR* load = nullptr;
-  ArmOpcode opcode16 = kThumbBkpt;  // 16-bit Thumb opcode.
-  ArmOpcode opcode32 = kThumbBkpt;  // 32-bit Thumb2 opcode.
-  bool short_form = false;
-  bool all_low = r_dest.Is32Bit() && r_base.Low8() && r_dest.Low8();
-  int scale = 0;  // Used for opcode16 and some indexed loads.
-  bool already_generated = false;
-  switch (size) {
-    case kDouble:
-    // Intentional fall-through.
-    case k64:
-      if (r_dest.IsFloat()) {
-        DCHECK(!r_dest.IsPair());
-        load = LoadStoreUsingInsnWithOffsetImm8Shl2(kThumb2Vldrd, r_base, displacement, r_dest);
-      } else {
-        DCHECK(r_dest.IsPair());
-        // Use the r_dest.GetLow() for the temporary pointer if needed.
-        load = LoadStoreUsingInsnWithOffsetImm8Shl2(kThumb2LdrdI8, r_base, displacement, r_dest,
-                                                    r_dest.GetLow());
-      }
-      already_generated = true;
-      break;
-    case kSingle:
-    // Intentional fall-through.
-    case k32:
-    // Intentional fall-through.
-    case kReference:
-      if (r_dest.IsFloat()) {
-        DCHECK(r_dest.IsSingle());
-        load = LoadStoreUsingInsnWithOffsetImm8Shl2(kThumb2Vldrs, r_base, displacement, r_dest);
-        already_generated = true;
-        break;
-      }
-      DCHECK_ALIGNED(displacement, 4);
-      scale = 2;
-      if (r_dest.Low8() && (r_base == rs_rARM_PC) && (displacement <= 1020) &&
-          (displacement >= 0)) {
-        short_form = true;
-        opcode16 = kThumbLdrPcRel;
-      } else if (r_dest.Low8() && (r_base == rs_rARM_SP) && (displacement <= 1020) &&
-                 (displacement >= 0)) {
-        short_form = true;
-        opcode16 = kThumbLdrSpRel;
-      } else {
-        short_form = all_low && (displacement >> (5 + scale)) == 0;
-        opcode16 = kThumbLdrRRI5;
-        opcode32 = kThumb2LdrRRI12;
-      }
-      break;
-    case kUnsignedHalf:
-      DCHECK_ALIGNED(displacement, 2);
-      scale = 1;
-      short_form = all_low && (displacement >> (5 + scale)) == 0;
-      opcode16 = kThumbLdrhRRI5;
-      opcode32 = kThumb2LdrhRRI12;
-      break;
-    case kSignedHalf:
-      DCHECK_ALIGNED(displacement, 2);
-      scale = 1;
-      DCHECK_EQ(opcode16, kThumbBkpt);  // Not available.
-      opcode32 = kThumb2LdrshRRI12;
-      break;
-    case kUnsignedByte:
-      DCHECK_EQ(scale, 0);  // Keep scale = 0.
-      short_form = all_low && (displacement >> (5 + scale)) == 0;
-      opcode16 = kThumbLdrbRRI5;
-      opcode32 = kThumb2LdrbRRI12;
-      break;
-    case kSignedByte:
-      DCHECK_EQ(scale, 0);  // Keep scale = 0.
-      DCHECK_EQ(opcode16, kThumbBkpt);  // Not available.
-      opcode32 = kThumb2LdrsbRRI12;
-      break;
-    default:
-      LOG(FATAL) << "Bad size: " << size;
-  }
-
-  if (!already_generated) {
-    if (short_form) {
-      load = NewLIR3(opcode16, r_dest.GetReg(), r_base.GetReg(), displacement >> scale);
-    } else if ((displacement >> 12) == 0) {  // Thumb2 form.
-      load = NewLIR3(opcode32, r_dest.GetReg(), r_base.GetReg(), displacement);
-    } else if (!InexpensiveConstantInt(displacement >> scale, Instruction::CONST) &&
-        InexpensiveConstantInt(displacement & ~0x00000fff, Instruction::ADD_INT)) {
-      // In this case, using LoadIndexed would emit 3 insns (movw+movt+ldr) but we can
-      // actually do it in two because we know that the kOpAdd is a single insn. On the
-      // other hand, we introduce an extra dependency, so this is not necessarily faster.
-      if (opcode16 != kThumbBkpt && r_dest.Low8() &&
-          InexpensiveConstantInt(displacement & ~(0x1f << scale), Instruction::ADD_INT)) {
-        // We can use the 16-bit Thumb opcode for the load.
-        OpRegRegImm(kOpAdd, r_dest, r_base, displacement & ~(0x1f << scale));
-        load = NewLIR3(opcode16, r_dest.GetReg(), r_dest.GetReg(), (displacement >> scale) & 0x1f);
-      } else {
-        DCHECK_NE(opcode32, kThumbBkpt);
-        OpRegRegImm(kOpAdd, r_dest, r_base, displacement & ~0x00000fff);
-        load = NewLIR3(opcode32, r_dest.GetReg(), r_dest.GetReg(), displacement & 0x00000fff);
-      }
-    } else {
-      if (!InexpensiveConstantInt(displacement >> scale, Instruction::CONST) ||
-          (scale != 0 && InexpensiveConstantInt(displacement, Instruction::CONST))) {
-        scale = 0;  // Prefer unscaled indexing if the same number of insns.
-      }
-      RegStorage reg_offset = AllocTemp();
-      LoadConstant(reg_offset, displacement >> scale);
-      DCHECK(!r_dest.IsFloat());
-      load = LoadBaseIndexed(r_base, reg_offset, r_dest, scale, size);
-      FreeTemp(reg_offset);
-    }
-  }
-
-  // TODO: in future may need to differentiate Dalvik accesses w/ spills
-  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
-    DCHECK_EQ(r_base, rs_rARM_SP);
-    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
-  }
-  return load;
-}
-
-LIR* ArmMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
-                              OpSize size, VolatileKind is_volatile) {
-  // TODO: base this on target.
-  if (size == kWord) {
-    size = k32;
-  }
-  LIR* load;
-  if (is_volatile == kVolatile && (size == k64 || size == kDouble) &&
-      !cu_->compiler_driver->GetInstructionSetFeatures()->
-          AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd()) {
-    // Only 64-bit load needs special handling.
-    // If the cpu supports LPAE, aligned LDRD is atomic - fall through to LoadBaseDispBody().
-    DCHECK(!r_dest.IsFloat());  // See RegClassForFieldLoadSave().
-    // Use LDREXD for the atomic load. (Expect displacement > 0, don't optimize for == 0.)
-    RegStorage r_ptr = AllocTemp();
-    OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
-    load = NewLIR3(kThumb2Ldrexd, r_dest.GetLowReg(), r_dest.GetHighReg(), r_ptr.GetReg());
-    FreeTemp(r_ptr);
-  } else {
-    load = LoadBaseDispBody(r_base, displacement, r_dest, size);
-  }
-
-  if (UNLIKELY(is_volatile == kVolatile)) {
-    GenMemBarrier(kLoadAny);
-  }
-
-  return load;
-}
-
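The LDREXD special case above has a rough C++-level analogue: an atomic 64-bit read followed by GenMemBarrier(kLoadAny) corresponds to an acquire load. A sketch, with std::atomic standing in for the emitted LDREXD + DMB (an analogy, not the generated code):

#include <atomic>
#include <cstdint>

int64_t LoadVolatile64(const std::atomic<int64_t>* addr) {
  // LDREXD gives single-copy atomicity for the 64-bit read; the trailing
  // kLoadAny barrier provides the acquire ordering modeled here.
  return addr->load(std::memory_order_acquire);
}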
-
-LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
-                                   OpSize size) {
-  LIR* store = nullptr;
-  ArmOpcode opcode16 = kThumbBkpt;  // 16-bit Thumb opcode.
-  ArmOpcode opcode32 = kThumbBkpt;  // 32-bit Thumb2 opcode.
-  bool short_form = false;
-  bool all_low = r_src.Is32Bit() && r_base.Low8() && r_src.Low8();
-  int scale = 0;  // Used for opcode16 and some indexed stores.
-  bool already_generated = false;
-  switch (size) {
-    case kDouble:
-    // Intentional fall-through.
-    case k64:
-      if (r_src.IsFloat()) {
-        // Note: If the register is retrieved by the register allocator, it should never be a pair.
-        // But some functions in mir2lir assume 64-bit registers are 32-bit register pairs.
-        // TODO: Rework Mir2Lir::LoadArg() and Mir2Lir::LoadArgDirect().
-        if (r_src.IsPair()) {
-          r_src = As64BitFloatReg(r_src);
-        }
-        DCHECK(!r_src.IsPair());
-        store = LoadStoreUsingInsnWithOffsetImm8Shl2(kThumb2Vstrd, r_base, displacement, r_src);
-      } else {
-        DCHECK(r_src.IsPair());
-        store = LoadStoreUsingInsnWithOffsetImm8Shl2(kThumb2StrdI8, r_base, displacement, r_src);
-      }
-      already_generated = true;
-      break;
-    case kSingle:
-    // Intentional fall-through.
-    case k32:
-    // Intentional fall-through.
-    case kReference:
-      if (r_src.IsFloat()) {
-        DCHECK(r_src.IsSingle());
-        store = LoadStoreUsingInsnWithOffsetImm8Shl2(kThumb2Vstrs, r_base, displacement, r_src);
-        already_generated = true;
-        break;
-      }
-      DCHECK_ALIGNED(displacement, 4);
-      scale = 2;
-      if (r_src.Low8() && (r_base == rs_r13sp) && (displacement <= 1020) && (displacement >= 0)) {
-        short_form = true;
-        opcode16 = kThumbStrSpRel;
-      } else {
-        short_form = all_low && (displacement >> (5 + scale)) == 0;
-        opcode16 = kThumbStrRRI5;
-        opcode32 = kThumb2StrRRI12;
-      }
-      break;
-    case kUnsignedHalf:
-    case kSignedHalf:
-      DCHECK_ALIGNED(displacement, 2);
-      scale = 1;
-      short_form = all_low && (displacement >> (5 + scale)) == 0;
-      opcode16 = kThumbStrhRRI5;
-      opcode32 = kThumb2StrhRRI12;
-      break;
-    case kUnsignedByte:
-    case kSignedByte:
-      DCHECK_EQ(scale, 0);  // Keep scale = 0.
-      short_form = all_low && (displacement >> (5 + scale)) == 0;
-      opcode16 = kThumbStrbRRI5;
-      opcode32 = kThumb2StrbRRI12;
-      break;
-    default:
-      LOG(FATAL) << "Bad size: " << size;
-  }
-  if (!already_generated) {
-    if (short_form) {
-      store = NewLIR3(opcode16, r_src.GetReg(), r_base.GetReg(), displacement >> scale);
-    } else if ((displacement >> 12) == 0) {
-      store = NewLIR3(opcode32, r_src.GetReg(), r_base.GetReg(), displacement);
-    } else if (!InexpensiveConstantInt(displacement >> scale, Instruction::CONST) &&
-        InexpensiveConstantInt(displacement & ~0x00000fff, Instruction::ADD_INT)) {
-      // In this case, using StoreIndexed would emit 3 insns (movw+movt+str) but we can
-      // actually do it in two because we know that the kOpAdd is a single insn. On the
-      // other hand, we introduce an extra dependency, so this is not necessarily faster.
-      RegStorage r_scratch = AllocTemp();
-      if (opcode16 != kThumbBkpt && r_src.Low8() && r_scratch.Low8() &&
-          InexpensiveConstantInt(displacement & ~(0x1f << scale), Instruction::ADD_INT)) {
-        // We can use the 16-bit Thumb opcode for the store.
-        OpRegRegImm(kOpAdd, r_scratch, r_base, displacement & ~(0x1f << scale));
-        store = NewLIR3(opcode16, r_src.GetReg(), r_scratch.GetReg(),
-                        (displacement >> scale) & 0x1f);
-      } else {
-        DCHECK_NE(opcode32, kThumbBkpt);
-        OpRegRegImm(kOpAdd, r_scratch, r_base, displacement & ~0x00000fff);
-        store = NewLIR3(opcode32, r_src.GetReg(), r_scratch.GetReg(), displacement & 0x00000fff);
-      }
-      FreeTemp(r_scratch);
-    } else {
-      if (!InexpensiveConstantInt(displacement >> scale, Instruction::CONST) ||
-          (scale != 0 && InexpensiveConstantInt(displacement, Instruction::CONST))) {
-        scale = 0;  // Prefer unscaled indexing if the same number of insns.
-      }
-      RegStorage r_scratch = AllocTemp();
-      LoadConstant(r_scratch, displacement >> scale);
-      DCHECK(!r_src.IsFloat());
-      store = StoreBaseIndexed(r_base, r_scratch, r_src, scale, size);
-      FreeTemp(r_scratch);
-    }
-  }
-
-  // TODO: In future, may need to differentiate Dalvik & spill accesses
-  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
-    DCHECK_EQ(r_base, rs_rARM_SP);
-    AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
-  }
-  return store;
-}
-
-LIR* ArmMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
-                               OpSize size, VolatileKind is_volatile) {
-  if (UNLIKELY(is_volatile == kVolatile)) {
-    // Ensure that prior accesses become visible to other threads first.
-    GenMemBarrier(kAnyStore);
-  }
-
-  LIR* null_ck_insn;
-  if (is_volatile == kVolatile && (size == k64 || size == kDouble) &&
-      !cu_->compiler_driver->GetInstructionSetFeatures()->
-          AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd()) {
-    // Only 64-bit store needs special handling.
-    // If the cpu supports LPAE, aligned STRD is atomic - fall through to StoreBaseDispBody().
-    // Use STREXD for the atomic store. (Expect displacement > 0, don't optimize for == 0.)
-    DCHECK(!r_src.IsFloat());  // See RegClassForFieldLoadSave().
-    RegStorage r_ptr = AllocTemp();
-    OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
-    LIR* fail_target = NewLIR0(kPseudoTargetLabel);
-    // We have only 5 temporary registers available and if r_base, r_src and r_ptr already
-    // take 4, we can't directly allocate 2 more for LDREXD temps. In that case clobber r_ptr
-    // in LDREXD and recalculate it from r_base.
-    RegStorage r_temp = AllocTemp();
-    RegStorage r_temp_high = AllocTemp(false);  // We may not have another temp.
-    if (r_temp_high.Valid()) {
-      null_ck_insn = NewLIR3(kThumb2Ldrexd, r_temp.GetReg(), r_temp_high.GetReg(), r_ptr.GetReg());
-      FreeTemp(r_temp_high);
-      FreeTemp(r_temp);
-    } else {
-      // If we don't have another temp, clobber r_ptr in LDREXD and reload it.
-      null_ck_insn = NewLIR3(kThumb2Ldrexd, r_temp.GetReg(), r_ptr.GetReg(), r_ptr.GetReg());
-      FreeTemp(r_temp);  // May need the temp for kOpAdd.
-      OpRegRegImm(kOpAdd, r_ptr, r_base, displacement);
-    }
-    NewLIR4(kThumb2Strexd, r_temp.GetReg(), r_src.GetLowReg(), r_src.GetHighReg(), r_ptr.GetReg());
-    OpCmpImmBranch(kCondNe, r_temp, 0, fail_target);
-    FreeTemp(r_ptr);
-  } else {
-    // TODO: base this on target.
-    if (size == kWord) {
-      size = k32;
-    }
-
-    null_ck_insn = StoreBaseDispBody(r_base, displacement, r_src, size);
-  }
-
-  if (UNLIKELY(is_volatile == kVolatile)) {
-    // Preserve order with respect to any subsequent volatile loads.
-    // We need StoreLoad, but that generally requires the most expensive barrier.
-    GenMemBarrier(kAnyAny);
-  }
-
-  return null_ck_insn;
-}
-
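Likewise, the kAnyStore barrier, STREXD retry loop, and kAnyAny barrier in StoreBaseDisp add up to a sequentially consistent 64-bit store. A rough analogue (again std::atomic as a model, not the emitted sequence):

#include <atomic>
#include <cstdint>

void StoreVolatile64(std::atomic<int64_t>* addr, int64_t value) {
  // barrier + LDREXD/STREXD retry loop + barrier ~= seq_cst store.
  addr->store(value, std::memory_order_seq_cst);
}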
-LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
-  int opcode;
-  DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
-  if (r_dest.IsDouble()) {
-    opcode = kThumb2Vmovd;
-  } else {
-    if (r_dest.IsSingle()) {
-      opcode = r_src.IsSingle() ? kThumb2Vmovs : kThumb2Fmsr;
-    } else {
-      DCHECK(r_src.IsSingle());
-      opcode = kThumb2Fmrs;
-    }
-  }
-  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
-  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
-    res->flags.is_nop = true;
-  }
-  return res;
-}
-
-LIR* ArmMir2Lir::OpMem(OpKind op ATTRIBUTE_UNUSED,
-                       RegStorage r_base ATTRIBUTE_UNUSED,
-                       int disp ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpMem for Arm";
-  UNREACHABLE();
-}
-
-LIR* ArmMir2Lir::InvokeTrampoline(OpKind op,
-                                  RegStorage r_tgt,
-                                  // The address of the trampoline is already loaded into r_tgt.
-                                  QuickEntrypointEnum trampoline ATTRIBUTE_UNUSED) {
-  return OpReg(op, r_tgt);
-}
-
-size_t ArmMir2Lir::GetInstructionOffset(LIR* lir) {
-  uint64_t check_flags = GetTargetInstFlags(lir->opcode);
-  DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
-  size_t offset = (check_flags & IS_TERTIARY_OP) ? lir->operands[2] : 0;
-
-  if (check_flags & SCALED_OFFSET_X2) {
-    offset = offset * 2;
-  } else if (check_flags & SCALED_OFFSET_X4) {
-    offset = offset * 4;
-  }
-  return offset;
-}
-
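To make the scaling concrete (operand value hypothetical):

// Illustration only: a word load encoded with lir->operands[2] == 5 and
// SCALED_OFFSET_X4 set denotes a byte offset of 5 * 4 == 20; halfword
// accesses use the SCALED_OFFSET_X2 (*2) scale instead.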
-void ArmMir2Lir::CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs) {
-  // Start with the default counts.
-  Mir2Lir::CountRefs(core_counts, fp_counts, num_regs);
-
-  if (pc_rel_temp_ != nullptr) {
-    // Now, if the dex cache array base temp is used only once outside any loops (weight = 1),
-    // avoid the promotion; otherwise boost the weight by a factor of 3, because the full
-    // PC-relative load sequence is 4 instructions long and promoting the PC base saves up to
-    // 3 instructions per use.
-    int p_map_idx = SRegToPMap(pc_rel_temp_->s_reg_low);
-    if (core_counts[p_map_idx].count == 1) {
-      core_counts[p_map_idx].count = 0;
-    } else {
-      core_counts[p_map_idx].count *= 3;
-    }
-  }
-}
-
-void ArmMir2Lir::DoPromotion() {
-  if (CanUseOpPcRelDexCacheArrayLoad()) {
-    pc_rel_temp_ = mir_graph_->GetNewCompilerTemp(kCompilerTempBackend, false);
-  }
-
-  Mir2Lir::DoPromotion();
-
-  if (pc_rel_temp_ != nullptr) {
-    // Now, if the dex cache array base temp is promoted, remember the register but
-    // always remove the temp's stack location to avoid unnecessarily bloating the stack.
-    dex_cache_arrays_base_reg_ = mir_graph_->reg_location_[pc_rel_temp_->s_reg_low].reg;
-    DCHECK(!dex_cache_arrays_base_reg_.Valid() || !dex_cache_arrays_base_reg_.IsFloat());
-    mir_graph_->RemoveLastCompilerTemp(kCompilerTempBackend, false, pc_rel_temp_);
-    pc_rel_temp_ = nullptr;
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
deleted file mode 100644
index 2253d10..0000000
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ /dev/null
@@ -1,440 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_ARM64_ARM64_LIR_H_
-#define ART_COMPILER_DEX_QUICK_ARM64_ARM64_LIR_H_
-
-#include "dex/compiler_enums.h"
-#include "dex/reg_location.h"
-#include "dex/reg_storage.h"
-
-namespace art {
-
-/*
- * Runtime register usage conventions.
- *
- * r0     : As in C/C++ w0 is 32-bit return register and x0 is 64-bit.
- * r0-r7  : Argument registers in both Dalvik and C/C++ conventions.
- *          However, for Dalvik->Dalvik calls we'll pass the target's Method*
- *          pointer in x0 as a hidden arg0. Otherwise used as codegen scratch
- *          registers.
- * r8-r15 : Caller save registers (used as temporary registers).
- * r16-r17: Also known as ip0-ip1, respectively. Used as scratch registers by
- *          the linker, by the trampolines and other stubs (the backend uses
- *          these as temporary registers).
- * r18    : Caller save register (used as temporary register).
- * r19    : (rxSELF) is reserved (pointer to thread-local storage).
- * r20-r29: Callee save registers (promotion targets).
- * r30    : (lr) is reserved (the link register).
- * rsp    : (sp) is reserved (the stack pointer).
- * rzr    : (zr) is reserved (the zero register).
- *
- * 19 core temps that codegen can use (r0-r18).
- * 9 core registers that can be used for promotion.
- *
- * Floating-point registers
- * v0-v31
- *
- * v0     : s0 is return register for singles (32-bit) and d0 for doubles (64-bit).
- *          This is analogous to the C/C++ (hard-float) calling convention.
- * v0-v7  : Floating-point argument registers in both Dalvik and C/C++ conventions.
- *          Also used as temporary and codegen scratch registers.
- *
- * v0-v7 and v16-v31 : trashed across C calls.
- * v8-v15 : bottom 64-bits preserved across C calls (d8-d15 are preserved).
- *
- * v16-v31: Used as codegen temp/scratch.
- * v8-v15 : Can be used for promotion.
- *
- * Calling convention (Hard-float)
- *     o On a call to a Dalvik method, pass target's Method* in x0
- *     o r1-r7, v0-v7 will be used for the first 7+8 arguments
- *     o Arguments which cannot be put in registers are placed in appropriate
- *       out slots by the caller.
- *     o Maintain a 16-byte stack alignment
- *
- *  Stack frame diagram (stack grows down, higher addresses at top):
- *
- * +--------------------------------------------+
- * | IN[ins-1]                                  |  {Note: resides in caller's frame}
- * |       .                                    |
- * | IN[0]                                      |
- * | caller's method ArtMethod*                 |  {Pointer sized reference}
- * +============================================+  {Note: start of callee's frame}
- * | spill region                               |  {variable sized - will include lr if non-leaf}
- * +--------------------------------------------+
- * |   ...filler word...                        |  {Note: used as 2nd word of V[locals-1] if long}
- * +--------------------------------------------+
- * | V[locals-1]                                |
- * | V[locals-2]                                |
- * |      .                                     |
- * |      .                                     |
- * | V[1]                                       |
- * | V[0]                                       |
- * +--------------------------------------------+
- * |   0 to 3 words padding                     |
- * +--------------------------------------------+
- * | OUT[outs-1]                                |
- * | OUT[outs-2]                                |
- * |       .                                    |
- * | OUT[0]                                     |
- * | current method ArtMethod*                  | <<== sp w/ 16-byte alignment
- * +============================================+
- */
-
-// First FP callee save.
-#define A64_FP_CALLEE_SAVE_BASE 8
-
-// Temporary macros, used to mark code which wants to distinguish between zr/sp.
-#define A64_REG_IS_SP(reg_num) ((reg_num) == rwsp || (reg_num) == rsp)
-#define A64_REG_IS_ZR(reg_num) ((reg_num) == rwzr || (reg_num) == rxzr)
-#define A64_REGSTORAGE_IS_SP_OR_ZR(rs) (((rs).GetRegNum() & 0x1f) == 0x1f)
-
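Both checks work because sp is register number 31 (0x1f) while zr is encoded as 0x3f; the low five bits are 0x1f in either case, which is all A64_REGSTORAGE_IS_SP_OR_ZR inspects:

static_assert((31 & 0x1f) == 0x1f, "sp: low five bits set");
static_assert((0x3f & 0x1f) == 0x1f, "zr: low five bits set");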
-enum A64ResourceEncodingPos {
-  kA64GPReg0   = 0,
-  kA64RegLR    = 30,
-  kA64RegSP    = 31,
-  kA64FPReg0   = 32,
-  kA64RegEnd   = 64,
-};
-
-#define IS_SIGNED_IMM(size, value) \
-  ((value) >= -(1 << ((size) - 1)) && (value) < (1 << ((size) - 1)))
-#define IS_SIGNED_IMM7(value) IS_SIGNED_IMM(7, value)
-#define IS_SIGNED_IMM9(value) IS_SIGNED_IMM(9, value)
-#define IS_SIGNED_IMM12(value) IS_SIGNED_IMM(12, value)
-#define IS_SIGNED_IMM14(value) IS_SIGNED_IMM(14, value)
-#define IS_SIGNED_IMM19(value) IS_SIGNED_IMM(19, value)
-#define IS_SIGNED_IMM21(value) IS_SIGNED_IMM(21, value)
-#define IS_SIGNED_IMM26(value) IS_SIGNED_IMM(26, value)
-
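For example, IS_SIGNED_IMM9 accepts exactly the two's-complement range of a 9-bit field, i.e. the immediates encodable by LDUR/STUR:

static_assert(IS_SIGNED_IMM9(-256), "lower bound of 9-bit signed range");
static_assert(IS_SIGNED_IMM9(255), "upper bound of 9-bit signed range");
static_assert(!IS_SIGNED_IMM9(256), "just out of range");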
-// Quick macro used to define the registers.
-#define A64_REGISTER_CODE_LIST(R) \
-  R(0)  R(1)  R(2)  R(3)  R(4)  R(5)  R(6)  R(7) \
-  R(8)  R(9)  R(10) R(11) R(12) R(13) R(14) R(15) \
-  R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
-  R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
-
-// Registers (integer) values.
-enum A64NativeRegisterPool {  // private marker to keep generate-operator-out.py from processing it.
-#  define A64_DEFINE_REGISTERS(nr) \
-    rw##nr = RegStorage::k32BitSolo | RegStorage::kCoreRegister | nr, \
-    rx##nr = RegStorage::k64BitSolo | RegStorage::kCoreRegister | nr, \
-    rf##nr = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | nr, \
-    rd##nr = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | nr,
-  A64_REGISTER_CODE_LIST(A64_DEFINE_REGISTERS)
-#undef A64_DEFINE_REGISTERS
-
-  rxzr = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 0x3f,
-  rwzr = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0x3f,
-  rsp = rx31,
-  rwsp = rw31,
-
-  // Aliases which are not defined in "ARM Architecture Reference, register names".
-  rxIP0 = rx16,
-  rxIP1 = rx17,
-  rxSELF = rx19,
-  rxLR = rx30,
-  /*
-   * FIXME: It's a bit awkward to define both 32 and 64-bit views of these - we'll only ever use
-   * the 64-bit view. However, for now we'll define a 32-bit view to keep these from being
-   * allocated as 32-bit temp registers.
-   */
-  rwIP0 = rw16,
-  rwIP1 = rw17,
-  rwSELF = rw19,
-  rwLR = rw30,
-};
-
-#define A64_DEFINE_REGSTORAGES(nr) \
-  constexpr RegStorage rs_w##nr(RegStorage::kValid | rw##nr); \
-  constexpr RegStorage rs_x##nr(RegStorage::kValid | rx##nr); \
-  constexpr RegStorage rs_f##nr(RegStorage::kValid | rf##nr); \
-  constexpr RegStorage rs_d##nr(RegStorage::kValid | rd##nr);
-A64_REGISTER_CODE_LIST(A64_DEFINE_REGSTORAGES)
-#undef A64_DEFINE_REGSTORAGES
-
-constexpr RegStorage rs_xzr(RegStorage::kValid | rxzr);
-constexpr RegStorage rs_wzr(RegStorage::kValid | rwzr);
-constexpr RegStorage rs_xIP0(RegStorage::kValid | rxIP0);
-constexpr RegStorage rs_wIP0(RegStorage::kValid | rwIP0);
-constexpr RegStorage rs_xIP1(RegStorage::kValid | rxIP1);
-constexpr RegStorage rs_wIP1(RegStorage::kValid | rwIP1);
-// Reserved registers.
-constexpr RegStorage rs_xSELF(RegStorage::kValid | rxSELF);
-constexpr RegStorage rs_sp(RegStorage::kValid | rsp);
-constexpr RegStorage rs_xLR(RegStorage::kValid | rxLR);
-// TODO: eliminate the need for these.
-constexpr RegStorage rs_wSELF(RegStorage::kValid | rwSELF);
-constexpr RegStorage rs_wsp(RegStorage::kValid | rwsp);
-constexpr RegStorage rs_wLR(RegStorage::kValid | rwLR);
-
-// RegisterLocation templates for return values (following the hard-float calling convention).
-const RegLocation a64_loc_c_return =
-    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_w0, INVALID_SREG, INVALID_SREG};
-const RegLocation a64_loc_c_return_ref =
-    {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1, rs_x0, INVALID_SREG, INVALID_SREG};
-const RegLocation a64_loc_c_return_wide =
-    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, rs_x0, INVALID_SREG, INVALID_SREG};
-const RegLocation a64_loc_c_return_float =
-    {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, rs_f0, INVALID_SREG, INVALID_SREG};
-const RegLocation a64_loc_c_return_double =
-    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, rs_d0, INVALID_SREG, INVALID_SREG};
-
-/**
- * @brief Shift-type to be applied to a register via EncodeShift().
- */
-enum A64ShiftEncodings {
-  kA64Lsl = 0x0,
-  kA64Lsr = 0x1,
-  kA64Asr = 0x2,
-  kA64Ror = 0x3
-};
-
-/**
- * @brief Extend-type to be applied to a register via EncodeExtend().
- */
-enum A64RegExtEncodings {
-  kA64Uxtb = 0x0,
-  kA64Uxth = 0x1,
-  kA64Uxtw = 0x2,
-  kA64Uxtx = 0x3,
-  kA64Sxtb = 0x4,
-  kA64Sxth = 0x5,
-  kA64Sxtw = 0x6,
-  kA64Sxtx = 0x7
-};
-
-#define ENCODE_NO_SHIFT (EncodeShift(kA64Lsl, 0))
-#define ENCODE_NO_EXTEND (EncodeExtend(kA64Uxtx, 0))
-/*
 * The following enum defines the list of A64 instructions supported by the
 * assembler. Their corresponding EncodingMap positions will be defined in
- * assemble_arm64.cc.
- */
-enum A64Opcode {
-  kA64First = 0,
-  kA64Adc3rrr = kA64First,  // adc [00011010000] rm[20-16] [000000] rn[9-5] rd[4-0].
-  kA64Add4RRdT,      // add [s001000100] imm_12[21-10] rn[9-5] rd[4-0].
-  kA64Add4rrro,      // add [00001011000] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
-  kA64Add4RRre,      // add [00001011001] rm[20-16] option[15-13] imm_3[12-10] rn[9-5] rd[4-0].
-  kA64Adr2xd,        // adr [0] immlo[30-29] [10000] immhi[23-5] rd[4-0].
-  kA64Adrp2xd,       // adrp [1] immlo[30-29] [10000] immhi[23-5] rd[4-0].
-  kA64And3Rrl,       // and [00010010] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
-  kA64And4rrro,      // and [00001010] shift[23-22] [N=0] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
-  kA64Asr3rrd,       // asr [0001001100] immr[21-16] imms[15-10] rn[9-5] rd[4-0].
-  kA64Asr3rrr,       // asr alias of "sbfm arg0, arg1, arg2, {#31/#63}".
-  kA64B2ct,          // b.cond [01010100] imm_19[23-5] [0] cond[3-0].
-  kA64Blr1x,         // blr [1101011000111111000000] rn[9-5] [00000].
-  kA64Br1x,          // br  [1101011000011111000000] rn[9-5] [00000].
-  kA64Bl1t,          // bl  [100101] imm26[25-0].
-  kA64Brk1d,         // brk [11010100001] imm_16[20-5] [00000].
-  kA64B1t,           // b   [00010100] offset_26[25-0].
-  kA64Cbnz2rt,       // cbnz[00110101] imm_19[23-5] rt[4-0].
-  kA64Cbz2rt,        // cbz [00110100] imm_19[23-5] rt[4-0].
-  kA64Cmn3rro,       // cmn [s0101011] shift[23-22] [0] rm[20-16] imm_6[15-10] rn[9-5] [11111].
-  kA64Cmn3Rre,       // cmn [s0101011001] rm[20-16] option[15-13] imm_3[12-10] rn[9-5] [11111].
-  kA64Cmn3RdT,       // cmn [00110001] shift[23-22] imm_12[21-10] rn[9-5] [11111].
-  kA64Cmp3rro,       // cmp [s1101011] shift[23-22] [0] rm[20-16] imm_6[15-10] rn[9-5] [11111].
-  kA64Cmp3Rre,       // cmp [s1101011001] rm[20-16] option[15-13] imm_3[12-10] rn[9-5] [11111].
-  kA64Cmp3RdT,       // cmp [01110001] shift[23-22] imm_12[21-10] rn[9-5] [11111].
-  kA64Csel4rrrc,     // csel[s0011010100] rm[20-16] cond[15-12] [00] rn[9-5] rd[4-0].
-  kA64Csinc4rrrc,    // csinc [s0011010100] rm[20-16] cond[15-12] [01] rn[9-5] rd[4-0].
-  kA64Csinv4rrrc,    // csinv [s1011010100] rm[20-16] cond[15-12] [00] rn[9-5] rd[4-0].
-  kA64Csneg4rrrc,    // csneg [s1011010100] rm[20-16] cond[15-12] [01] rn[9-5] rd[4-0].
-  kA64Dmb1B,         // dmb [11010101000000110011] CRm[11-8] [10111111].
-  kA64Eor3Rrl,       // eor [s10100100] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
-  kA64Eor4rrro,      // eor [s1001010] shift[23-22] [0] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
-  kA64Extr4rrrd,     // extr[s00100111N0] rm[20-16] imm_s[15-10] rn[9-5] rd[4-0].
-  kA64Fabs2ff,       // fabs[000111100s100000110000] rn[9-5] rd[4-0].
-  kA64Fadd3fff,      // fadd[000111100s1] rm[20-16] [001010] rn[9-5] rd[4-0].
-  kA64Fcmp1f,        // fcmp[000111100s100000001000] rn[9-5] [01000].
-  kA64Fcmp2ff,       // fcmp[000111100s1] rm[20-16] [001000] rn[9-5] [00000].
-  kA64Fcvtzs2wf,     // fcvtzs [000111100s111000000000] rn[9-5] rd[4-0].
-  kA64Fcvtzs2xf,     // fcvtzs [100111100s111000000000] rn[9-5] rd[4-0].
-  kA64Fcvt2Ss,       // fcvt   [0001111000100010110000] rn[9-5] rd[4-0].
-  kA64Fcvt2sS,       // fcvt   [0001111001100010010000] rn[9-5] rd[4-0].
-  kA64Fcvtms2ws,     // fcvtms [0001111000110000000000] rn[9-5] rd[4-0].
-  kA64Fcvtms2xS,     // fcvtms [1001111001110000000000] rn[9-5] rd[4-0].
-  kA64Fdiv3fff,      // fdiv[000111100s1] rm[20-16] [000110] rn[9-5] rd[4-0].
-  kA64Fmax3fff,      // fmax[000111100s1] rm[20-16] [010010] rn[9-5] rd[4-0].
-  kA64Fmin3fff,      // fmin[000111100s1] rm[20-16] [010110] rn[9-5] rd[4-0].
-  kA64Fmov2ff,       // fmov[000111100s100000010000] rn[9-5] rd[4-0].
-  kA64Fmov2fI,       // fmov[000111100s1] imm_8[20-13] [10000000] rd[4-0].
-  kA64Fmov2sw,       // fmov[0001111000100111000000] rn[9-5] rd[4-0].
-  kA64Fmov2Sx,       // fmov[1001111001100111000000] rn[9-5] rd[4-0].
-  kA64Fmov2ws,       // fmov[0001111001101110000000] rn[9-5] rd[4-0].
-  kA64Fmov2xS,       // fmov[1001111001101111000000] rn[9-5] rd[4-0].
-  kA64Fmul3fff,      // fmul[000111100s1] rm[20-16] [000010] rn[9-5] rd[4-0].
-  kA64Fneg2ff,       // fneg[000111100s100001010000] rn[9-5] rd[4-0].
-  kA64Frintp2ff,     // frintp [000111100s100100110000] rn[9-5] rd[4-0].
-  kA64Frintm2ff,     // frintm [000111100s100101010000] rn[9-5] rd[4-0].
-  kA64Frintn2ff,     // frintn [000111100s100100010000] rn[9-5] rd[4-0].
-  kA64Frintz2ff,     // frintz [000111100s100101110000] rn[9-5] rd[4-0].
-  kA64Fsqrt2ff,      // fsqrt[000111100s100001110000] rn[9-5] rd[4-0].
-  kA64Fsub3fff,      // fsub[000111100s1] rm[20-16] [001110] rn[9-5] rd[4-0].
-  kA64Ldrb3wXd,      // ldrb[0011100101] imm_12[21-10] rn[9-5] rt[4-0].
-  kA64Ldrb3wXx,      // ldrb[00111000011] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
-  kA64Ldrsb3rXd,     // ldrsb[001110011s] imm_12[21-10] rn[9-5] rt[4-0].
-  kA64Ldrsb3rXx,     // ldrsb[0011 1000 1s1] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
-  kA64Ldrh3wXF,      // ldrh[0111100101] imm_12[21-10] rn[9-5] rt[4-0].
-  kA64Ldrh4wXxd,     // ldrh[01111000011] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
-  kA64Ldrsh3rXF,     // ldrsh[011110011s] imm_12[21-10] rn[9-5] rt[4-0].
-  kA64Ldrsh4rXxd,    // ldrsh[011110001s1] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
-  kA64Ldr2fp,        // ldr [0s011100] imm_19[23-5] rt[4-0].
-  kA64Ldr2rp,        // ldr [0s011000] imm_19[23-5] rt[4-0].
-  kA64Ldr3fXD,       // ldr [1s11110100] imm_12[21-10] rn[9-5] rt[4-0].
-  kA64Ldr3rXD,       // ldr [1s111000010] imm_9[20-12] [01] rn[9-5] rt[4-0].
-  kA64Ldr4fXxG,      // ldr [1s111100011] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
-  kA64Ldr4rXxG,      // ldr [1s111000011] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
-  kA64LdrPost3rXd,   // ldr [1s111000010] imm_9[20-12] [01] rn[9-5] rt[4-0].
-  kA64Ldp4ffXD,      // ldp [0s10110101] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
-  kA64Ldp4rrXD,      // ldp [s010100101] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
-  kA64LdpPost4rrXD,  // ldp [s010100011] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
-  kA64Ldur3fXd,      // ldur[1s111100010] imm_9[20-12] [00] rn[9-5] rt[4-0].
-  kA64Ldur3rXd,      // ldur[1s111000010] imm_9[20-12] [00] rn[9-5] rt[4-0].
-  kA64Ldxr2rX,       // ldxr[1s00100001011111011111] rn[9-5] rt[4-0].
-  kA64Ldaxr2rX,      // ldaxr[1s00100001011111111111] rn[9-5] rt[4-0].
-  kA64Lsl3rrr,       // lsl [s0011010110] rm[20-16] [001000] rn[9-5] rd[4-0].
-  kA64Lsr3rrd,       // lsr alias of "ubfm arg0, arg1, arg2, #{31/63}".
-  kA64Lsr3rrr,       // lsr [s0011010110] rm[20-16] [001001] rn[9-5] rd[4-0].
-  kA64Madd4rrrr,     // madd[s0011011000] rm[20-16] [0] ra[14-10] rn[9-5] rd[4-0].
-  kA64Movk3rdM,      // mov [010100101] hw[22-21] imm_16[20-5] rd[4-0].
-  kA64Movn3rdM,      // mov [000100101] hw[22-21] imm_16[20-5] rd[4-0].
-  kA64Movz3rdM,      // mov [011100101] hw[22-21] imm_16[20-5] rd[4-0].
-  kA64Mov2rr,        // mov [00101010000] rm[20-16] [000000] [11111] rd[4-0].
-  kA64Mvn2rr,        // mov [00101010001] rm[20-16] [000000] [11111] rd[4-0].
-  kA64Mul3rrr,       // mul [00011011000] rm[20-16] [011111] rn[9-5] rd[4-0].
-  kA64Msub4rrrr,     // msub[s0011011000] rm[20-16] [1] ra[14-10] rn[9-5] rd[4-0].
-  kA64Neg3rro,       // neg alias of "sub arg0, rzr, arg1, arg2".
-  kA64Nop0,          // nop alias of "hint #0" [11010101000000110010000000011111].
-  kA64Orr3Rrl,       // orr [s01100100] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
-  kA64Orr4rrro,      // orr [s0101010] shift[23-22] [0] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
-  kA64Ret,           // ret [11010110010111110000001111000000].
-  kA64Rbit2rr,       // rbit [s101101011000000000000] rn[9-5] rd[4-0].
-  kA64Rev2rr,        // rev [s10110101100000000001x] rn[9-5] rd[4-0].
-  kA64Rev162rr,      // rev16[s101101011000000000001] rn[9-5] rd[4-0].
-  kA64Ror3rrr,       // ror [s0011010110] rm[20-16] [001011] rn[9-5] rd[4-0].
-  kA64Sbc3rrr,       // sbc [s0011010000] rm[20-16] [000000] rn[9-5] rd[4-0].
-  kA64Sbfm4rrdd,     // sbfm[0001001100] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
-  kA64Scvtf2fw,      // scvtf  [000111100s100010000000] rn[9-5] rd[4-0].
-  kA64Scvtf2fx,      // scvtf  [100111100s100010000000] rn[9-5] rd[4-0].
-  kA64Sdiv3rrr,      // sdiv[s0011010110] rm[20-16] [000011] rn[9-5] rd[4-0].
-  kA64Smull3xww,     // smull [10011011001] rm[20-16] [011111] rn[9-5] rd[4-0].
-  kA64Smulh3xxx,     // smulh [10011011010] rm[20-16] [011111] rn[9-5] rd[4-0].
-  kA64Stp4ffXD,      // stp [0s10110100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
-  kA64Stp4rrXD,      // stp [s010100100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
-  kA64StpPost4rrXD,  // stp [s010100010] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
-  kA64StpPre4ffXD,   // stp [0s10110110] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
-  kA64StpPre4rrXD,   // stp [s010100110] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
-  kA64Str3fXD,       // str [1s11110100] imm_12[21-10] rn[9-5] rt[4-0].
-  kA64Str4fXxG,      // str [1s111100001] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
-  kA64Str3rXD,       // str [1s11100100] imm_12[21-10] rn[9-5] rt[4-0].
-  kA64Str4rXxG,      // str [1s111000001] rm[20-16] option[15-13] S[12-12] [10] rn[9-5] rt[4-0].
-  kA64Strb3wXd,      // strb[0011100100] imm_12[21-10] rn[9-5] rt[4-0].
-  kA64Strb3wXx,      // strb[00111000001] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
-  kA64Strh3wXF,      // strh[0111100100] imm_12[21-10] rn[9-5] rt[4-0].
-  kA64Strh4wXxd,     // strh[01111000001] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
-  kA64StrPost3rXd,   // str [1s111000000] imm_9[20-12] [01] rn[9-5] rt[4-0].
-  kA64Stur3fXd,      // stur[1s111100000] imm_9[20-12] [00] rn[9-5] rt[4-0].
-  kA64Stur3rXd,      // stur[1s111000000] imm_9[20-12] [00] rn[9-5] rt[4-0].
-  kA64Stxr3wrX,      // stxr[11001000000] rs[20-16] [011111] rn[9-5] rt[4-0].
-  kA64Stlxr3wrX,     // stlxr[11001000000] rs[20-16] [111111] rn[9-5] rt[4-0].
-  kA64Sub4RRdT,      // sub [s101000100] imm_12[21-10] rn[9-5] rd[4-0].
-  kA64Sub4rrro,      // sub [s1001011000] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
-  kA64Sub4RRre,      // sub [s1001011001] rm[20-16] option[15-13] imm_3[12-10] rn[9-5] rd[4-0].
-  kA64Subs3rRd,      // subs[s111000100] imm_12[21-10] rn[9-5] rd[4-0].
-  kA64Tst2rl,        // tst alias of "ands rzr, rn, #imm".
-  kA64Tst3rro,       // tst alias of "ands rzr, arg1, arg2, arg3".
-  kA64Tbnz3rht,      // tbnz imm_6_b5[31] [0110111] imm_6_b40[23-19] imm_14[18-5] rt[4-0].
-  kA64Tbz3rht,       // tbz imm_6_b5[31] [0110110] imm_6_b40[23-19] imm_14[18-5] rt[4-0].
-  kA64Ubfm4rrdd,     // ubfm[s10100110] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
-  kA64Last,
-  kA64NotWide = kA64First,  // 0 - Flag used to select the first instruction variant.
-  kA64Wide = 0x1000         // Flag used to select the second instruction variant.
-};
-std::ostream& operator<<(std::ostream& os, const A64Opcode& rhs);
-
-/*
- * The A64 instruction set provides two variants for many instructions. For example, "mov wN, wM"
- * and "mov xN, xM" or - for floating point instructions - "mov sN, sM" and "mov dN, dM".
 * It definitely makes sense to exploit these symmetries of the instruction set. We do this via the
- * WIDE, UNWIDE macros. For opcodes that allow it, the wide variant can be obtained by applying the
- * WIDE macro to the non-wide opcode. E.g. WIDE(kA64Sub4RRdT).
- */
-
-// Return the wide and no-wide variants of the given opcode.
-#define WIDE(op) ((A64Opcode)((op) | kA64Wide))
-#define UNWIDE(op) ((A64Opcode)((op) & ~kA64Wide))
-
-// Whether the given opcode is wide.
-#define IS_WIDE(op) (((op) & kA64Wide) != 0)
-
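So, for example, the two variants of sub round-trip through the flag bit:

static_assert(IS_WIDE(WIDE(kA64Sub4RRdT)), "WIDE sets the flag");
static_assert(UNWIDE(WIDE(kA64Sub4RRdT)) == kA64Sub4RRdT, "UNWIDE clears it");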
-enum A64OpDmbOptions {
-  kSY = 0xf,
-  kST = 0xe,
-  kISH = 0xb,
-  kISHST = 0xa,
-  kISHLD = 0x9,
-  kNSH = 0x7,
-  kNSHST = 0x6
-};
-
-// Instruction assembly field_loc kind.
-enum A64EncodingKind {
-  // All the formats below are encoded in the same way (as a kFmtBitBlt).
-  // These are grouped together, for fast handling (e.g. "if (LIKELY(fmt <= kFmtBitBlt)) ...").
-  kFmtRegW = 0,   // Word register (w) or wzr.
-  kFmtRegX,       // Extended word register (x) or xzr.
-  kFmtRegR,       // Register with same width as the instruction or zr.
-  kFmtRegWOrSp,   // Word register (w) or wsp.
-  kFmtRegXOrSp,   // Extended word register (x) or sp.
-  kFmtRegROrSp,   // Register with same width as the instruction or sp.
-  kFmtRegS,       // Single FP reg.
-  kFmtRegD,       // Double FP reg.
-  kFmtRegF,       // Single/double FP reg depending on the instruction width.
-  kFmtBitBlt,     // Bit string using end/start.
-
-  // Less likely formats.
-  kFmtUnused,     // Unused field and marks end of formats.
-  kFmtImm6Shift,  // Shift immediate, 6-bit at [31, 23..19].
-  kFmtImm21,      // Sign-extended immediate using [23..5,30..29].
-  kFmtShift,      // Register shift, 9-bit at [23..21, 15..10].
-  kFmtExtend,     // Register extend, 9-bit at [23..21, 15..10].
-  kFmtSkip,       // Unused field, but continue to next.
-};
-std::ostream& operator<<(std::ostream& os, const A64EncodingKind& rhs);
-
-// Struct used to define the snippet positions for each A64 opcode.
-struct A64EncodingMap {
-  uint32_t wskeleton;
-  uint32_t xskeleton;
-  struct {
-    A64EncodingKind kind;
-    int end;         // end for kFmtBitBlt, 1-bit slice end for FP regs.
-    int start;       // start for kFmtBitBlt, 4-bit slice end for FP regs.
-  } field_loc[4];
-  A64Opcode opcode;  // can be WIDE()-ned to indicate it has a wide variant.
-  uint64_t flags;
-  const char* name;
-  const char* fmt;
-  int size;          // Note: size is in bytes.
-  FixupKind fixup;
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_ARM64_ARM64_LIR_H_
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
deleted file mode 100644
index 25c69d1..0000000
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ /dev/null
@@ -1,1152 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_arm64.h"
-
-#include "arch/arm64/instruction_set_features_arm64.h"
-#include "arm64_lir.h"
-#include "base/logging.h"
-#include "dex/compiler_ir.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "driver/compiler_driver.h"
-
-namespace art {
-
-// The macros below are exclusively used in the encoding map.
-
-// Most generic way of providing two variants for one instruction.
-#define CUSTOM_VARIANTS(variant1, variant2) variant1, variant2
-
-// Used for instructions which do not have a wide variant.
-#define NO_VARIANTS(variant) \
-  CUSTOM_VARIANTS(variant, 0)
-
-// Used for instructions which have a wide variant with the sf bit set to 1.
-#define SF_VARIANTS(sf0_skeleton) \
-  CUSTOM_VARIANTS(sf0_skeleton, (sf0_skeleton | 0x80000000))
-
-// Used for instructions which have a wide variant with the size bits set to either x0 or x1.
-#define SIZE_VARIANTS(sizex0_skeleton) \
-  CUSTOM_VARIANTS(sizex0_skeleton, (sizex0_skeleton | 0x40000000))
-
-// Used for instructions which have a wide variant with the sf and n bits set to 1.
-#define SF_N_VARIANTS(sf0_n0_skeleton) \
-  CUSTOM_VARIANTS(sf0_n0_skeleton, (sf0_n0_skeleton | 0x80400000))
-
-// Used for FP instructions which have single and double precision variants, with the type bits set
-// to either 00 or 01.
-#define FLOAT_VARIANTS(type00_skeleton) \
-  CUSTOM_VARIANTS(type00_skeleton, (type00_skeleton | 0x00400000))
-
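For instance, the entry for kA64Add4RRdT below uses SF_VARIANTS(0x11000000), which expands to the pair:

// 32-bit skeleton: 0x11000000 ("add wd, wn, #imm")
// 64-bit skeleton: 0x91000000 (same encoding with the sf bit, 0x80000000, set)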
-/*
- * opcode: A64Opcode enum
- * variants: instruction skeletons supplied via CUSTOM_VARIANTS or derived macros.
- * a{n}k: key to applying argument {n}    \
- * a{n}s: argument {n} start bit position | n = 0, 1, 2, 3
- * a{n}e: argument {n} end bit position   /
- * flags: instruction attributes (used in optimization)
- * name: mnemonic name
- * fmt: for pretty-printing
 * fixup: used for second-pass fixes (e.g. address fixups in branch instructions).
- */
-#define ENCODING_MAP(opcode, variants, a0k, a0s, a0e, a1k, a1s, a1e, a2k, a2s, a2e, \
-                     a3k, a3s, a3e, flags, name, fmt, fixup) \
-        {variants, {{a0k, a0s, a0e}, {a1k, a1s, a1e}, {a2k, a2s, a2e}, \
-                    {a3k, a3s, a3e}}, opcode, flags, name, fmt, 4, fixup}
-
-/* Instruction dump string format keys: !pf, where "!" is the start
- * of the key, "p" is which numeric operand to use and "f" is the
- * print format.
- *
- * [p]ositions:
- *     0 -> operands[0] (dest)
- *     1 -> operands[1] (src1)
- *     2 -> operands[2] (src2)
- *     3 -> operands[3] (extra)
- *
- * [f]ormats:
- *     d -> decimal
- *     D -> decimal*4 or decimal*8 depending on the instruction width
- *     E -> decimal*4
- *     F -> decimal*2
- *     G -> ", lsl #2" or ", lsl #3" depending on the instruction width
- *     c -> branch condition (eq, ne, etc.)
- *     t -> pc-relative target
- *     p -> pc-relative address
- *     s -> single precision floating point register
- *     S -> double precision floating point register
- *     f -> single or double precision register (depending on instruction width)
- *     I -> 8-bit immediate floating point number
- *     l -> logical immediate
- *     M -> 16-bit shift expression ("" or ", lsl #16" or ", lsl #32"...)
 *     B -> dmb option string (sy, st, ish, ishst, nsh, nshst)
- *     H -> operand shift
- *     h -> 6-bit shift immediate
- *     T -> register shift (either ", lsl #0" or ", lsl #12")
- *     e -> register extend (e.g. uxtb #1)
- *     o -> register shift (e.g. lsl #1) for Word registers
- *     w -> word (32-bit) register wn, or wzr
- *     W -> word (32-bit) register wn, or wsp
- *     x -> extended (64-bit) register xn, or xzr
- *     X -> extended (64-bit) register xn, or sp
- *     r -> register with same width as instruction, r31 -> wzr, xzr
- *     R -> register with same width as instruction, r31 -> wsp, sp
- *
- *  [!] escape.  To insert "!", use "!!"
- */
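As an illustrative reading of these keys (the rendered text is an assumption about the pretty-printer, not captured output):

// kA64Add4RRdT has fmt "!0R, !1R, #!2d!3T", so with operands {0, 1, 16, 0}
// the non-wide variant would print roughly as
//   add w0, w1, #16
// and the WIDE variant (sf bit set via SF_VARIANTS) as
//   add x0, x1, #16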
-/* NOTE: must be kept in sync with enum A64Opcode from arm64_lir.h */
-const A64EncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
-    ENCODING_MAP(WIDE(kA64Adc3rrr), SF_VARIANTS(0x1a000000),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | USES_CCODES,
-                 "adc", "!0r, !1r, !2r", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Add4RRdT), SF_VARIANTS(0x11000000),
-                 kFmtRegROrSp, 4, 0, kFmtRegROrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtBitBlt, 23, 22, IS_QUAD_OP | REG_DEF0_USE1,
-                 "add", "!0R, !1R, #!2d!3T", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Add4rrro), SF_VARIANTS(0x0b000000),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
-                 "add", "!0r, !1r, !2r!3o", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Add4RRre), SF_VARIANTS(0x0b200000),
-                 kFmtRegROrSp, 4, 0, kFmtRegROrSp, 9, 5, kFmtRegR, 20, 16,
-                 kFmtExtend, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
-                 "add", "!0r, !1r, !2r!3e", kFixupNone),
-    // Note: adr is binary, but declared as tertiary. The third argument is used while doing the
-    //   fixups and contains information to identify the adr label.
-    ENCODING_MAP(kA64Adr2xd, NO_VARIANTS(0x10000000),
-                 kFmtRegX, 4, 0, kFmtImm21, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | NEEDS_FIXUP,
-                 "adr", "!0x, #!1d", kFixupAdr),
-    ENCODING_MAP(kA64Adrp2xd, NO_VARIANTS(0x90000000),
-                 kFmtRegX, 4, 0, kFmtImm21, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0 | NEEDS_FIXUP,
-                 "adrp", "!0x, #!1d", kFixupLabel),
-    ENCODING_MAP(WIDE(kA64And3Rrl), SF_VARIANTS(0x12000000),
-                 kFmtRegROrSp, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 22, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "and", "!0R, !1r, #!2l", kFixupNone),
-    ENCODING_MAP(WIDE(kA64And4rrro), SF_VARIANTS(0x0a000000),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
-                 "and", "!0r, !1r, !2r!3o", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Asr3rrd), CUSTOM_VARIANTS(0x13007c00, 0x9340fc00),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 21, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "asr", "!0r, !1r, #!2d", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Asr3rrr), SF_VARIANTS(0x1ac02800),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "asr", "!0r, !1r, !2r", kFixupNone),
-    ENCODING_MAP(kA64B2ct, NO_VARIANTS(0x54000000),
-                 kFmtBitBlt, 3, 0, kFmtBitBlt, 23, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | USES_CCODES |
-                 NEEDS_FIXUP, "b.!0c", "!1t", kFixupCondBranch),
-    ENCODING_MAP(kA64Blr1x, NO_VARIANTS(0xd63f0000),
-                 kFmtRegX, 9, 5, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_UNARY_OP | REG_USE0 | IS_BRANCH | REG_DEF_LR,
-                 "blr", "!0x", kFixupNone),
-    ENCODING_MAP(kA64Br1x, NO_VARIANTS(0xd61f0000),
-                 kFmtRegX, 9, 5, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | REG_USE0 | IS_BRANCH,
-                 "br", "!0x", kFixupNone),
-    ENCODING_MAP(kA64Bl1t, NO_VARIANTS(0x94000000),
-                 kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR | NEEDS_FIXUP,
-                 "bl", "!0T", kFixupLabel),
-    ENCODING_MAP(kA64Brk1d, NO_VARIANTS(0xd4200000),
-                 kFmtBitBlt, 20, 5, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH,
-                 "brk", "!0d", kFixupNone),
-    ENCODING_MAP(kA64B1t, NO_VARIANTS(0x14000000),
-                 kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP,
-                 "b", "!0t", kFixupT1Branch),
-    ENCODING_MAP(WIDE(kA64Cbnz2rt), SF_VARIANTS(0x35000000),
-                 kFmtRegR, 4, 0, kFmtBitBlt, 23, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_USE0 | IS_BRANCH | NEEDS_FIXUP,
-                 "cbnz", "!0r, !1t", kFixupCBxZ),
-    ENCODING_MAP(WIDE(kA64Cbz2rt), SF_VARIANTS(0x34000000),
-                 kFmtRegR, 4, 0, kFmtBitBlt, 23, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_USE0 | IS_BRANCH | NEEDS_FIXUP,
-                 "cbz", "!0r, !1t", kFixupCBxZ),
-    ENCODING_MAP(WIDE(kA64Cmn3rro), SF_VARIANTS(0x2b00001f),
-                 kFmtRegR, 9, 5, kFmtRegR, 20, 16, kFmtShift, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | SETS_CCODES,
-                 "cmn", "!0r, !1r!2o", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Cmn3Rre), SF_VARIANTS(0x2b20001f),
-                 kFmtRegROrSp, 9, 5, kFmtRegR, 20, 16, kFmtExtend, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | SETS_CCODES,
-                 "cmn", "!0R, !1r!2e", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Cmn3RdT), SF_VARIANTS(0x3100001f),
-                 kFmtRegROrSp, 9, 5, kFmtBitBlt, 21, 10, kFmtBitBlt, 23, 22,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE0 | SETS_CCODES,
-                 "cmn", "!0R, #!1d!2T", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Cmp3rro), SF_VARIANTS(0x6b00001f),
-                 kFmtRegR, 9, 5, kFmtRegR, 20, 16, kFmtShift, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | SETS_CCODES,
-                 "cmp", "!0r, !1r!2o", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Cmp3Rre), SF_VARIANTS(0x6b20001f),
-                 kFmtRegROrSp, 9, 5, kFmtRegR, 20, 16, kFmtExtend, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | SETS_CCODES,
-                 "cmp", "!0R, !1r!2e", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Cmp3RdT), SF_VARIANTS(0x7100001f),
-                 kFmtRegROrSp, 9, 5, kFmtBitBlt, 21, 10, kFmtBitBlt, 23, 22,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE0 | SETS_CCODES,
-                 "cmp", "!0R, #!1d!2T", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Csel4rrrc), SF_VARIANTS(0x1a800000),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtBitBlt, 15, 12, IS_QUAD_OP | REG_DEF0_USE12 | USES_CCODES,
-                 "csel", "!0r, !1r, !2r, !3c", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Csinc4rrrc), SF_VARIANTS(0x1a800400),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtBitBlt, 15, 12, IS_QUAD_OP | REG_DEF0_USE12 | USES_CCODES,
-                 "csinc", "!0r, !1r, !2r, !3c", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Csinv4rrrc), SF_VARIANTS(0x5a800000),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtBitBlt, 15, 12, IS_QUAD_OP | REG_DEF0_USE12 | USES_CCODES,
-                 "csinv", "!0r, !1r, !2r, !3c", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Csneg4rrrc), SF_VARIANTS(0x5a800400),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtBitBlt, 15, 12, IS_QUAD_OP | REG_DEF0_USE12 | USES_CCODES,
-                 "csneg", "!0r, !1r, !2r, !3c", kFixupNone),
-    ENCODING_MAP(kA64Dmb1B, NO_VARIANTS(0xd50330bf),
-                 kFmtBitBlt, 11, 8, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_VOLATILE,
-                 "dmb", "#!0B", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Eor3Rrl), SF_VARIANTS(0x52000000),
-                 kFmtRegROrSp, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 22, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "eor", "!0R, !1r, #!2l", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Eor4rrro), SF_VARIANTS(0x4a000000),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
-                 "eor", "!0r, !1r, !2r!3o", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Extr4rrrd), SF_N_VARIANTS(0x13800000),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtBitBlt, 15, 10, IS_QUAD_OP | REG_DEF0_USE12,
-                 "extr", "!0r, !1r, !2r, #!3d", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Fabs2ff), FLOAT_VARIANTS(0x1e20c000),
-                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "fabs", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Fadd3fff), FLOAT_VARIANTS(0x1e202800),
-                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtRegF, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "fadd", "!0f, !1f, !2f", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Fcmp1f), FLOAT_VARIANTS(0x1e202008),
-                 kFmtRegF, 9, 5, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | REG_USE0 | SETS_CCODES,
-                 "fcmp", "!0f, #0", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Fcmp2ff), FLOAT_VARIANTS(0x1e202000),
-                 kFmtRegF, 9, 5, kFmtRegF, 20, 16, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
-                 "fcmp", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Fcvtzs2wf), FLOAT_VARIANTS(0x1e380000),
-                 kFmtRegW, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "fcvtzs", "!0w, !1f", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Fcvtzs2xf), FLOAT_VARIANTS(0x9e380000),
-                 kFmtRegX, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "fcvtzs", "!0x, !1f", kFixupNone),
-    ENCODING_MAP(kA64Fcvt2Ss, NO_VARIANTS(0x1e22c000),
-                 kFmtRegD, 4, 0, kFmtRegS, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "fcvt", "!0S, !1s", kFixupNone),
-    ENCODING_MAP(kA64Fcvt2sS, NO_VARIANTS(0x1e624000),
-                 kFmtRegS, 4, 0, kFmtRegD, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "fcvt", "!0s, !1S", kFixupNone),
-    ENCODING_MAP(kA64Fcvtms2ws, NO_VARIANTS(0x1e300000),
-                 kFmtRegW, 4, 0, kFmtRegS, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "fcvtms", "!0w, !1s", kFixupNone),
-    ENCODING_MAP(kA64Fcvtms2xS, NO_VARIANTS(0x9e700000),
-                 kFmtRegX, 4, 0, kFmtRegD, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "fcvtms", "!0x, !1S", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Fdiv3fff), FLOAT_VARIANTS(0x1e201800),
-                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtRegF, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "fdiv", "!0f, !1f, !2f", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Fmax3fff), FLOAT_VARIANTS(0x1e204800),
-                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtRegF, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "fmax", "!0f, !1f, !2f", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Fmin3fff), FLOAT_VARIANTS(0x1e205800),
-                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtRegF, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "fmin", "!0f, !1f, !2f", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Fmov2ff), FLOAT_VARIANTS(0x1e204000),
-                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
-                 "fmov", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Fmov2fI), FLOAT_VARIANTS(0x1e201000),
-                 kFmtRegF, 4, 0, kFmtBitBlt, 20, 13, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
-                 "fmov", "!0f, #!1I", kFixupNone),
-    ENCODING_MAP(kA64Fmov2sw, NO_VARIANTS(0x1e270000),
-                 kFmtRegS, 4, 0, kFmtRegW, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "fmov", "!0s, !1w", kFixupNone),
-    ENCODING_MAP(kA64Fmov2Sx, NO_VARIANTS(0x9e670000),
-                 kFmtRegD, 4, 0, kFmtRegX, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "fmov", "!0S, !1x", kFixupNone),
-    ENCODING_MAP(kA64Fmov2ws, NO_VARIANTS(0x1e260000),
-                 kFmtRegW, 4, 0, kFmtRegS, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "fmov", "!0w, !1s", kFixupNone),
-    ENCODING_MAP(kA64Fmov2xS, NO_VARIANTS(0x9e660000),
-                 kFmtRegX, 4, 0, kFmtRegD, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "fmov", "!0x, !1S", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Fmul3fff), FLOAT_VARIANTS(0x1e200800),
-                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtRegF, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "fmul", "!0f, !1f, !2f", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Fneg2ff), FLOAT_VARIANTS(0x1e214000),
-                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "fneg", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Frintp2ff), FLOAT_VARIANTS(0x1e24c000),
-                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "frintp", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Frintm2ff), FLOAT_VARIANTS(0x1e254000),
-                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "frintm", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Frintn2ff), FLOAT_VARIANTS(0x1e244000),
-                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "frintn", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Frintz2ff), FLOAT_VARIANTS(0x1e25c000),
-                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "frintz", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Fsqrt2ff), FLOAT_VARIANTS(0x1e61c000),
-                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "fsqrt", "!0f, !1f", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Fsub3fff), FLOAT_VARIANTS(0x1e203800),
-                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtRegF, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "fsub", "!0f, !1f, !2f", kFixupNone),
-    ENCODING_MAP(kA64Ldrb3wXd, NO_VARIANTS(0x39400000),
-                 kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
-                 "ldrb", "!0w, [!1X, #!2d]", kFixupNone),
-    ENCODING_MAP(kA64Ldrb3wXx, NO_VARIANTS(0x38606800),
-                 kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
-                 "ldrb", "!0w, [!1X, !2x]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Ldrsb3rXd), CUSTOM_VARIANTS(0x39c00000, 0x39800000),
-                 kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
-                 "ldrsb", "!0r, [!1X, #!2d]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Ldrsb3rXx), CUSTOM_VARIANTS(0x38e06800, 0x38a06800),
-                 kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
-                 "ldrsb", "!0r, [!1X, !2x]", kFixupNone),
-    ENCODING_MAP(kA64Ldrh3wXF, NO_VARIANTS(0x79400000),
-                 kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
-                 "ldrh", "!0w, [!1X, #!2F]", kFixupNone),
-    ENCODING_MAP(kA64Ldrh4wXxd, NO_VARIANTS(0x78606800),
-                 kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
-                 kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
-                 "ldrh", "!0w, [!1X, !2x, lsl #!3d]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Ldrsh3rXF), CUSTOM_VARIANTS(0x79c00000, 0x79800000),
-                 kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
-                 "ldrsh", "!0r, [!1X, #!2F]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Ldrsh4rXxd), CUSTOM_VARIANTS(0x78e06800, 0x78906800),
-                 kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
-                 kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
-                 "ldrsh", "!0r, [!1X, !2x, lsl #!3d]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Ldr2fp), SIZE_VARIANTS(0x1c000000),
-                 kFmtRegF, 4, 0, kFmtBitBlt, 23, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD | NEEDS_FIXUP,
-                 "ldr", "!0f, !1p", kFixupLoad),
-    ENCODING_MAP(WIDE(kA64Ldr2rp), SIZE_VARIANTS(0x18000000),
-                 kFmtRegR, 4, 0, kFmtBitBlt, 23, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1,
-                 IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD | NEEDS_FIXUP,
-                 "ldr", "!0r, !1p", kFixupLoad),
-    ENCODING_MAP(WIDE(kA64Ldr3fXD), SIZE_VARIANTS(0xbd400000),
-                 kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
-                 "ldr", "!0f, [!1X, #!2D]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Ldr3rXD), SIZE_VARIANTS(0xb9400000),
-                 kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
-                 "ldr", "!0r, [!1X, #!2D]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Ldr4fXxG), SIZE_VARIANTS(0xbc606800),
-                 kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
-                 kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
-                 "ldr", "!0f, [!1X, !2x!3G]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Ldr4rXxG), SIZE_VARIANTS(0xb8606800),
-                 kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
-                 kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
-                 "ldr", "!0r, [!1X, !2x!3G]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64LdrPost3rXd), SIZE_VARIANTS(0xb8400400),
-                 kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 20, 12,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF01 | REG_USE1 | IS_LOAD,
-                 "ldr", "!0r, [!1X], #!2d", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Ldp4ffXD), CUSTOM_VARIANTS(0x2d400000, 0x6d400000),
-                 kFmtRegF, 4, 0, kFmtRegF, 14, 10, kFmtRegXOrSp, 9, 5,
-                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD_OFF,
-                 "ldp", "!0f, !1f, [!2X, #!3D]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Ldp4rrXD), SF_VARIANTS(0x29400000),
-                 kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
-                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD_OFF,
-                 "ldp", "!0r, !1r, [!2X, #!3D]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64LdpPost4rrXD), CUSTOM_VARIANTS(0x28c00000, 0xa8c00000),
-                 kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
-                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF012 | IS_LOAD,
-                 "ldp", "!0r, !1r, [!2X], #!3D", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Ldur3fXd), CUSTOM_VARIANTS(0xbc400000, 0xfc400000),
-                 kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 20, 12,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
-                 "ldur", "!0f, [!1X, #!2d]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Ldur3rXd), SIZE_VARIANTS(0xb8400000),
-                 kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 20, 12,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
-                 "ldur", "!0r, [!1X, #!2d]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Ldxr2rX), SIZE_VARIANTS(0x885f7c00),
-                 kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOADX,
-                 "ldxr", "!0r, [!1X]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Ldaxr2rX), SIZE_VARIANTS(0x885ffc00),
-                 kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOADX,
-                 "ldaxr", "!0r, [!1X]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Lsl3rrr), SF_VARIANTS(0x1ac02000),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "lsl", "!0r, !1r, !2r", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Lsr3rrd), CUSTOM_VARIANTS(0x53007c00, 0xd340fc00),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 21, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "lsr", "!0r, !1r, #!2d", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Lsr3rrr), SF_VARIANTS(0x1ac02400),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "lsr", "!0r, !1r, !2r", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Madd4rrrr), SF_VARIANTS(0x1b000000),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtRegR, 14, 10, IS_QUAD_OP | REG_DEF0_USE123 | NEEDS_FIXUP,
-                 "madd", "!0r, !1r, !2r, !3r", kFixupA53Erratum835769),
-    ENCODING_MAP(WIDE(kA64Movk3rdM), SF_VARIANTS(0x72800000),
-                 kFmtRegR, 4, 0, kFmtBitBlt, 20, 5, kFmtBitBlt, 22, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE0,
-                 "movk", "!0r, #!1d!2M", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Movn3rdM), SF_VARIANTS(0x12800000),
-                 kFmtRegR, 4, 0, kFmtBitBlt, 20, 5, kFmtBitBlt, 22, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0,
-                 "movn", "!0r, #!1d!2M", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Movz3rdM), SF_VARIANTS(0x52800000),
-                 kFmtRegR, 4, 0, kFmtBitBlt, 20, 5, kFmtBitBlt, 22, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0,
-                 "movz", "!0r, #!1d!2M", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Mov2rr), SF_VARIANTS(0x2a0003e0),
-                 kFmtRegR, 4, 0, kFmtRegR, 20, 16, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
-                 "mov", "!0r, !1r", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Mvn2rr), SF_VARIANTS(0x2a2003e0),
-                 kFmtRegR, 4, 0, kFmtRegR, 20, 16, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "mvn", "!0r, !1r", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Mul3rrr), SF_VARIANTS(0x1b007c00),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "mul", "!0r, !1r, !2r", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Msub4rrrr), SF_VARIANTS(0x1b008000),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtRegR, 14, 10, IS_QUAD_OP | REG_DEF0_USE123 | NEEDS_FIXUP,
-                 "msub", "!0r, !1r, !2r, !3r", kFixupA53Erratum835769),
-    ENCODING_MAP(WIDE(kA64Neg3rro), SF_VARIANTS(0x4b0003e0),
-                 kFmtRegR, 4, 0, kFmtRegR, 20, 16, kFmtShift, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "neg", "!0r, !1r!2o", kFixupNone),
-    ENCODING_MAP(kA64Nop0, NO_VARIANTS(0xd503201f),
-                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, NO_OPERAND,
-                 "nop", "", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Orr3Rrl), SF_VARIANTS(0x32000000),
-                 kFmtRegROrSp, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 22, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "orr", "!0R, !1r, #!2l", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Orr4rrro), SF_VARIANTS(0x2a000000),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
-                 "orr", "!0r, !1r, !2r!3o", kFixupNone),
-    ENCODING_MAP(kA64Ret, NO_VARIANTS(0xd65f03c0),
-                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH,
-                 "ret", "", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Rbit2rr), SF_VARIANTS(0x5ac00000),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "rbit", "!0r, !1r", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Rev2rr), CUSTOM_VARIANTS(0x5ac00800, 0xdac00c00),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "rev", "!0r, !1r", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Rev162rr), SF_VARIANTS(0x5ac00400),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "rev16", "!0r, !1r", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Ror3rrr), SF_VARIANTS(0x1ac02c00),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "ror", "!0r, !1r, !2r", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Sbc3rrr), SF_VARIANTS(0x5a000000),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | USES_CCODES,
-                 "sbc", "!0r, !1r, !2r", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Sbfm4rrdd), SF_N_VARIANTS(0x13000000),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 21, 16,
-                 kFmtBitBlt, 15, 10, IS_QUAD_OP | REG_DEF0_USE1,
-                 "sbfm", "!0r, !1r, #!2d, #!3d", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Scvtf2fw), FLOAT_VARIANTS(0x1e220000),
-                 kFmtRegF, 4, 0, kFmtRegW, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "scvtf", "!0f, !1w", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Scvtf2fx), FLOAT_VARIANTS(0x9e220000),
-                 kFmtRegF, 4, 0, kFmtRegX, 9, 5, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "scvtf", "!0f, !1x", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Sdiv3rrr), SF_VARIANTS(0x1ac00c00),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "sdiv", "!0r, !1r, !2r", kFixupNone),
-    ENCODING_MAP(kA64Smull3xww, NO_VARIANTS(0x9b207c00),
-                 kFmtRegX, 4, 0, kFmtRegW, 9, 5, kFmtRegW, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "smull", "!0x, !1w, !2w", kFixupNone),
-    ENCODING_MAP(kA64Smulh3xxx, NO_VARIANTS(0x9b407c00),
-                 kFmtRegX, 4, 0, kFmtRegX, 9, 5, kFmtRegX, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "smulh", "!0x, !1x, !2x", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Stp4ffXD), CUSTOM_VARIANTS(0x2d000000, 0x6d000000),
-                 kFmtRegF, 4, 0, kFmtRegF, 14, 10, kFmtRegXOrSp, 9, 5,
-                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
-                 "stp", "!0f, !1f, [!2X, #!3D]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Stp4rrXD), SF_VARIANTS(0x29000000),
-                 kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
-                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
-                 "stp", "!0r, !1r, [!2X, #!3D]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64StpPost4rrXD), CUSTOM_VARIANTS(0x28800000, 0xa8800000),
-                 kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
-                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
-                 "stp", "!0r, !1r, [!2X], #!3D", kFixupNone),
-    ENCODING_MAP(WIDE(kA64StpPre4ffXD), CUSTOM_VARIANTS(0x2d800000, 0x6d800000),
-                 kFmtRegF, 4, 0, kFmtRegF, 14, 10, kFmtRegXOrSp, 9, 5,
-                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
-                 "stp", "!0f, !1f, [!2X, #!3D]!!", kFixupNone),
-    ENCODING_MAP(WIDE(kA64StpPre4rrXD), CUSTOM_VARIANTS(0x29800000, 0xa9800000),
-                 kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
-                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
-                 "stp", "!0r, !1r, [!2X, #!3D]!!", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Str3fXD), CUSTOM_VARIANTS(0xbd000000, 0xfd000000),
-                 kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
-                 "str", "!0f, [!1X, #!2D]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Str4fXxG), CUSTOM_VARIANTS(0xbc206800, 0xfc206800),
-                 kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
-                 kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_USE012 | IS_STORE,
-                 "str", "!0f, [!1X, !2x!3G]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Str3rXD), SIZE_VARIANTS(0xb9000000),
-                 kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
-                 "str", "!0r, [!1X, #!2D]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Str4rXxG), SIZE_VARIANTS(0xb8206800),
-                 kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
-                 kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_USE012 | IS_STORE,
-                 "str", "!0r, [!1X, !2x!3G]", kFixupNone),
-    ENCODING_MAP(kA64Strb3wXd, NO_VARIANTS(0x39000000),
-                 kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
-                 "strb", "!0w, [!1X, #!2d]", kFixupNone),
-    ENCODING_MAP(kA64Strb3wXx, NO_VARIANTS(0x38206800),
-                 kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE012 | IS_STORE,
-                 "strb", "!0w, [!1X, !2x]", kFixupNone),
-    ENCODING_MAP(kA64Strh3wXF, NO_VARIANTS(0x79000000),
-                 kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
-                 "strh", "!0w, [!1X, #!2F]", kFixupNone),
-    ENCODING_MAP(kA64Strh4wXxd, NO_VARIANTS(0x78206800),
-                 kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
-                 kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_USE012 | IS_STORE,
-                 "strh", "!0w, [!1X, !2x, lsl #!3d]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64StrPost3rXd), SIZE_VARIANTS(0xb8000400),
-                 kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 20, 12,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | REG_DEF1 | IS_STORE,
-                 "str", "!0r, [!1X], #!2d", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Stur3fXd), CUSTOM_VARIANTS(0xbc000000, 0xfc000000),
-                 kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 20, 12,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
-                 "stur", "!0f, [!1X, #!2d]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Stur3rXd), SIZE_VARIANTS(0xb8000000),
-                 kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 20, 12,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
-                 "stur", "!0r, [!1X, #!2d]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Stxr3wrX), SIZE_VARIANTS(0x88007c00),
-                 kFmtRegW, 20, 16, kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STOREX,
-                 "stxr", "!0w, !1r, [!2X]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Stlxr3wrX), SIZE_VARIANTS(0x8800fc00),
-                 kFmtRegW, 20, 16, kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STOREX,
-                 "stlxr", "!0w, !1r, [!2X]", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Sub4RRdT), SF_VARIANTS(0x51000000),
-                 kFmtRegROrSp, 4, 0, kFmtRegROrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtBitBlt, 23, 22, IS_QUAD_OP | REG_DEF0_USE1,
-                 "sub", "!0R, !1R, #!2d!3T", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Sub4rrro), SF_VARIANTS(0x4b000000),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
-                 "sub", "!0r, !1r, !2r!3o", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Sub4RRre), SF_VARIANTS(0x4b200000),
-                 kFmtRegROrSp, 4, 0, kFmtRegROrSp, 9, 5, kFmtRegR, 20, 16,
-                 kFmtExtend, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
-                 "sub", "!0r, !1r, !2r!3e", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Subs3rRd), SF_VARIANTS(0x71000000),
-                 kFmtRegR, 4, 0, kFmtRegROrSp, 9, 5, kFmtBitBlt, 21, 10,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
-                 "subs", "!0r, !1R, #!2d", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Tst2rl), SF_VARIANTS(0x7200001f),
-                 kFmtRegR, 9, 5, kFmtBitBlt, 22, 10, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE0 | SETS_CCODES,
-                 "tst", "!0r, !1l", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Tst3rro), SF_VARIANTS(0x6a00001f),
-                 kFmtRegR, 9, 5, kFmtRegR, 20, 16, kFmtShift, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | SETS_CCODES,
-                 "tst", "!0r, !1r!2o", kFixupNone),
-    // NOTE: Tbz/Tbnz does not require SETS_CCODES, but it may be replaced by other LIRs
-    // that require SETS_CCODES during the fix-up stage.
-    ENCODING_MAP(WIDE(kA64Tbnz3rht), CUSTOM_VARIANTS(0x37000000, 0x37000000),
-                 kFmtRegR, 4, 0, kFmtImm6Shift, -1, -1, kFmtBitBlt, 18, 5, kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_USE0 | IS_BRANCH | NEEDS_FIXUP | SETS_CCODES,
-                 "tbnz", "!0r, #!1h, !2t", kFixupTBxZ),
-    ENCODING_MAP(WIDE(kA64Tbz3rht), CUSTOM_VARIANTS(0x36000000, 0x36000000),
-                 kFmtRegR, 4, 0, kFmtImm6Shift, -1, -1, kFmtBitBlt, 18, 5, kFmtUnused, -1, -1,
-                 IS_TERTIARY_OP | REG_USE0 | IS_BRANCH | NEEDS_FIXUP | SETS_CCODES,
-                 "tbz", "!0r, #!1h, !2t", kFixupTBxZ),
-    ENCODING_MAP(WIDE(kA64Ubfm4rrdd), SF_N_VARIANTS(0x53000000),
-                 kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 21, 16,
-                 kFmtBitBlt, 15, 10, IS_QUAD_OP | REG_DEF0_USE1,
-                 "ubfm", "!0r, !1r, !2d, !3d", kFixupNone),
-};
-
-// new_lir replaces orig_lir in the pcrel_fixup list.
-void Arm64Mir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
-  new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
-  if (UNLIKELY(prev_lir == nullptr)) {
-    first_fixup_ = new_lir;
-  } else {
-    prev_lir->u.a.pcrel_next = new_lir;
-  }
-  orig_lir->flags.fixup = kFixupNone;
-}
-
-// new_lir is inserted before orig_lir in the pcrel_fixup list.
-void Arm64Mir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
-  new_lir->u.a.pcrel_next = orig_lir;
-  if (UNLIKELY(prev_lir == nullptr)) {
-    first_fixup_ = new_lir;
-  } else {
-    DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
-    prev_lir->u.a.pcrel_next = new_lir;
-  }
-}
-
-/* Nop, used for aligning code. Nop is an alias for hint #0. */
-#define PADDING_NOP (UINT32_C(0xd503201f))
-
-uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
-  uint8_t* const write_buffer = write_pos;
-  for (; lir != nullptr; lir = NEXT_LIR(lir)) {
-    lir->offset = (write_pos - write_buffer);
-    bool opcode_is_wide = IS_WIDE(lir->opcode);
-    A64Opcode opcode = UNWIDE(lir->opcode);
-
-    if (UNLIKELY(IsPseudoLirOp(opcode))) {
-      continue;
-    }
-
-    if (LIKELY(!lir->flags.is_nop)) {
-      const A64EncodingMap *encoder = &EncodingMap[opcode];
-
-      // Select the right variant of the skeleton.
-      uint32_t bits = opcode_is_wide ? encoder->xskeleton : encoder->wskeleton;
-      DCHECK(!opcode_is_wide || IS_WIDE(encoder->opcode));
-
-      for (int i = 0; i < 4; i++) {
-        A64EncodingKind kind = encoder->field_loc[i].kind;
-        uint32_t operand = lir->operands[i];
-        uint32_t value;
-
-        if (LIKELY(static_cast<unsigned>(kind) <= kFmtBitBlt)) {
-          // Note: this will handle kFmtReg* and kFmtBitBlt.
-
-          if (static_cast<unsigned>(kind) < kFmtBitBlt) {
-            bool is_zero = A64_REG_IS_ZR(operand);
-
-            if (kIsDebugBuild && (kFailOnSizeError || kReportSizeError)) {
-              // Register usage checks: First establish register usage requirements based on the
-              // format in `kind'.
-              bool want_float = false;     // Want a float (rather than core) register.
-              bool want_64_bit = false;    // Want a 64-bit (rather than 32-bit) register.
-              bool want_var_size = true;   // Want register with variable size (kFmtReg{R,F}).
-              bool want_zero = false;      // Want the zero (rather than sp) register.
-              switch (kind) {
-                case kFmtRegX:
-                  want_64_bit = true;
-                  FALLTHROUGH_INTENDED;
-                case kFmtRegW:
-                  want_var_size = false;
-                  FALLTHROUGH_INTENDED;
-                case kFmtRegR:
-                  want_zero = true;
-                  break;
-                case kFmtRegXOrSp:
-                  want_64_bit = true;
-                  FALLTHROUGH_INTENDED;
-                case kFmtRegWOrSp:
-                  want_var_size = false;
-                  break;
-                case kFmtRegROrSp:
-                  break;
-                case kFmtRegD:
-                  want_64_bit = true;
-                  FALLTHROUGH_INTENDED;
-                case kFmtRegS:
-                  want_var_size = false;
-                  FALLTHROUGH_INTENDED;
-                case kFmtRegF:
-                  want_float = true;
-                  break;
-                default:
-                  LOG(FATAL) << "Bad fmt for arg n. " << i << " of " << encoder->name
-                             << " (" << kind << ")";
-                  break;
-              }
-
-              // want_var_size == true means kind == kFmtReg{R,F}. In these two cases, we want
-              // the register size to be consistent with the instruction width.
-              if (want_var_size) {
-                want_64_bit = opcode_is_wide;
-              }
-
-              // Now check that the requirements are satisfied.
-              RegStorage reg(operand | RegStorage::kValid);
-              const char *expected = nullptr;
-              if (want_float) {
-                if (!reg.IsFloat()) {
-                  expected = "float register";
-                } else if (reg.IsDouble() != want_64_bit) {
-                  expected = (want_64_bit) ? "double register" : "single register";
-                }
-              } else {
-                if (reg.IsFloat()) {
-                  expected = "core register";
-                } else if (reg.Is64Bit() != want_64_bit) {
-                  expected = (want_64_bit) ? "x-register" : "w-register";
-                } else if (A64_REGSTORAGE_IS_SP_OR_ZR(reg) && is_zero != want_zero) {
-                  expected = (want_zero) ? "zero-register" : "sp-register";
-                }
-              }
-
-              // Fail if `expected' contains an unsatisfied requirement.
-              if (expected != nullptr) {
-                LOG(WARNING) << "Method: " << PrettyMethod(cu_->method_idx, *cu_->dex_file)
-                             << " @ 0x" << std::hex << lir->dalvik_offset;
-                if (kFailOnSizeError) {
-                  LOG(FATAL) << "Bad argument n. " << i << " of " << encoder->name
-                             << "(" << UNWIDE(encoder->opcode) << ", " << encoder->fmt << ")"
-                             << ". Expected " << expected << ", got 0x" << std::hex << operand;
-                } else {
-                  LOG(WARNING) << "Bad argument n. " << i << " of " << encoder->name
-                               << ". Expected " << expected << ", got 0x" << std::hex << operand;
-                }
-              }
-            }
-
-            // In the lines below, we rely on (operand & 0x1f) == 31 to be true for register sp
-            // and zr. This means that these two registers do not need any special treatment, as
-            // their bottom 5 bits are correctly set to 31 == 0b11111, which is the right
-            // value for encoding both sp and zr.
-            static_assert((rxzr & 0x1f) == 0x1f, "rzr register number must be 31");
-            static_assert((rsp & 0x1f) == 0x1f, "rsp register number must be 31");
-          }
-
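-          // Place the operand at bit `start' and mask off everything above bit `end' (the
-          // field's MSB).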
-          value = (operand << encoder->field_loc[i].start) &
-              ((1 << (encoder->field_loc[i].end + 1)) - 1);
-          bits |= value;
-        } else {
-          switch (kind) {
-            case kFmtSkip:
-              break;  // Nothing to do, but continue to next.
-            case kFmtUnused:
-              i = 4;  // Done, break out of the enclosing loop.
-              break;
-            case kFmtShift:
-              // Intentional fallthrough.
-            case kFmtExtend:
-              DCHECK_EQ((operand & (1 << 6)) == 0, kind == kFmtShift);
-              value = (operand & 0x3f) << 10;
-              value |= ((operand & 0x1c0) >> 6) << 21;
-              bits |= value;
-              break;
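-            // ADR-style 21-bit immediate: the low two bits go to immlo (bits 30:29), the
-            // remaining nineteen to immhi (bits 23:5).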
-            case kFmtImm21:
-              value = (operand & 0x3) << 29;
-              value |= ((operand & 0x1ffffc) >> 2) << 5;
-              bits |= value;
-              break;
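-            // TBZ/TBNZ bit number: bits 4:0 go to the b40 field (bits 23:19), bit 5 to b5 (bit 31).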
-            case kFmtImm6Shift:
-              value = (operand & 0x1f) << 19;
-              value |= ((operand & 0x20) >> 5) << 31;
-              bits |= value;
-              break;
-            default:
-              LOG(FATAL) << "Bad fmt for arg. " << i << " in " << encoder->name
-                         << " (" << kind << ")";
-          }
-        }
-      }
-
-      DCHECK_EQ(encoder->size, 4);
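-      // Emit the 32-bit instruction in little-endian byte order.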
-      write_pos[0] = (bits & 0xff);
-      write_pos[1] = ((bits >> 8) & 0xff);
-      write_pos[2] = ((bits >> 16) & 0xff);
-      write_pos[3] = ((bits >> 24) & 0xff);
-      write_pos += 4;
-    }
-  }
-
-  return write_pos;
-}
-
-// Align the data offset to an 8-byte boundary: the data section only contains double-word items,
-// as word-sized immediates are better materialized directly in code (at most 2 instructions).
-#define ALIGNED_DATA_OFFSET(offset) (((offset) + 0x7) & ~0x7)
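-// For example, ALIGNED_DATA_OFFSET(0x1d) == 0x20; offsets already 8-byte aligned pass through unchanged.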
-
-/*
- * Get the LIR which emits the instruction preceding the given LIR.
- * Returns nullptr if no previous emitting instruction is found.
- */
-static LIR* GetPrevEmittingLIR(LIR* lir) {
-  DCHECK(lir != nullptr);
-  LIR* prev_lir = lir->prev;
-  while ((prev_lir != nullptr) &&
-         (prev_lir->flags.is_nop || Mir2Lir::IsPseudoLirOp(prev_lir->opcode))) {
-    prev_lir = prev_lir->prev;
-  }
-  return prev_lir;
-}
-
-// Assemble the LIR into binary instruction format.
-void Arm64Mir2Lir::AssembleLIR() {
-  LIR* lir;
-  LIR* prev_lir;
-  cu_->NewTimingSplit("Assemble");
-  int assembler_retries = 0;
-  CodeOffset starting_offset = LinkFixupInsns(first_lir_insn_, last_lir_insn_, 0);
-  data_offset_ = ALIGNED_DATA_OFFSET(starting_offset);
-  int32_t offset_adjustment;
-  AssignDataOffsets();
-
-  /*
-   * Note: generation must be 1 on first pass (to distinguish from initialized state of 0
- * for non-visited nodes). Start at zero here; the bit is flipped to 1 on entry to the loop.
-   */
-  int generation = 0;
-  while (true) {
-    offset_adjustment = 0;
-    AssemblerStatus res = kSuccess;  // Assume success
-    generation ^= 1;
-    // Note: nodes requiring possible fixup linked in ascending order.
-    lir = first_fixup_;
-    prev_lir = nullptr;
-    while (lir != nullptr) {
-      // NOTE: Any new non-pc_rel instructions inserted due to retry must be explicitly encoded at
-      // the time of insertion.  Note that inserted instructions don't need use/def flags, but do
-      // need size and pc-rel status properly updated.
-      lir->offset += offset_adjustment;
-      // During a pass, this lets us tell whether a node has been updated with offset_adjustment yet.
-      lir->flags.generation = generation;
-      switch (static_cast<FixupKind>(lir->flags.fixup)) {
-        case kFixupLabel:
-        case kFixupNone:
-        case kFixupVLoad:
-          break;
-        case kFixupT1Branch: {
-          LIR *target_lir = lir->target;
-          DCHECK(target_lir);
-          CodeOffset pc = lir->offset;
-          CodeOffset target = target_lir->offset +
-              ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
-          int32_t delta = target - pc;
-          DCHECK_ALIGNED(delta, 4);
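-          // Unconditional branches encode a signed 26-bit word offset, i.e. a range of +/-128 MiB.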
-          if (!IS_SIGNED_IMM26(delta >> 2)) {
-            LOG(FATAL) << "Invalid jump range in kFixupT1Branch";
-          }
-          lir->operands[0] = delta >> 2;
-          if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && lir->operands[0] == 1) {
-            // Useless branch.
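-            // (A delta of one word, operands[0] == 1, means the branch targets the next instruction.)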
-            offset_adjustment -= lir->flags.size;
-            lir->flags.is_nop = true;
-            // Don't unlink - just set to do-nothing.
-            lir->flags.fixup = kFixupNone;
-            res = kRetryAll;
-          }
-          break;
-        }
-        case kFixupLoad:
-        case kFixupCBxZ:
-        case kFixupCondBranch: {
-          LIR *target_lir = lir->target;
-          DCHECK(target_lir);
-          CodeOffset pc = lir->offset;
-          CodeOffset target = target_lir->offset +
-            ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
-          int32_t delta = target - pc;
-          DCHECK_ALIGNED(delta, 4);
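-          // Conditional branches, cbz/cbnz and literal loads encode a signed 19-bit word offset (+/-1 MiB).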
-          if (!IS_SIGNED_IMM19(delta >> 2)) {
-            LOG(FATAL) << "Invalid jump range in kFixupLoad";
-          }
-          lir->operands[1] = delta >> 2;
-          break;
-        }
-        case kFixupTBxZ: {
-          int16_t opcode = lir->opcode;
-          RegStorage reg(lir->operands[0] | RegStorage::kValid);
-          int32_t imm = lir->operands[1];
-          DCHECK_EQ(IS_WIDE(opcode), reg.Is64Bit());
-          DCHECK_LT(imm, 64);
-          if (imm >= 32) {
-            DCHECK(IS_WIDE(opcode));
-          } else if (kIsDebugBuild && IS_WIDE(opcode)) {
-            // "tbz/tbnz x0, #imm(<32)" is the same with "tbz/tbnz w0, #imm(<32)", but GCC/oatdump
-            // will disassemble it as "tbz/tbnz w0, #imm(<32)". So unwide the LIR to make the
-            // compiler log behave the same with those disassembler in debug build.
-            // This will also affect tst instruction if it need to be replaced, but there is no
-            // performance difference between "tst Xt" and "tst Wt".
-            lir->opcode = UNWIDE(opcode);
-            lir->operands[0] = As32BitReg(reg).GetReg();
-          }
-
-          // Fix-up branch offset.
-          LIR *target_lir = lir->target;
-          DCHECK(target_lir);
-          CodeOffset pc = lir->offset;
-          CodeOffset target = target_lir->offset +
-              ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
-          int32_t delta = target - pc;
-          DCHECK_ALIGNED(delta, 4);
-          // Check if branch offset can be encoded in tbz/tbnz.
-          if (!IS_SIGNED_IMM14(delta >> 2)) {
-            DexOffset dalvik_offset = lir->dalvik_offset;
-            LIR* targetLIR = lir->target;
-            // "tbz/tbnz Rt, #imm, label" -> "tst Rt, #(1<<imm)".
-            offset_adjustment -= lir->flags.size;
-            int32_t encodedImm = EncodeLogicalImmediate(IS_WIDE(opcode), 1 << lir->operands[1]);
-            DCHECK_NE(encodedImm, -1);
-            lir->opcode = IS_WIDE(opcode) ? WIDE(kA64Tst2rl) : kA64Tst2rl;
-            lir->operands[1] = encodedImm;
-            lir->target = nullptr;
-            lir->flags.fixup = EncodingMap[kA64Tst2rl].fixup;
-            lir->flags.size = EncodingMap[kA64Tst2rl].size;
-            offset_adjustment += lir->flags.size;
-            // Insert "beq/bneq label".
-            opcode = UNWIDE(opcode);
-            DCHECK(opcode == kA64Tbz3rht || opcode == kA64Tbnz3rht);
-            LIR* new_lir = RawLIR(dalvik_offset, kA64B2ct,
-                opcode == kA64Tbz3rht ? kArmCondEq : kArmCondNe, 0, 0, 0, 0, targetLIR);
-            InsertLIRAfter(lir, new_lir);
-            new_lir->offset = lir->offset + lir->flags.size;
-            new_lir->flags.generation = generation;
-            new_lir->flags.fixup = EncodingMap[kA64B2ct].fixup;
-            new_lir->flags.size = EncodingMap[kA64B2ct].size;
-            offset_adjustment += new_lir->flags.size;
-            // lir no longer pcrel, unlink and link in new_lir.
-            ReplaceFixup(prev_lir, lir, new_lir);
-            prev_lir = new_lir;  // Continue with the new instruction.
-            lir = new_lir->u.a.pcrel_next;
-            res = kRetryAll;
-            continue;
-          }
-          lir->operands[2] = delta >> 2;
-          break;
-        }
-        case kFixupAdr: {
-          LIR* target_lir = lir->target;
-          int32_t delta;
-          if (target_lir) {
-            CodeOffset target_offs = ((target_lir->flags.generation == lir->flags.generation) ?
-                                      0 : offset_adjustment) + target_lir->offset;
-            delta = target_offs - lir->offset;
-          } else if (lir->operands[2] >= 0) {
-            const EmbeddedData* tab = UnwrapPointer<EmbeddedData>(lir->operands[2]);
-            delta = tab->offset + offset_adjustment - lir->offset;
-          } else {
-            // No fixup: this usage allows retrieving the current PC.
-            delta = lir->operands[1];
-          }
-          if (!IS_SIGNED_IMM21(delta)) {
-            LOG(FATAL) << "Jump range above 1MB in kFixupAdr";
-          }
-          lir->operands[1] = delta;
-          break;
-        }
-        case kFixupA53Erratum835769:
-          // Avoid emitting code that could trigger Cortex A53's erratum 835769.
-          // This fixup should be carried out for all multiply-accumulate instructions: madd, msub,
-          // smaddl, smsubl, umaddl and umsubl.
-          if (cu_->compiler_driver->GetInstructionSetFeatures()->AsArm64InstructionSetFeatures()
-              ->NeedFixCortexA53_835769()) {
-            // Check that this is a 64-bit multiply-accumulate.
-            if (IS_WIDE(lir->opcode)) {
-              LIR* prev_insn = GetPrevEmittingLIR(lir);
-              if (prev_insn == nullptr) {
-                break;
-              }
-              uint64_t prev_insn_flags = EncodingMap[UNWIDE(prev_insn->opcode)].flags;
-              // Check that the instruction preceding the multiply-accumulate is a load or store.
-              if ((prev_insn_flags & IS_LOAD) != 0 || (prev_insn_flags & IS_STORE) != 0) {
-                // Insert a NOP between the load/store and the multiply-accumulate.
-                LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, nullptr);
-                new_lir->offset = lir->offset;
-                new_lir->flags.fixup = kFixupNone;
-                new_lir->flags.size = EncodingMap[kA64Nop0].size;
-                InsertLIRBefore(lir, new_lir);
-                lir->offset += new_lir->flags.size;
-                offset_adjustment += new_lir->flags.size;
-                res = kRetryAll;
-              }
-            }
-          }
-          break;
-        default:
-          LOG(FATAL) << "Unexpected case " << lir->flags.fixup;
-      }
-      prev_lir = lir;
-      lir = lir->u.a.pcrel_next;
-    }
-
-    if (res == kSuccess) {
-      DCHECK_EQ(offset_adjustment, 0);
-      break;
-    } else {
-      assembler_retries++;
-      if (assembler_retries > MAX_ASSEMBLER_RETRIES) {
-        CodegenDump();
-        LOG(FATAL) << "Assembler error - too many retries";
-      }
-      starting_offset += offset_adjustment;
-      data_offset_ = ALIGNED_DATA_OFFSET(starting_offset);
-      AssignDataOffsets();
-    }
-  }
-
-  // Build the CodeBuffer.
-  DCHECK_LE(data_offset_, total_size_);
-  code_buffer_.reserve(total_size_);
-  code_buffer_.resize(starting_offset);
-  uint8_t* write_pos = &code_buffer_[0];
-  write_pos = EncodeLIRs(write_pos, first_lir_insn_);
-  DCHECK_EQ(static_cast<CodeOffset>(write_pos - &code_buffer_[0]), starting_offset);
-
-  DCHECK_EQ(data_offset_, ALIGNED_DATA_OFFSET(code_buffer_.size()));
-
-  // Install literals
-  InstallLiteralPools();
-
-  // Install switch tables
-  InstallSwitchTables();
-
-  // Install fill array data
-  InstallFillArrayData();
-
-  // Create the mapping table and native offset to reference map.
-  cu_->NewTimingSplit("PcMappingTable");
-  CreateMappingTables();
-
-  cu_->NewTimingSplit("GcMap");
-  CreateNativeGcMap();
-}
-
-size_t Arm64Mir2Lir::GetInsnSize(LIR* lir) {
-  A64Opcode opcode = UNWIDE(lir->opcode);
-  DCHECK(!IsPseudoLirOp(opcode));
-  return EncodingMap[opcode].size;
-}
-
-// Link instructions that need fixup into the pcrel_fixup chain and assign code offsets.
-uint32_t Arm64Mir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
-  LIR* end_lir = tail_lir->next;
-
-  LIR* last_fixup = nullptr;
-  for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
-    A64Opcode opcode = UNWIDE(lir->opcode);
-    if (!lir->flags.is_nop) {
-      if (lir->flags.fixup != kFixupNone) {
-        if (!IsPseudoLirOp(opcode)) {
-          lir->flags.size = EncodingMap[opcode].size;
-          lir->flags.fixup = EncodingMap[opcode].fixup;
-        } else {
-          DCHECK_NE(static_cast<int>(opcode), kPseudoPseudoAlign4);
-          lir->flags.size = 0;
-          lir->flags.fixup = kFixupLabel;
-        }
-        // Link into the fixup chain.
-        lir->flags.use_def_invalid = true;
-        lir->u.a.pcrel_next = nullptr;
-        if (first_fixup_ == nullptr) {
-          first_fixup_ = lir;
-        } else {
-          last_fixup->u.a.pcrel_next = lir;
-        }
-        last_fixup = lir;
-        lir->offset = offset;
-      }
-      offset += lir->flags.size;
-    }
-  }
-  return offset;
-}
-
-void Arm64Mir2Lir::AssignDataOffsets() {
-  /* Set up offsets for literals */
-  CodeOffset offset = data_offset_;
-
-  offset = AssignLiteralOffset(offset);
-
-  offset = AssignSwitchTablesOffset(offset);
-
-  total_size_ = AssignFillArrayDataOffset(offset);
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/arm64/backend_arm64.h b/compiler/dex/quick/arm64/backend_arm64.h
deleted file mode 100644
index 53650c4..0000000
--- a/compiler/dex/quick/arm64/backend_arm64.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_ARM64_BACKEND_ARM64_H_
-#define ART_COMPILER_DEX_QUICK_ARM64_BACKEND_ARM64_H_
-
-namespace art {
-
-struct CompilationUnit;
-class Mir2Lir;
-class MIRGraph;
-class ArenaAllocator;
-
-Mir2Lir* Arm64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
-                            ArenaAllocator* const arena);
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_ARM64_BACKEND_ARM64_H_
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
deleted file mode 100644
index b1acf5e..0000000
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ /dev/null
@@ -1,595 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This file contains codegen for the Arm64 ISA. */
-
-#include "codegen_arm64.h"
-
-#include "arm64_lir.h"
-#include "art_method.h"
-#include "base/logging.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
-#include "gc/accounting/card_table.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "mirror/object_array-inl.h"
-#include "utils/dex_cache_arrays_layout-inl.h"
-
-namespace art {
-
-/*
- * The sparse table in the literal pool is an array of <key,displacement>
- * pairs.  Each entry is loaded as a pair using ldp.
- * The test loop will look something like:
- *
- *   adr   r_base, <table>
- *   ldr   r_val, [rA64_SP, v_reg_off]
- *   mov   r_idx, #table_size
- * loop:
- *   cbz   r_idx, quit
- *   ldp   r_key, r_disp, [r_base], #8
- *   sub   r_idx, #1
- *   cmp   r_val, r_key
- *   b.ne  loop
- *   adr   r_base, #0        ; This is the instruction from which we compute displacements
- *   add   r_base, r_disp
- *   br    r_base
- * quit:
- */
-void Arm64Mir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
-  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  // Add the table to the list - we'll process it later
-  SwitchTable *tab_rec =
-      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
-  tab_rec->switch_mir = mir;
-  tab_rec->table = table;
-  tab_rec->vaddr = current_dalvik_offset_;
-  uint32_t size = table[1];
-  switch_tables_.push_back(tab_rec);
-
-  // Get the switch value
-  rl_src = LoadValue(rl_src, kCoreReg);
-  RegStorage r_base = AllocTempWide();
-  // Allocate key and disp temps.
-  RegStorage r_key = AllocTemp();
-  RegStorage r_disp = AllocTemp();
-  // Materialize a pointer to the switch table
-  NewLIR3(kA64Adr2xd, r_base.GetReg(), 0, WrapPointer(tab_rec));
-  // Set up r_idx
-  RegStorage r_idx = AllocTemp();
-  LoadConstant(r_idx, size);
-
-  // Entry of loop.
-  LIR* loop_entry = NewLIR0(kPseudoTargetLabel);
-  LIR* branch_out = NewLIR2(kA64Cbz2rt, r_idx.GetReg(), 0);
-
-  // Load next key/disp.
-  NewLIR4(kA64LdpPost4rrXD, r_key.GetReg(), r_disp.GetReg(), r_base.GetReg(), 2);
-  OpRegRegImm(kOpSub, r_idx, r_idx, 1);
-
-  // Go to next case, if key does not match.
-  OpRegReg(kOpCmp, r_key, rl_src.reg);
-  OpCondBranch(kCondNe, loop_entry);
-
-  // Key does match: branch to case label.
-  LIR* switch_label = NewLIR3(kA64Adr2xd, r_base.GetReg(), 0, -1);
-  tab_rec->anchor = switch_label;
-
-  // Add displacement to base branch address and go!
-  OpRegRegRegExtend(kOpAdd, r_base, r_base, As64BitReg(r_disp), kA64Sxtw, 0U);
-  NewLIR1(kA64Br1x, r_base.GetReg());
-
-  // Loop exit label.
-  LIR* loop_exit = NewLIR0(kPseudoTargetLabel);
-  branch_out->target = loop_exit;
-}
-
-void Arm64Mir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
-  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  // Add the table to the list - we'll process it later
-  SwitchTable *tab_rec =
-      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
-  tab_rec->switch_mir = mir;
-  tab_rec->table = table;
-  tab_rec->vaddr = current_dalvik_offset_;
-  uint32_t size = table[1];
-  switch_tables_.push_back(tab_rec);
-
-  // Get the switch value
-  rl_src = LoadValue(rl_src, kCoreReg);
-  RegStorage table_base = AllocTempWide();
-  // Materialize a pointer to the switch table
-  NewLIR3(kA64Adr2xd, table_base.GetReg(), 0, WrapPointer(tab_rec));
-  int low_key = s4FromSwitchData(&table[2]);
-  RegStorage key_reg;
-  // Remove the bias, if necessary
-  if (low_key == 0) {
-    key_reg = rl_src.reg;
-  } else {
-    key_reg = AllocTemp();
-    OpRegRegImm(kOpSub, key_reg, rl_src.reg, low_key);
-  }
-  // Bounds check - if key < 0 or >= size, branch over the switch and continue with the following code.
-  OpRegImm(kOpCmp, key_reg, size - 1);
-  LIR* branch_over = OpCondBranch(kCondHi, nullptr);
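-  // kCondHi is an unsigned compare, so a negative key wraps around and also takes the branch.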
-
-  // Load the displacement from the switch table
-  RegStorage disp_reg = AllocTemp();
-  LoadBaseIndexed(table_base, As64BitReg(key_reg), disp_reg, 2, k32);
-
-  // Get base branch address.
-  RegStorage branch_reg = AllocTempWide();
-  LIR* switch_label = NewLIR3(kA64Adr2xd, branch_reg.GetReg(), 0, -1);
-  tab_rec->anchor = switch_label;
-
-  // Add displacement to base branch address and go!
-  OpRegRegRegExtend(kOpAdd, branch_reg, branch_reg, As64BitReg(disp_reg), kA64Sxtw, 0U);
-  NewLIR1(kA64Br1x, branch_reg.GetReg());
-
-  // branch_over target here
-  LIR* target = NewLIR0(kPseudoTargetLabel);
-  branch_over->target = target;
-}
-
-/*
- * Handle unlocked -> thin locked transition inline or else call out to quick entrypoint. For more
- * details see monitor.cc.
- */
-void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
-  // x0/w0 = object
-  // w1    = thin lock thread id
-  // x2    = address of lock word
-  // w3    = lock word / store failure
-  // TUNING: How much performance do we gain by inlining this,
-  // given that we have already flushed all registers?
-  FlushAllRegs();
-  LoadValueDirectFixed(rl_src, rs_x0);  // = TargetReg(kArg0, kRef)
-  LockCallTemps();  // Prepare for explicit register usage
-  LIR* null_check_branch = nullptr;
-  if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
-    null_check_branch = nullptr;  // No null check.
-  } else {
-    // If the null check fails, it's handled by the slow path to reduce exception-related metadata.
-    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
-    }
-  }
-  Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
-  OpRegRegImm(kOpAdd, rs_x2, rs_x0, mirror::Object::MonitorOffset().Int32Value());
-  NewLIR2(kA64Ldxr2rX, rw3, rx2);
-  MarkPossibleNullPointerException(opt_flags);
-  // Zero out the read barrier bits.
-  OpRegRegImm(kOpAnd, rs_w2, rs_w3, LockWord::kReadBarrierStateMaskShiftedToggled);
-  LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, nullptr);
-  // w3 is zero except for the rb bits here. Copy the read barrier bits into w1.
-  OpRegRegReg(kOpOr, rs_w1, rs_w1, rs_w3);
-  OpRegRegImm(kOpAdd, rs_x2, rs_x0, mirror::Object::MonitorOffset().Int32Value());
-  NewLIR3(kA64Stxr3wrX, rw3, rw1, rx2);
-  LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, nullptr);
-
-  LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
-  not_unlocked_branch->target = slow_path_target;
-  if (null_check_branch != nullptr) {
-    null_check_branch->target = slow_path_target;
-  }
-  // TODO: move to a slow path.
-  // Go expensive route - artLockObjectFromCode(obj);
-  LoadWordDisp(rs_xSELF, QUICK_ENTRYPOINT_OFFSET(8, pLockObject).Int32Value(), rs_xLR);
-  ClobberCallerSave();
-  LIR* call_inst = OpReg(kOpBlx, rs_xLR);
-  MarkSafepointPC(call_inst);
-
-  LIR* success_target = NewLIR0(kPseudoTargetLabel);
-  lock_success_branch->target = success_target;
-  GenMemBarrier(kLoadAny);
-}
-
-/*
- * Handle thin locked -> unlocked transition inline or else call out to quick entrypoint. For more
- * details see monitor.cc. Note the non-read-barrier code below doesn't use ldxr/stxr, as the
- * code holds the lock and can only give away ownership if it's suspended.
- */
-void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
-  // x0/w0 = object
-  // w1    = thin lock thread id
-  // w2    = lock word
-  // TUNING: How much performance do we gain by inlining this,
-  // given that we have already flushed all registers?
-  FlushAllRegs();
-  LoadValueDirectFixed(rl_src, rs_x0);  // Get obj
-  LockCallTemps();  // Prepare for explicit register usage
-  LIR* null_check_branch = nullptr;
-  if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
-    null_check_branch = nullptr;  // No null check.
-  } else {
-    // If the null check fails, it's handled by the slow path to reduce exception-related metadata.
-    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
-    }
-  }
-  Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
-  if (!kUseReadBarrier) {
-    Load32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_w2);
-  } else {
-    OpRegRegImm(kOpAdd, rs_x3, rs_x0, mirror::Object::MonitorOffset().Int32Value());
-    NewLIR2(kA64Ldxr2rX, rw2, rx3);
-  }
-  MarkPossibleNullPointerException(opt_flags);
-  // Zero out the read barrier bits.
-  OpRegRegImm(kOpAnd, rs_w3, rs_w2, LockWord::kReadBarrierStateMaskShiftedToggled);
-  // Zero out except the read barrier bits.
-  OpRegRegImm(kOpAnd, rs_w2, rs_w2, LockWord::kReadBarrierStateMaskShifted);
-  LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w3, rs_w1, nullptr);
-  GenMemBarrier(kAnyStore);
-  LIR* unlock_success_branch;
-  if (!kUseReadBarrier) {
-    Store32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_w2);
-    unlock_success_branch = OpUnconditionalBranch(nullptr);
-  } else {
-    OpRegRegImm(kOpAdd, rs_x3, rs_x0, mirror::Object::MonitorOffset().Int32Value());
-    NewLIR3(kA64Stxr3wrX, rw1, rw2, rx3);
-    unlock_success_branch = OpCmpImmBranch(kCondEq, rs_w1, 0, nullptr);
-  }
-  LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
-  slow_unlock_branch->target = slow_path_target;
-  if (null_check_branch != nullptr) {
-    null_check_branch->target = slow_path_target;
-  }
-  // TODO: move to a slow path.
-  // Go expensive route - artUnlockObjectFromCode(obj);
-  LoadWordDisp(rs_xSELF, QUICK_ENTRYPOINT_OFFSET(8, pUnlockObject).Int32Value(), rs_xLR);
-  ClobberCallerSave();
-  LIR* call_inst = OpReg(kOpBlx, rs_xLR);
-  MarkSafepointPC(call_inst);
-
-  LIR* success_target = NewLIR0(kPseudoTargetLabel);
-  unlock_success_branch->target = success_target;
-}
-
-void Arm64Mir2Lir::GenMoveException(RegLocation rl_dest) {
-  int ex_offset = Thread::ExceptionOffset<8>().Int32Value();
-  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
-  LoadRefDisp(rs_xSELF, ex_offset, rl_result.reg, kNotVolatile);
-  StoreRefDisp(rs_xSELF, ex_offset, rs_xzr, kNotVolatile);
-  StoreValue(rl_dest, rl_result);
-}
-
-void Arm64Mir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
-  RegStorage reg_card_base = AllocTempWide();
-  RegStorage reg_card_no = AllocTempWide();  // Needs to be wide, as the address is a 64-bit reference.
-  LoadWordDisp(rs_xSELF, Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
-  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
-  // TODO(Arm64): generate "strb wB, [xB, wC, uxtw]" rather than "strb wB, [xB, xC]"?
-  StoreBaseIndexed(reg_card_base, reg_card_no, As32BitReg(reg_card_base),
-                   0, kUnsignedByte);
-  FreeTemp(reg_card_base);
-  FreeTemp(reg_card_no);
-}
-
-static dwarf::Reg DwarfCoreReg(int num) {
-  return dwarf::Reg::Arm64Core(num);
-}
-
-void Arm64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
-  DCHECK_EQ(cfi_.GetCurrentCFAOffset(), 0);  // empty stack.
-
-  /*
-   * On entry, x0 to x7 are live.  Let the register allocation
-   * mechanism know so it doesn't try to use any of them when
-   * expanding the frame or flushing.
-   * Reserve x8 & x9 for temporaries.
-   */
-  LockTemp(rs_x0);
-  LockTemp(rs_x1);
-  LockTemp(rs_x2);
-  LockTemp(rs_x3);
-  LockTemp(rs_x4);
-  LockTemp(rs_x5);
-  LockTemp(rs_x6);
-  LockTemp(rs_x7);
-  LockTemp(rs_xIP0);
-  LockTemp(rs_xIP1);
-
-  /* TUNING:
-   * Use AllocTemp() and reuse LR if possible to give us the freedom on adjusting the number
-   * of temp registers.
-   */
-
-  /*
-   * We can safely skip the stack overflow check if we're
-   * a leaf *and* our frame size < fudge factor.
-   */
-  bool skip_overflow_check = mir_graph_->MethodIsLeaf() &&
-    !FrameNeedsStackCheck(frame_size_, kArm64);
-
-  const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm64);
-  const bool large_frame = static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes;
-  bool generate_explicit_stack_overflow_check = large_frame ||
-    !cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks();
-  const int spill_count = num_core_spills_ + num_fp_spills_;
-  const int spill_size = (spill_count * kArm64PointerSize + 15) & ~0xf;  // SP 16 byte alignment.
-  const int frame_size_without_spills = frame_size_ - spill_size;
-
-  if (!skip_overflow_check) {
-    if (generate_explicit_stack_overflow_check) {
-      // Load stack limit
-      LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_xIP1);
-    } else {
-      // Implicit stack overflow check.
-      // Generate a load from [sp, #-framesize].  If this is in the stack
-      // redzone we will get a segmentation fault.
-
-      // TODO: If the frame size is small enough, is it possible to make this a pre-indexed load,
-      //       so that we can avoid the following "sub sp" when spilling?
-      OpRegRegImm(kOpSub, rs_x8, rs_sp, GetStackOverflowReservedBytes(kArm64));
-      Load32Disp(rs_x8, 0, rs_wzr);
-      MarkPossibleStackOverflowException();
-    }
-  }
-
-  int spilled_already = 0;
-  if (spill_size > 0) {
-    spilled_already = SpillRegs(rs_sp, core_spill_mask_, fp_spill_mask_, frame_size_);
-    DCHECK(spill_size == spilled_already || frame_size_ == spilled_already);
-  }
-
-  if (spilled_already != frame_size_) {
-    OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
-    cfi_.AdjustCFAOffset(frame_size_without_spills);
-  }
-
-  if (!skip_overflow_check) {
-    if (generate_explicit_stack_overflow_check) {
-      class StackOverflowSlowPath: public LIRSlowPath {
-      public:
-        StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
-            : LIRSlowPath(m2l, branch),
-              sp_displace_(sp_displace) {
-        }
-        void Compile() OVERRIDE {
-          m2l_->ResetRegPool();
-          m2l_->ResetDefTracking();
-          GenerateTargetLabel(kPseudoThrowTarget);
-          // Unwinds stack.
-          m2l_->OpRegImm(kOpAdd, rs_sp, sp_displace_);
-          m2l_->cfi().AdjustCFAOffset(-sp_displace_);
-          m2l_->ClobberCallerSave();
-          ThreadOffset<8> func_offset = QUICK_ENTRYPOINT_OFFSET(8, pThrowStackOverflow);
-          m2l_->LockTemp(rs_xIP0);
-          m2l_->LoadWordDisp(rs_xSELF, func_offset.Int32Value(), rs_xIP0);
-          m2l_->NewLIR1(kA64Br1x, rs_xIP0.GetReg());
-          m2l_->FreeTemp(rs_xIP0);
-          m2l_->cfi().AdjustCFAOffset(sp_displace_);
-        }
-
-      private:
-        const size_t sp_displace_;
-      };
-
-      LIR* branch = OpCmpBranch(kCondUlt, rs_sp, rs_xIP1, nullptr);
-      AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_));
-    }
-  }
-
-  FlushIns(ArgLocs, rl_method);
-
-  FreeTemp(rs_x0);
-  FreeTemp(rs_x1);
-  FreeTemp(rs_x2);
-  FreeTemp(rs_x3);
-  FreeTemp(rs_x4);
-  FreeTemp(rs_x5);
-  FreeTemp(rs_x6);
-  FreeTemp(rs_x7);
-  FreeTemp(rs_xIP0);
-  FreeTemp(rs_xIP1);
-}
-
-void Arm64Mir2Lir::GenExitSequence() {
-  cfi_.RememberState();
-  /*
-   * In the exit path, r0/r1 are live - make sure they aren't
-   * allocated by the register utilities as temps.
-   */
-  LockTemp(rs_x0);
-  LockTemp(rs_x1);
-  UnspillRegs(rs_sp, core_spill_mask_, fp_spill_mask_, frame_size_);
-
-  // Finally return.
-  NewLIR0(kA64Ret);
-  // The CFI should be restored for any code that follows the exit block.
-  cfi_.RestoreState();
-  cfi_.DefCFAOffset(frame_size_);
-}
-
-void Arm64Mir2Lir::GenSpecialExitSequence() {
-  NewLIR0(kA64Ret);
-}
-
-void Arm64Mir2Lir::GenSpecialEntryForSuspend() {
-  // Keep 16-byte stack alignment - push x0 (i.e. ArtMethod*) and lr.
-  core_spill_mask_ = (1u << rs_xLR.GetRegNum());
-  num_core_spills_ = 1u;
-  fp_spill_mask_ = 0u;
-  num_fp_spills_ = 0u;
-  frame_size_ = 16u;
-  core_vmap_table_.clear();
-  fp_vmap_table_.clear();
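-  // The stp pre-index immediate is encoded in 8-byte units for X registers,
-  // hence -frame_size_ / 8 (a 16-byte push encodes as -2).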
-  NewLIR4(WIDE(kA64StpPre4rrXD), rs_x0.GetReg(), rs_xLR.GetReg(), rs_sp.GetReg(), -frame_size_ / 8);
-  cfi_.AdjustCFAOffset(frame_size_);
-  // Do not generate CFI for scratch register x0.
-  cfi_.RelOffset(DwarfCoreReg(rxLR), 8);
-}
-
-void Arm64Mir2Lir::GenSpecialExitForSuspend() {
-  // Pop the frame. (ArtMethod* no longer needed but restore it anyway.)
-  NewLIR4(WIDE(kA64LdpPost4rrXD), rs_x0.GetReg(), rs_xLR.GetReg(), rs_sp.GetReg(), frame_size_ / 8);
-  cfi_.AdjustCFAOffset(-frame_size_);
-  cfi_.Restore(DwarfCoreReg(rxLR));
-}
-
-static bool Arm64UseRelativeCall(CompilationUnit* cu, const MethodReference& target_method) {
-  // Emit relative calls anywhere when compiling the boot image; otherwise only
-  // within the same dex file.
-  return cu->compiler_driver->IsBootImage() || cu->dex_file == target_method.dex_file;
-}
-
-/*
- * Bit of a hack here - in the absence of a real scheduling pass,
- * emit the next instruction in static & direct invoke sequences.
- */
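-// Each call emits at most one instruction of the sequence and returns the
-// next state; -1 signals that the sequence is exhausted.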
-int Arm64Mir2Lir::Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
-                                      int state, const MethodReference& target_method,
-                                      uint32_t unused_idx ATTRIBUTE_UNUSED,
-                                      uintptr_t direct_code, uintptr_t direct_method,
-                                      InvokeType type) {
-  Arm64Mir2Lir* cg = static_cast<Arm64Mir2Lir*>(cu->cg.get());
-  if (info->string_init_offset != 0) {
-    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
-    switch (state) {
-    case 0: {  // Grab target method* from thread pointer
-      cg->LoadWordDisp(rs_xSELF, info->string_init_offset, arg0_ref);
-      break;
-    }
-    case 1:  // Grab the code from the method*
-      if (direct_code == 0) {
-        // kInvokeTgt := arg0_ref->entrypoint
-        cg->LoadWordDisp(arg0_ref,
-                         ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-                             kArm64PointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
-      }
-      break;
-    default:
-      return -1;
-    }
-  } else if (direct_code != 0 && direct_method != 0) {
-    switch (state) {
-    case 0:  // Get the current Method* [sets kArg0]
-      if (direct_code != static_cast<uintptr_t>(-1)) {
-        cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
-      } else if (Arm64UseRelativeCall(cu, target_method)) {
-        // Defer to linker patch.
-      } else {
-        cg->LoadCodeAddress(target_method, type, kInvokeTgt);
-      }
-      if (direct_method != static_cast<uintptr_t>(-1)) {
-        cg->LoadConstantWide(cg->TargetReg(kArg0, kRef), direct_method);
-      } else {
-        cg->LoadMethodAddress(target_method, type, kArg0);
-      }
-      break;
-    default:
-      return -1;
-    }
-  } else {
-    bool use_pc_rel = cg->CanUseOpPcRelDexCacheArrayLoad();
-    RegStorage arg0_ref = cg->TargetPtrReg(kArg0);
-    switch (state) {
-    case 0:  // Get the current Method* [sets kArg0]
-      // TUNING: we can save a reg copy if Method* has been promoted.
-      if (!use_pc_rel) {
-        cg->LoadCurrMethodDirect(arg0_ref);
-        break;
-      }
-      ++state;
-      FALLTHROUGH_INTENDED;
-    case 1:  // Get method->dex_cache_resolved_methods_
-      if (!use_pc_rel) {
-        cg->LoadBaseDisp(arg0_ref,
-                         ArtMethod::DexCacheResolvedMethodsOffset(kArm64PointerSize).Int32Value(),
-                         arg0_ref,
-                         k64,
-                         kNotVolatile);
-      }
-      // Set up direct code if known.
-      if (direct_code != 0) {
-        if (direct_code != static_cast<uintptr_t>(-1)) {
-          cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
-        } else if (Arm64UseRelativeCall(cu, target_method)) {
-          // Defer to linker patch.
-        } else {
-          CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
-          cg->LoadCodeAddress(target_method, type, kInvokeTgt);
-        }
-      }
-      if (!use_pc_rel || direct_code != 0) {
-        break;
-      }
-      ++state;
-      FALLTHROUGH_INTENDED;
-    case 2:  // Grab target method*
-      CHECK_EQ(cu->dex_file, target_method.dex_file);
-      if (!use_pc_rel) {
-        cg->LoadWordDisp(arg0_ref,
-                         cg->GetCachePointerOffset(target_method.dex_method_index,
-                                                   kArm64PointerSize),
-                         arg0_ref);
-      } else {
-        size_t offset = cg->dex_cache_arrays_layout_.MethodOffset(target_method.dex_method_index);
-        cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, arg0_ref, true);
-      }
-      break;
-    case 3:  // Grab the code from the method*
-      if (direct_code == 0) {
-        // kInvokeTgt := arg0_ref->entrypoint
-        cg->LoadWordDisp(arg0_ref,
-                         ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-                             kArm64PointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
-      }
-      break;
-    default:
-      return -1;
-    }
-  }
-  return state + 1;
-}
-
-NextCallInsn Arm64Mir2Lir::GetNextSDCallInsn() {
-  return Arm64NextSDCallInsn;
-}
-
-LIR* Arm64Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
-  // For ARM64, just generate a relative BL instruction that will be filled in at 'link time'.
-  // If the target turns out to be too far, the linker will generate a thunk for dispatch.
-  int target_method_idx = target_method.dex_method_index;
-  const DexFile* target_dex_file = target_method.dex_file;
-
-  // Generate the call instruction and save index, dex_file, and type.
-  // NOTE: Method deduplication takes linker patches into account, so we can just pass 0
-  // as a placeholder for the offset.
-  LIR* call = RawLIR(current_dalvik_offset_, kA64Bl1t, 0,
-                     target_method_idx, WrapPointer(target_dex_file), type);
-  AppendLIR(call);
-  call_method_insns_.push_back(call);
-  return call;
-}
-
-LIR* Arm64Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info) {
-  LIR* call_insn;
-  if (method_info.FastPath() && Arm64UseRelativeCall(cu_, method_info.GetTargetMethod()) &&
-      (method_info.GetSharpType() == kDirect || method_info.GetSharpType() == kStatic) &&
-      method_info.DirectCode() == static_cast<uintptr_t>(-1)) {
-    call_insn = CallWithLinkerFixup(method_info.GetTargetMethod(), method_info.GetSharpType());
-  } else {
-    call_insn = OpReg(kOpBlx, TargetPtrReg(kInvokeTgt));
-  }
-  return call_insn;
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
deleted file mode 100644
index ca2e012..0000000
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ /dev/null
@@ -1,416 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_ARM64_CODEGEN_ARM64_H_
-#define ART_COMPILER_DEX_QUICK_ARM64_CODEGEN_ARM64_H_
-
-#include "arm64_lir.h"
-#include "base/logging.h"
-#include "dex/quick/mir_to_lir.h"
-
-#include <map>
-
-namespace art {
-
-class Arm64Mir2Lir FINAL : public Mir2Lir {
- protected:
-  class InToRegStorageArm64Mapper : public InToRegStorageMapper {
-   public:
-    InToRegStorageArm64Mapper() : cur_core_reg_(0), cur_fp_reg_(0) {}
-    virtual ~InToRegStorageArm64Mapper() {}
-    virtual RegStorage GetNextReg(ShortyArg arg);
-    virtual void Reset() OVERRIDE {
-      cur_core_reg_ = 0;
-      cur_fp_reg_ = 0;
-    }
-   private:
-    size_t cur_core_reg_;
-    size_t cur_fp_reg_;
-  };
-
-  InToRegStorageArm64Mapper in_to_reg_storage_arm64_mapper_;
-  InToRegStorageMapper* GetResetedInToRegStorageMapper() OVERRIDE {
-    in_to_reg_storage_arm64_mapper_.Reset();
-    return &in_to_reg_storage_arm64_mapper_;
-  }
-
- public:
-  Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
-
-  // Required for target - codegen helpers.
-  bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
-                          RegLocation rl_dest, int lit) OVERRIDE;
-  bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
-                        RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
-  bool HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
-                          RegLocation rl_src, RegLocation rl_dest, int64_t lit);
-  bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
-  void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
-                                  int32_t constant) OVERRIDE;
-  void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
-                                   int64_t constant) OVERRIDE;
-  LIR* CheckSuspendUsingLoad() OVERRIDE;
-  RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
-  LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
-                    OpSize size, VolatileKind is_volatile) OVERRIDE;
-  LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
-                       OpSize size) OVERRIDE;
-  LIR* LoadConstantNoClobber(RegStorage r_dest, int value) OVERRIDE;
-  LIR* LoadConstantWide(RegStorage r_dest, int64_t value) OVERRIDE;
-  LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
-                     VolatileKind is_volatile) OVERRIDE;
-  LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
-                        OpSize size) OVERRIDE;
-
-  /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
-  void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
-
-  bool CanUseOpPcRelDexCacheArrayLoad() const OVERRIDE;
-  void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest, bool wide)
-      OVERRIDE;
-
-  LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
-                         int offset, int check_value, LIR* target, LIR** compare) OVERRIDE;
-
-  // Required for target - register utilities.
-  RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
-  RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
-    if (wide_kind == kWide || wide_kind == kRef) {
-      return As64BitReg(TargetReg(symbolic_reg));
-    } else {
-      return Check32BitReg(TargetReg(symbolic_reg));
-    }
-  }
-  RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
-    return As64BitReg(TargetReg(symbolic_reg));
-  }
-  RegLocation GetReturnAlt() OVERRIDE;
-  RegLocation GetReturnWideAlt() OVERRIDE;
-  RegLocation LocCReturn() OVERRIDE;
-  RegLocation LocCReturnRef() OVERRIDE;
-  RegLocation LocCReturnDouble() OVERRIDE;
-  RegLocation LocCReturnFloat() OVERRIDE;
-  RegLocation LocCReturnWide() OVERRIDE;
-  ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
-  void AdjustSpillMask() OVERRIDE;
-  void ClobberCallerSave() OVERRIDE;
-  void FreeCallTemps() OVERRIDE;
-  void LockCallTemps() OVERRIDE;
-  void CompilerInitializeRegAlloc() OVERRIDE;
-
-  // Required for target - miscellaneous.
-  void AssembleLIR() OVERRIDE;
-  void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
-  void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
-                                ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
-  const char* GetTargetInstFmt(int opcode) OVERRIDE;
-  const char* GetTargetInstName(int opcode) OVERRIDE;
-  std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) OVERRIDE;
-  ResourceMask GetPCUseDefEncoding() const OVERRIDE;
-  uint64_t GetTargetInstFlags(int opcode) OVERRIDE;
-  size_t GetInsnSize(LIR* lir) OVERRIDE;
-  bool IsUnconditionalBranch(LIR* lir) OVERRIDE;
-
-  // Get the register class for load/store of a field.
-  RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
-
-  // Required for target - Dalvik-level generators.
-  void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                      RegLocation lr_shift) OVERRIDE;
-  void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                         RegLocation rl_src2, int flags) OVERRIDE;
-  void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
-                   RegLocation rl_dest, int scale) OVERRIDE;
-  void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
-                   RegLocation rl_src, int scale, bool card_mark) OVERRIDE;
-  void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                         RegLocation rl_shift, int flags) OVERRIDE;
-  void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                        RegLocation rl_src2) OVERRIDE;
-  void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                       RegLocation rl_src2) OVERRIDE;
-  void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                RegLocation rl_src2) OVERRIDE;
-  void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
-  bool GenInlinedReverseBits(CallInfo* info, OpSize size) OVERRIDE;
-  bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
-  bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
-  bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) OVERRIDE;
-  bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) OVERRIDE;
-  bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) OVERRIDE;
-  bool GenInlinedSqrt(CallInfo* info) OVERRIDE;
-  bool GenInlinedCeil(CallInfo* info) OVERRIDE;
-  bool GenInlinedFloor(CallInfo* info) OVERRIDE;
-  bool GenInlinedRint(CallInfo* info) OVERRIDE;
-  bool GenInlinedRound(CallInfo* info, bool is_double) OVERRIDE;
-  bool GenInlinedPeek(CallInfo* info, OpSize size) OVERRIDE;
-  bool GenInlinedPoke(CallInfo* info, OpSize size) OVERRIDE;
-  bool GenInlinedAbsInt(CallInfo* info) OVERRIDE;
-  bool GenInlinedAbsLong(CallInfo* info) OVERRIDE;
-  bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
-  void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
-  void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                      RegLocation rl_src2, int flags) OVERRIDE;
-  RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div)
-      OVERRIDE;
-  RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div)
-      OVERRIDE;
-  void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) OVERRIDE;
-  void GenDivZeroCheckWide(RegStorage reg) OVERRIDE;
-  void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
-  void GenExitSequence() OVERRIDE;
-  void GenSpecialExitSequence() OVERRIDE;
-  void GenSpecialEntryForSuspend() OVERRIDE;
-  void GenSpecialExitForSuspend() OVERRIDE;
-  void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
-  void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
-  void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
-  void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
-                        int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                        RegisterClass dest_reg_class) OVERRIDE;
-
-  bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
-  void GenMonitorEnter(int opt_flags, RegLocation rl_src) OVERRIDE;
-  void GenMonitorExit(int opt_flags, RegLocation rl_src) OVERRIDE;
-  void GenMoveException(RegLocation rl_dest) OVERRIDE;
-  void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
-                                     int first_bit, int second_bit) OVERRIDE;
-  void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
-  void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
-  void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
-  void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
-  void GenMaddMsubInt(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
-                      RegLocation rl_src3, bool is_sub);
-  void GenMaddMsubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
-                       RegLocation rl_src3, bool is_sub);
-
-  // Required for target - single operation generators.
-  LIR* OpUnconditionalBranch(LIR* target) OVERRIDE;
-  LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) OVERRIDE;
-  LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) OVERRIDE;
-  LIR* OpCondBranch(ConditionCode cc, LIR* target) OVERRIDE;
-  LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) OVERRIDE;
-  LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
-  LIR* OpIT(ConditionCode cond, const char* guide) OVERRIDE;
-  void OpEndIT(LIR* it) OVERRIDE;
-  LIR* OpMem(OpKind op, RegStorage r_base, int disp) OVERRIDE;
-  void OpPcRelLoad(RegStorage reg, LIR* target) OVERRIDE;
-  LIR* OpReg(OpKind op, RegStorage r_dest_src) OVERRIDE;
-  void OpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
-  LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) OVERRIDE;
-  LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) OVERRIDE;
-  LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) OVERRIDE;
-  LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) OVERRIDE;
-  LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) OVERRIDE;
-  LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) OVERRIDE;
-  LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) OVERRIDE;
-  LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) OVERRIDE;
-  LIR* OpTestSuspend(LIR* target) OVERRIDE;
-  LIR* OpVldm(RegStorage r_base, int count) OVERRIDE;
-  LIR* OpVstm(RegStorage r_base, int count) OVERRIDE;
-  void OpRegCopyWide(RegStorage dest, RegStorage src) OVERRIDE;
-
-  bool InexpensiveConstantInt(int32_t value) OVERRIDE;
-  bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) OVERRIDE;
-  bool InexpensiveConstantFloat(int32_t value) OVERRIDE;
-  bool InexpensiveConstantLong(int64_t value) OVERRIDE;
-  bool InexpensiveConstantDouble(int64_t value) OVERRIDE;
-
-  void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) OVERRIDE;
-
-  bool WideGPRsAreAliases() const OVERRIDE {
-    return true;  // 64b architecture.
-  }
-  bool WideFPRsAreAliases() const OVERRIDE {
-    return true;  // 64b architecture.
-  }
-
-  size_t GetInstructionOffset(LIR* lir) OVERRIDE;
-
-  NextCallInsn GetNextSDCallInsn() OVERRIDE;
-
-  /*
-   * @brief Generate a relative call to the method that will be patched at link time.
-   * @param target_method The MethodReference of the method to be invoked.
-   * @param type How the method will be invoked.
-   * @returns Call instruction
-   */
-  LIR* CallWithLinkerFixup(const MethodReference& target_method, InvokeType type);
-
-  /*
-   * @brief Generate the actual call insn based on the method info.
-   * @param method_info the lowering info for the method call.
-   * @returns Call instruction
-   */
-  virtual LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
-
-  /*
-   * @brief Handle ARM specific literals.
-   */
-  void InstallLiteralPools() OVERRIDE;
-
-  LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
-
- private:
-  /**
-   * @brief Given register xNN (dNN), returns register wNN (sNN).
-   * @param reg #RegStorage containing a Solo64 input register (e.g. @c x1 or @c d2).
-   * @return A Solo32 with the same register number as the @p reg (e.g. @c w1 or @c s2).
-   * @see As64BitReg
-   */
-  RegStorage As32BitReg(RegStorage reg) {
-    DCHECK(!reg.IsPair());
-    if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
-      if (kFailOnSizeError) {
-        LOG(FATAL) << "Expected 64b register";
-      } else {
-        LOG(WARNING) << "Expected 64b register";
-        return reg;
-      }
-    }
-    RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
-                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
-    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
-              ->GetReg().GetReg(),
-              ret_val.GetReg());
-    return ret_val;
-  }
-
-  RegStorage Check32BitReg(RegStorage reg) {
-    if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
-      if (kFailOnSizeError) {
-        LOG(FATAL) << "Checked for 32b register";
-      } else {
-        LOG(WARNING) << "Checked for 32b register";
-        return As32BitReg(reg);
-      }
-    }
-    return reg;
-  }
-
-  /**
-   * @brief Given register wNN (sNN), returns register xNN (dNN).
-   * @param reg #RegStorage containing a Solo32 input register (e.g. @c w1 or @c s2).
-   * @return A Solo64 with the same register number as the @p reg (e.g. @c x1 or @c d2).
-   * @see As32BitReg
-   */
-  RegStorage As64BitReg(RegStorage reg) {
-    DCHECK(!reg.IsPair());
-    if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
-      if (kFailOnSizeError) {
-        LOG(FATAL) << "Expected 32b register";
-      } else {
-        LOG(WARNING) << "Expected 32b register";
-        return reg;
-      }
-    }
-    RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
-                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
-    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
-              ->GetReg().GetReg(),
-              ret_val.GetReg());
-    return ret_val;
-  }
-
-  RegStorage Check64BitReg(RegStorage reg) {
-    if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
-      if (kFailOnSizeError) {
-        LOG(FATAL) << "Checked for 64b register";
-      } else {
-        LOG(WARNING) << "Checked for 64b register";
-        return As64BitReg(reg);
-      }
-    }
-    return reg;
-  }
-
-  int32_t EncodeImmSingle(uint32_t bits);
-  int32_t EncodeImmDouble(uint64_t bits);
-  LIR* LoadFPConstantValue(RegStorage r_dest, int32_t value);
-  LIR* LoadFPConstantValueWide(RegStorage r_dest, int64_t value);
-  void ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
-  void InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
-  void AssignDataOffsets();
-  RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
-                        bool is_div, int flags) OVERRIDE;
-  RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) OVERRIDE;
-  size_t GetLoadStoreSize(LIR* lir);
-
-  bool SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
-                            RegLocation rl_dest, int64_t lit);
-
-  uint32_t LinkFixupInsns(LIR* head_lir, LIR* tail_lir, CodeOffset offset);
-  int AssignInsnOffsets();
-  void AssignOffsets();
-  uint8_t* EncodeLIRs(uint8_t* write_pos, LIR* lir);
-
-  // Spill core and FP registers. Returns the SP difference: either spill size, or whole
-  // frame size.
-  int SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask, int frame_size);
-
-  // Unspill core and FP registers.
-  void UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask, int frame_size);
-
-  void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-
-  LIR* OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value);
-  LIR* OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1, int64_t value);
-
-  LIR* OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
-  LIR* OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
-                        int shift);
-  int EncodeShift(int code, int amount);
-
-  LIR* OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
-                      A64RegExtEncodings ext, uint8_t amount);
-  LIR* OpRegRegRegExtend(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
-                         A64RegExtEncodings ext, uint8_t amount);
-  int EncodeExtend(int extend_type, int amount);
-  bool IsExtendEncoding(int encoded_value);
-
-  LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
-  LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
-
-  int EncodeLogicalImmediate(bool is_wide, uint64_t value);
-  uint64_t DecodeLogicalImmediate(bool is_wide, int value);
-  ArmConditionCode ArmConditionEncoding(ConditionCode code);
-
-  // Helper used in the two GenSelect variants.
-  void GenSelect(int32_t left, int32_t right, ConditionCode code, RegStorage rs_dest,
-                 int result_reg_class);
-
-  void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
-  void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
-  void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                     RegLocation rl_src2, bool is_div, int flags);
-
-  static int Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
-                                 int state, const MethodReference& target_method,
-                                 uint32_t unused_idx,
-                                 uintptr_t direct_code, uintptr_t direct_method,
-                                 InvokeType type);
-
-  static const A64EncodingMap EncodingMap[kA64Last];
-
-  ArenaVector<LIR*> call_method_insns_;
-  ArenaVector<LIR*> dex_cache_access_insns_;
-
-  int GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) OVERRIDE;
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_ARM64_CODEGEN_ARM64_H_
diff --git a/compiler/dex/quick/arm64/fp_arm64.cc b/compiler/dex/quick/arm64/fp_arm64.cc
deleted file mode 100644
index 0130ef4..0000000
--- a/compiler/dex/quick/arm64/fp_arm64.cc
+++ /dev/null
@@ -1,486 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_arm64.h"
-
-#include "arm64_lir.h"
-#include "base/logging.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir-inl.h"
-
-namespace art {
-
-void Arm64Mir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
-                                   RegLocation rl_src1, RegLocation rl_src2) {
-  int op = kA64Brk1d;
-  RegLocation rl_result;
-
-  switch (opcode) {
-    case Instruction::ADD_FLOAT_2ADDR:
-    case Instruction::ADD_FLOAT:
-      op = kA64Fadd3fff;
-      break;
-    case Instruction::SUB_FLOAT_2ADDR:
-    case Instruction::SUB_FLOAT:
-      op = kA64Fsub3fff;
-      break;
-    case Instruction::DIV_FLOAT_2ADDR:
-    case Instruction::DIV_FLOAT:
-      op = kA64Fdiv3fff;
-      break;
-    case Instruction::MUL_FLOAT_2ADDR:
-    case Instruction::MUL_FLOAT:
-      op = kA64Fmul3fff;
-      break;
-    case Instruction::REM_FLOAT_2ADDR:
-    case Instruction::REM_FLOAT:
-      FlushAllRegs();   // Send everything to home location
-      CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
-      rl_result = GetReturn(kFPReg);
-      StoreValue(rl_dest, rl_result);
-      return;
-    case Instruction::NEG_FLOAT:
-      GenNegFloat(rl_dest, rl_src1);
-      return;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
-  }
-  rl_src1 = LoadValue(rl_src1, kFPReg);
-  rl_src2 = LoadValue(rl_src2, kFPReg);
-  rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  StoreValue(rl_dest, rl_result);
-}
-
-void Arm64Mir2Lir::GenArithOpDouble(Instruction::Code opcode,
-                                    RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
-  int op = kA64Brk1d;
-  RegLocation rl_result;
-
-  switch (opcode) {
-    case Instruction::ADD_DOUBLE_2ADDR:
-    case Instruction::ADD_DOUBLE:
-      op = kA64Fadd3fff;
-      break;
-    case Instruction::SUB_DOUBLE_2ADDR:
-    case Instruction::SUB_DOUBLE:
-      op = kA64Fsub3fff;
-      break;
-    case Instruction::DIV_DOUBLE_2ADDR:
-    case Instruction::DIV_DOUBLE:
-      op = kA64Fdiv3fff;
-      break;
-    case Instruction::MUL_DOUBLE_2ADDR:
-    case Instruction::MUL_DOUBLE:
-      op = kA64Fmul3fff;
-      break;
-    case Instruction::REM_DOUBLE_2ADDR:
-    case Instruction::REM_DOUBLE:
-      FlushAllRegs();   // Send everything to home location
-      {
-        RegStorage r_tgt = CallHelperSetup(kQuickFmod);
-        LoadValueDirectWideFixed(rl_src1, rs_d0);
-        LoadValueDirectWideFixed(rl_src2, rs_d1);
-        ClobberCallerSave();
-        CallHelper(r_tgt, kQuickFmod, false);
-      }
-      rl_result = GetReturnWide(kFPReg);
-      StoreValueWide(rl_dest, rl_result);
-      return;
-    case Instruction::NEG_DOUBLE:
-      GenNegDouble(rl_dest, rl_src1);
-      return;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
-  }
-
-  rl_src1 = LoadValueWide(rl_src1, kFPReg);
-  DCHECK(rl_src1.wide);
-  rl_src2 = LoadValueWide(rl_src2, kFPReg);
-  DCHECK(rl_src2.wide);
-  rl_result = EvalLoc(rl_dest, kFPReg, true);
-  DCHECK(rl_dest.wide);
-  DCHECK(rl_result.wide);
-  NewLIR3(WIDE(op), rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void Arm64Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
-                                              int32_t constant) {
-  RegLocation rl_result;
-  RegStorage r_tmp = AllocTempSingle();
-  LoadConstantNoClobber(r_tmp, constant);
-  rl_src1 = LoadValue(rl_src1, kFPReg);
-  rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR3(kA64Fmul3fff, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), r_tmp.GetReg());
-  StoreValue(rl_dest, rl_result);
-}
-
-void Arm64Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
-                                               int64_t constant) {
-  RegLocation rl_result;
-  RegStorage r_tmp = AllocTempDouble();
-  DCHECK(r_tmp.IsDouble());
-  LoadConstantWide(r_tmp, constant);
-  rl_src1 = LoadValueWide(rl_src1, kFPReg);
-  DCHECK(rl_src1.wide);
-  rl_result = EvalLocWide(rl_dest, kFPReg, true);
-  DCHECK(rl_dest.wide);
-  DCHECK(rl_result.wide);
-  NewLIR3(WIDE(kA64Fmul3fff), rl_result.reg.GetReg(), rl_src1.reg.GetReg(), r_tmp.GetReg());
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void Arm64Mir2Lir::GenConversion(Instruction::Code opcode,
-                                 RegLocation rl_dest, RegLocation rl_src) {
-  int op = kA64Brk1d;
-  RegLocation rl_result;
-  RegisterClass src_reg_class = kInvalidRegClass;
-  RegisterClass dst_reg_class = kInvalidRegClass;
-
-  switch (opcode) {
-    case Instruction::INT_TO_FLOAT:
-      op = kA64Scvtf2fw;
-      src_reg_class = kCoreReg;
-      dst_reg_class = kFPReg;
-      break;
-    case Instruction::FLOAT_TO_INT:
-      op = kA64Fcvtzs2wf;
-      src_reg_class = kFPReg;
-      dst_reg_class = kCoreReg;
-      break;
-    case Instruction::DOUBLE_TO_FLOAT:
-      op = kA64Fcvt2sS;
-      src_reg_class = kFPReg;
-      dst_reg_class = kFPReg;
-      break;
-    case Instruction::FLOAT_TO_DOUBLE:
-      op = kA64Fcvt2Ss;
-      src_reg_class = kFPReg;
-      dst_reg_class = kFPReg;
-      break;
-    case Instruction::INT_TO_DOUBLE:
-      op = WIDE(kA64Scvtf2fw);
-      src_reg_class = kCoreReg;
-      dst_reg_class = kFPReg;
-      break;
-    case Instruction::DOUBLE_TO_INT:
-      op = WIDE(kA64Fcvtzs2wf);
-      src_reg_class = kFPReg;
-      dst_reg_class = kCoreReg;
-      break;
-    case Instruction::LONG_TO_DOUBLE:
-      op = WIDE(kA64Scvtf2fx);
-      src_reg_class = kCoreReg;
-      dst_reg_class = kFPReg;
-      break;
-    case Instruction::FLOAT_TO_LONG:
-      op = kA64Fcvtzs2xf;
-      src_reg_class = kFPReg;
-      dst_reg_class = kCoreReg;
-      break;
-    case Instruction::LONG_TO_FLOAT:
-      op = kA64Scvtf2fx;
-      src_reg_class = kCoreReg;
-      dst_reg_class = kFPReg;
-      break;
-    case Instruction::DOUBLE_TO_LONG:
-      op = WIDE(kA64Fcvtzs2xf);
-      src_reg_class = kFPReg;
-      dst_reg_class = kCoreReg;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
-  }
-
-  DCHECK_NE(src_reg_class, kInvalidRegClass);
-  DCHECK_NE(dst_reg_class, kInvalidRegClass);
-  DCHECK_NE(op, kA64Brk1d);
-
-  if (rl_src.wide) {
-    rl_src = LoadValueWide(rl_src, src_reg_class);
-  } else {
-    rl_src = LoadValue(rl_src, src_reg_class);
-  }
-
-  rl_result = EvalLoc(rl_dest, dst_reg_class, true);
-  NewLIR2(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-
-  if (rl_dest.wide) {
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    StoreValue(rl_dest, rl_result);
-  }
-}
-
-void Arm64Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
-                                       bool is_double) {
-  LIR* target = &block_label_list_[bb->taken];
-  RegLocation rl_src1;
-  RegLocation rl_src2;
-  if (is_double) {
-    rl_src1 = mir_graph_->GetSrcWide(mir, 0);
-    rl_src2 = mir_graph_->GetSrcWide(mir, 2);
-    rl_src1 = LoadValueWide(rl_src1, kFPReg);
-    rl_src2 = LoadValueWide(rl_src2, kFPReg);
-    NewLIR2(WIDE(kA64Fcmp2ff), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  } else {
-    rl_src1 = mir_graph_->GetSrc(mir, 0);
-    rl_src2 = mir_graph_->GetSrc(mir, 1);
-    rl_src1 = LoadValue(rl_src1, kFPReg);
-    rl_src2 = LoadValue(rl_src2, kFPReg);
-    NewLIR2(kA64Fcmp2ff, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  }
-  ConditionCode ccode = mir->meta.ccode;
-  switch (ccode) {
-    case kCondEq:
-    case kCondNe:
-      break;
-    case kCondLt:
-      if (gt_bias) {
-        ccode = kCondMi;
-      }
-      break;
-    case kCondLe:
-      if (gt_bias) {
-        ccode = kCondLs;
-      }
-      break;
-    case kCondGt:
-      if (gt_bias) {
-        ccode = kCondHi;
-      }
-      break;
-    case kCondGe:
-      if (gt_bias) {
-        ccode = kCondUge;
-      }
-      break;
-    default:
-      LOG(FATAL) << "Unexpected ccode: " << ccode;
-  }
-  OpCondBranch(ccode, target);
-}
-
-void Arm64Mir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
-                            RegLocation rl_src1, RegLocation rl_src2) {
-  bool is_double = false;
-  int default_result = -1;
-  RegLocation rl_result;
-
-  switch (opcode) {
-    case Instruction::CMPL_FLOAT:
-      is_double = false;
-      default_result = -1;
-      break;
-    case Instruction::CMPG_FLOAT:
-      is_double = false;
-      default_result = 1;
-      break;
-    case Instruction::CMPL_DOUBLE:
-      is_double = true;
-      default_result = -1;
-      break;
-    case Instruction::CMPG_DOUBLE:
-      is_double = true;
-      default_result = 1;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
-  }
-  if (is_double) {
-    rl_src1 = LoadValueWide(rl_src1, kFPReg);
-    rl_src2 = LoadValueWide(rl_src2, kFPReg);
-    // In case result vreg is also a src vreg, break association to avoid useless copy by EvalLoc()
-    ClobberSReg(rl_dest.s_reg_low);
-    rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    LoadConstant(rl_result.reg, default_result);
-    NewLIR2(WIDE(kA64Fcmp2ff), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  } else {
-    rl_src1 = LoadValue(rl_src1, kFPReg);
-    rl_src2 = LoadValue(rl_src2, kFPReg);
-    // In case result vreg is also a src vreg, break association to avoid useless copy by EvalLoc()
-    ClobberSReg(rl_dest.s_reg_low);
-    rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    LoadConstant(rl_result.reg, default_result);
-    NewLIR2(kA64Fcmp2ff, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  }
-  DCHECK(!rl_result.reg.IsFloat());
-
-  // TODO(Arm64): should we rather do this?
-  // csinc wD, wzr, wzr, eq
-  // csneg wD, wD, wD, le
-  // (which requires 2 instructions rather than 3)
-
-  // Rd = if cond then Rd else -Rd.
-  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
-          rl_result.reg.GetReg(), (default_result == 1) ? kArmCondPl : kArmCondLe);
-  NewLIR4(kA64Csel4rrrc, rl_result.reg.GetReg(), rwzr, rl_result.reg.GetReg(),
-          kArmCondEq);
-  StoreValue(rl_dest, rl_result);
-}
-
-void Arm64Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
-  RegLocation rl_result;
-  rl_src = LoadValue(rl_src, kFPReg);
-  rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(kA64Fneg2ff, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  StoreValue(rl_dest, rl_result);
-}
-
-void Arm64Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
-  RegLocation rl_result;
-  rl_src = LoadValueWide(rl_src, kFPReg);
-  rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(WIDE(kA64Fneg2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  StoreValueWide(rl_dest, rl_result);
-}
-
-static RegisterClass RegClassForAbsFP(RegLocation rl_src, RegLocation rl_dest) {
-  // If src is in a core reg or, unlikely, dest has been promoted to a core reg, use core reg.
-  if ((rl_src.location == kLocPhysReg && !rl_src.reg.IsFloat()) ||
-      (rl_dest.location == kLocPhysReg && !rl_dest.reg.IsFloat())) {
-    return kCoreReg;
-  }
-  // If src is in an fp reg or dest has been promoted to an fp reg, use fp reg.
-  if (rl_src.location == kLocPhysReg || rl_dest.location == kLocPhysReg) {
-    return kFPReg;
-  }
-  // With both src and dest in the stack frame we have to perform load+abs+store. Whether this
-  // is faster using a core reg or fp reg depends on the particular CPU. For example, on A53
-  // it's faster using core reg while on A57 it's faster with fp reg, the difference being
-  // bigger on the A53. Without further investigation and testing we prefer core register.
-  // (If the result is subsequently used in another fp operation, the dalvik reg will probably
-  // get promoted and that should be handled by the cases above.)
-  return kCoreReg;
-}
-
-bool Arm64Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
-  if (info->result.location == kLocInvalid) {
-    return true;  // Result is unused: inlining successful, no code generated.
-  }
-  RegLocation rl_dest = info->result;
-  RegLocation rl_src = UpdateLoc(info->args[0]);
-  RegisterClass reg_class = RegClassForAbsFP(rl_src, rl_dest);
-  rl_src = LoadValue(rl_src, reg_class);
-  RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
-  if (reg_class == kFPReg) {
-    NewLIR2(kA64Fabs2ff, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  } else {
-    // Clear the sign bit in an integer register.
-    OpRegRegImm(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffff);
-  }
-  StoreValue(rl_dest, rl_result);
-  return true;
-}
-
-bool Arm64Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
-  if (info->result.location == kLocInvalid) {
-    return true;  // Result is unused: inlining successful, no code generated.
-  }
-  RegLocation rl_dest = info->result;
-  RegLocation rl_src = UpdateLocWide(info->args[0]);
-  RegisterClass reg_class = RegClassForAbsFP(rl_src, rl_dest);
-  rl_src = LoadValueWide(rl_src, reg_class);
-  RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
-  if (reg_class == kFPReg) {
-    NewLIR2(WIDE(kA64Fabs2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  } else {
-    // Clear the sign bit in an integer register.
-    OpRegRegImm64(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffffffffffff);
-  }
-  StoreValueWide(rl_dest, rl_result);
-  return true;
-}
-
-bool Arm64Mir2Lir::GenInlinedSqrt(CallInfo* info) {
-  RegLocation rl_src = info->args[0];
-  RegLocation rl_dest = InlineTargetWide(info);  // double place for result
-  rl_src = LoadValueWide(rl_src, kFPReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(WIDE(kA64Fsqrt2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  StoreValueWide(rl_dest, rl_result);
-  return true;
-}
-
-bool Arm64Mir2Lir::GenInlinedCeil(CallInfo* info) {
-  RegLocation rl_src = info->args[0];
-  RegLocation rl_dest = InlineTargetWide(info);
-  rl_src = LoadValueWide(rl_src, kFPReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(WIDE(kA64Frintp2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  StoreValueWide(rl_dest, rl_result);
-  return true;
-}
-
-bool Arm64Mir2Lir::GenInlinedFloor(CallInfo* info) {
-  RegLocation rl_src = info->args[0];
-  RegLocation rl_dest = InlineTargetWide(info);
-  rl_src = LoadValueWide(rl_src, kFPReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(WIDE(kA64Frintm2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  StoreValueWide(rl_dest, rl_result);
-  return true;
-}
-
-bool Arm64Mir2Lir::GenInlinedRint(CallInfo* info) {
-  RegLocation rl_src = info->args[0];
-  RegLocation rl_dest = InlineTargetWide(info);
-  rl_src = LoadValueWide(rl_src, kFPReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(WIDE(kA64Frintn2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  StoreValueWide(rl_dest, rl_result);
-  return true;
-}
-
-bool Arm64Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
-  // b/26327751.
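-  // Disabled: returning false makes the intrinsic fall back to the runtime
-  // round() implementation.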
-  if ((true)) {
-    return false;
-  }
-  int32_t encoded_imm = EncodeImmSingle(bit_cast<uint32_t, float>(0.5f));
-  A64Opcode wide = (is_double) ? WIDE(0) : UNWIDE(0);
-  RegLocation rl_src = info->args[0];
-  RegLocation rl_dest = (is_double) ? InlineTargetWide(info) : InlineTarget(info);
-  rl_src = (is_double) ? LoadValueWide(rl_src, kFPReg) : LoadValue(rl_src, kFPReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  RegStorage r_imm_point5 = (is_double) ? AllocTempDouble() : AllocTempSingle();
-  RegStorage r_tmp = (is_double) ? AllocTempDouble() : AllocTempSingle();
-  // 0.5f and 0.5d are encoded in the same way.
-  NewLIR2(kA64Fmov2fI | wide, r_imm_point5.GetReg(), encoded_imm);
-  NewLIR3(kA64Fadd3fff | wide, r_tmp.GetReg(), rl_src.reg.GetReg(), r_imm_point5.GetReg());
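-  // Computes floor(x + 0.5); fcvtms rounds toward minus infinity.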
-  NewLIR2((is_double) ? kA64Fcvtms2xS : kA64Fcvtms2ws, rl_result.reg.GetReg(), r_tmp.GetReg());
-  (is_double) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
-  return true;
-}
-
-bool Arm64Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
-  DCHECK_EQ(cu_->instruction_set, kArm64);
-  int op = (is_min) ? kA64Fmin3fff : kA64Fmax3fff;
-  A64Opcode wide = (is_double) ? WIDE(0) : UNWIDE(0);
-  RegLocation rl_src1 = info->args[0];
-  RegLocation rl_src2 = (is_double) ? info->args[2] : info->args[1];
-  rl_src1 = (is_double) ? LoadValueWide(rl_src1, kFPReg) : LoadValue(rl_src1, kFPReg);
-  rl_src2 = (is_double) ? LoadValueWide(rl_src2, kFPReg) : LoadValue(rl_src2, kFPReg);
-  RegLocation rl_dest = (is_double) ? InlineTargetWide(info) : InlineTarget(info);
-  RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR3(op | wide, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  (is_double) ?  StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
-  return true;
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
deleted file mode 100644
index d92dea2..0000000
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ /dev/null
@@ -1,1798 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This file contains codegen for the Arm64 ISA. */
-
-#include "codegen_arm64.h"
-
-#include "arch/instruction_set_features.h"
-#include "arm64_lir.h"
-#include "base/bit_utils.h"
-#include "base/logging.h"
-#include "dex/compiler_ir.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "dex/reg_storage_eq.h"
-#include "driver/compiler_driver.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "mirror/array-inl.h"
-
-namespace art {
-
-LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
-  OpRegReg(kOpCmp, src1, src2);
-  return OpCondBranch(cond, target);
-}
-
-LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode ATTRIBUTE_UNUSED, const char* guide ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpIT for Arm64";
-  UNREACHABLE();
-}
-
-void Arm64Mir2Lir::OpEndIT(LIR* it ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
-}
-
-/*
- * 64-bit 3way compare function.
- *     cmp   xA, xB
- *     csinc wC, wzr, wzr, eq  // wC = (xA == xB) ? 0 : 1
- *     csneg wC, wC, wC, ge    // wC = (xA >= xB) ? wC : -wC
- */
-void Arm64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
-                              RegLocation rl_src2) {
-  RegLocation rl_result;
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  rl_result = EvalLoc(rl_dest, kCoreReg, true);
-
-  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
-  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondEq);
-  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
-          rl_result.reg.GetReg(), kArmCondGe);
-  StoreValue(rl_dest, rl_result);
-}
-
-void Arm64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                                  RegLocation rl_src1, RegLocation rl_shift) {
-  OpKind op = kOpBkpt;
-  switch (opcode) {
-  case Instruction::SHL_LONG:
-  case Instruction::SHL_LONG_2ADDR:
-    op = kOpLsl;
-    break;
-  case Instruction::SHR_LONG:
-  case Instruction::SHR_LONG_2ADDR:
-    op = kOpAsr;
-    break;
-  case Instruction::USHR_LONG:
-  case Instruction::USHR_LONG_2ADDR:
-    op = kOpLsr;
-    break;
-  default:
-    LOG(FATAL) << "Unexpected case: " << opcode;
-  }
-  rl_shift = LoadValue(rl_shift, kCoreReg);
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
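-  // A64 variable shifts of an X register take the amount in an X register,
-  // so use the 64-bit view of the 32-bit shift value.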
-  OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
-  StoreValueWide(rl_dest, rl_result);
-}
-
-static constexpr bool kUseDeltaEncodingInGenSelect = false;
-
-void Arm64Mir2Lir::GenSelect(int32_t true_val, int32_t false_val, ConditionCode ccode,
-                             RegStorage rs_dest, int result_reg_class) {
-  if (false_val == 0 ||               // 0 is better as first operand.
-      true_val == 1 ||                // Potentially Csinc.
-      true_val == -1 ||               // Potentially Csinv.
-      true_val == false_val + 1) {    // Potentially Csinc.
-    ccode = NegateComparison(ccode);
-    std::swap(true_val, false_val);
-  }
-
-  ArmConditionCode code = ArmConditionEncoding(ccode);
-
-  int opcode;                                      // The opcode.
-  RegStorage left_op = RegStorage::InvalidReg();   // The operands.
-  RegStorage right_op = RegStorage::InvalidReg();  // The operands.
-
-  bool is_wide = rs_dest.Is64Bit();
-
-  RegStorage zero_reg = is_wide ? rs_xzr : rs_wzr;
-
-  if (true_val == 0) {
-    left_op = zero_reg;
-  } else {
-    left_op = rs_dest;
-    LoadConstantNoClobber(rs_dest, true_val);
-  }
-  if (false_val == 1) {
-    right_op = zero_reg;
-    opcode = kA64Csinc4rrrc;
-  } else if (false_val == -1) {
-    right_op = zero_reg;
-    opcode = kA64Csinv4rrrc;
-  } else if (false_val == true_val + 1) {
-    right_op = left_op;
-    opcode = kA64Csinc4rrrc;
-  } else if (false_val == -true_val) {
-    right_op = left_op;
-    opcode = kA64Csneg4rrrc;
-  } else if (false_val == ~true_val) {
-    right_op = left_op;
-    opcode = kA64Csinv4rrrc;
-  } else if (true_val == 0) {
-    // left_op is zero_reg.
-    right_op = rs_dest;
-    LoadConstantNoClobber(rs_dest, false_val);
-    opcode = kA64Csel4rrrc;
-  } else {
-    // Generic case.
-    RegStorage t_reg2 = AllocTypedTemp(false, result_reg_class);
-    if (is_wide) {
-      if (t_reg2.Is32Bit()) {
-        t_reg2 = As64BitReg(t_reg2);
-      }
-    } else {
-      if (t_reg2.Is64Bit()) {
-        t_reg2 = As32BitReg(t_reg2);
-      }
-    }
-
-    if (kUseDeltaEncodingInGenSelect) {
-      int32_t delta = false_val - true_val;
-      uint32_t abs_val = delta < 0 ? -delta : delta;
-
-      if (abs_val < 0x1000) {  // TODO: Replace with InexpensiveConstant with opcode.
-        // Can encode as immediate to an add.
-        right_op = t_reg2;
-        OpRegRegImm(kOpAdd, t_reg2, left_op, delta);
-      }
-    }
-
-    // Load as constant.
-    if (!right_op.Valid()) {
-      LoadConstantNoClobber(t_reg2, false_val);
-      right_op = t_reg2;
-    }
-
-    opcode = kA64Csel4rrrc;
-  }
-
-  DCHECK(left_op.Valid() && right_op.Valid());
-  NewLIR4(is_wide ? WIDE(opcode) : opcode, rs_dest.GetReg(), left_op.GetReg(), right_op.GetReg(),
-      code);
-}
-
-void Arm64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
-                                    int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                                    RegisterClass dest_reg_class) {
-  DCHECK(rs_dest.Valid());
-  OpRegReg(kOpCmp, left_op, right_op);
-  GenSelect(true_val, false_val, code, rs_dest, dest_reg_class);
-}
-
-void Arm64Mir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
-  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
-  rl_src = LoadValue(rl_src, rl_src.ref ? kRefReg : kCoreReg);
-  // rl_src may be aliased with rl_result/rl_dest, so do compare early.
-  OpRegImm(kOpCmp, rl_src.reg, 0);
-
-  RegLocation rl_dest = mir_graph_->GetDest(mir);
-
-  // The kMirOpSelect has two variants, one for constants and one for moves.
-  if (mir->ssa_rep->num_uses == 1) {
-    RegLocation rl_result = EvalLoc(rl_dest, rl_dest.ref ? kRefReg : kCoreReg, true);
-    GenSelect(mir->dalvikInsn.vB, mir->dalvikInsn.vC, mir->meta.ccode, rl_result.reg,
-              rl_dest.ref ? kRefReg : kCoreReg);
-    StoreValue(rl_dest, rl_result);
-  } else {
-    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
-    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
-
-    RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;
-    rl_true = LoadValue(rl_true, result_reg_class);
-    rl_false = LoadValue(rl_false, result_reg_class);
-    RegLocation rl_result = EvalLoc(rl_dest, result_reg_class, true);
-
-    bool is_wide = rl_dest.ref || rl_dest.wide;
-    int opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
-    NewLIR4(opcode, rl_result.reg.GetReg(),
-            rl_true.reg.GetReg(), rl_false.reg.GetReg(), ArmConditionEncoding(mir->meta.ccode));
-    StoreValue(rl_dest, rl_result);
-  }
-}
-
-void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
-  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
-  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
-  LIR* taken = &block_label_list_[bb->taken];
-  LIR* not_taken = &block_label_list_[bb->fall_through];
-  // Normalize such that if either operand is constant, src2 will be constant.
-  ConditionCode ccode = mir->meta.ccode;
-  if (rl_src1.is_const) {
-    std::swap(rl_src1, rl_src2);
-    ccode = FlipComparisonOrder(ccode);
-  }
-
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-
-  if (rl_src2.is_const) {
-    // TODO: Optimize for rl_src1.is_const? (Does happen in the boot image at the moment.)
-
-    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
-    // Special handling using cbz & cbnz.
-    if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
-      OpCmpImmBranch(ccode, rl_src1.reg, 0, taken);
-      OpCmpImmBranch(NegateComparison(ccode), rl_src1.reg, 0, not_taken);
-      return;
-    }
-
-    // Only handle Imm if src2 is not already in a register.
-    rl_src2 = UpdateLocWide(rl_src2);
-    if (rl_src2.location != kLocPhysReg) {
-      OpRegImm64(kOpCmp, rl_src1.reg, val);
-      OpCondBranch(ccode, taken);
-      OpCondBranch(NegateComparison(ccode), not_taken);
-      return;
-    }
-  }
-
-  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
-  OpCondBranch(ccode, taken);
-  OpCondBranch(NegateComparison(ccode), not_taken);
-}
-
-/*
- * Generate a register comparison to an immediate and branch.  Caller
- * is responsible for setting branch target field.
- */
-LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
-                                  LIR* target) {
-  LIR* branch = nullptr;
-  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
-  if (check_value == 0) {
-    if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
-      A64Opcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
-      A64Opcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
-      branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
-    } else if (arm_cond == kArmCondLs) {
-      // kArmCondLs is an unsigned less or equal. A comparison r <= 0 is then the same as cbz.
-      // This case happens for a bounds check of array[0].
-      A64Opcode opcode = kA64Cbz2rt;
-      A64Opcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
-      branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
-    } else if (arm_cond == kArmCondLt || arm_cond == kArmCondGe) {
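-      // r < 0 and r >= 0 depend only on the sign bit, so test bit 31
-      // (63 for a 64-bit register) with tbnz/tbz.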
-      A64Opcode opcode = (arm_cond == kArmCondLt) ? kA64Tbnz3rht : kA64Tbz3rht;
-      A64Opcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
-      int value = reg.Is64Bit() ? 63 : 31;
-      branch = NewLIR3(opcode | wide, reg.GetReg(), value, 0);
-    }
-  }
-
-  if (branch == nullptr) {
-    OpRegImm(kOpCmp, reg, check_value);
-    branch = NewLIR2(kA64B2ct, arm_cond, 0);
-  }
-
-  branch->target = target;
-  return branch;
-}
-
-LIR* Arm64Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg,
-                                     RegStorage base_reg, int offset, int check_value,
-                                     LIR* target, LIR** compare) {
-  DCHECK(compare == nullptr);
-  // The temp register may be 64-bit (an ArgReg or RefReg).
-  // Always compare the 32-bit value regardless of temp_reg's width.
-  if (temp_reg.Is64Bit()) {
-    temp_reg = As32BitReg(temp_reg);
-  }
-  Load32Disp(base_reg, offset, temp_reg);
-  LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
-  return branch;
-}
-
-LIR* Arm64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
-  bool dest_is_fp = r_dest.IsFloat();
-  bool src_is_fp = r_src.IsFloat();
-  A64Opcode opcode = kA64Brk1d;
-  LIR* res;
-
-  if (LIKELY(dest_is_fp == src_is_fp)) {
-    if (LIKELY(!dest_is_fp)) {
-      DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
-
-      // Core/core copy.
-      // Copies involving the sp register require a different instruction.
-      opcode = UNLIKELY(A64_REG_IS_SP(r_dest.GetReg())) ? kA64Add4RRdT : kA64Mov2rr;
-
-      // TODO(Arm64): kA64Add4RRdT formally has 4 args, but is used as a 2-arg instruction.
-      //   This currently works because the other arguments default to 0. We should
-      //   rather introduce an alias kA64Mov2RR.
-
-      // Do an x/x copy only if both registers are x.
-      if (r_dest.Is64Bit() && r_src.Is64Bit()) {
-        opcode = WIDE(opcode);
-      }
-    } else {
-      // Float/float copy.
-      bool dest_is_double = r_dest.IsDouble();
-      bool src_is_double = r_src.IsDouble();
-
-      // We do not do float/double or double/float casts here.
-      DCHECK_EQ(dest_is_double, src_is_double);
-
-      // Homogeneous float/float copy.
-      opcode = (dest_is_double) ? WIDE(kA64Fmov2ff) : kA64Fmov2ff;
-    }
-  } else {
-    // Inhomogeneous register copy.
-    if (dest_is_fp) {
-      if (r_dest.IsDouble()) {
-        opcode = kA64Fmov2Sx;
-      } else {
-        r_src = Check32BitReg(r_src);
-        opcode = kA64Fmov2sw;
-      }
-    } else {
-      if (r_src.IsDouble()) {
-        opcode = kA64Fmov2xS;
-      } else {
-        r_dest = Check32BitReg(r_dest);
-        opcode = kA64Fmov2ws;
-      }
-    }
-  }
-
-  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
-
-  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
-    res->flags.is_nop = true;
-  }
-
-  return res;
-}
-
-void Arm64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
-  if (r_dest != r_src) {
-    LIR* res = OpRegCopyNoInsert(r_dest, r_src);
-    AppendLIR(res);
-  }
-}
-
-void Arm64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
-  OpRegCopy(r_dest, r_src);
-}
-
-// Table of magic divisors
-struct MagicTable {
-  int magic64_base;
-  int magic64_eor;
-  uint64_t magic64;
-  uint32_t magic32;
-  uint32_t shift;
-  DividePattern pattern;
-};
-
-static const MagicTable magic_table[] = {
-  {   0,      0,                  0,          0, 0, DivideNone},  // 0
-  {   0,      0,                  0,          0, 0, DivideNone},  // 1
-  {   0,      0,                  0,          0, 0, DivideNone},  // 2
-  {0x3c,     -1, 0x5555555555555556, 0x55555556, 0, Divide3},     // 3
-  {   0,      0,                  0,          0, 0, DivideNone},  // 4
-  {0xf9,     -1, 0x6666666666666667, 0x66666667, 1, Divide5},     // 5
-  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 0, Divide3},     // 6
-  {  -1,     -1, 0x924924924924924A, 0x92492493, 2, Divide7},     // 7
-  {   0,      0,                  0,          0, 0, DivideNone},  // 8
-  {  -1,     -1, 0x38E38E38E38E38E4, 0x38E38E39, 1, Divide5},     // 9
-  {0xf9,     -1, 0x6666666666666667, 0x66666667, 2, Divide5},     // 10
-  {  -1,     -1, 0x2E8BA2E8BA2E8BA3, 0x2E8BA2E9, 1, Divide5},     // 11
-  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 1, Divide5},     // 12
-  {  -1,     -1, 0x4EC4EC4EC4EC4EC5, 0x4EC4EC4F, 2, Divide5},     // 13
-  {  -1,     -1, 0x924924924924924A, 0x92492493, 3, Divide7},     // 14
-  {0x78,     -1, 0x8888888888888889, 0x88888889, 3, Divide7},     // 15
-};
-
-// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
-bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED,
-                                      bool is_div,
-                                      RegLocation rl_src,
-                                      RegLocation rl_dest,
-                                      int lit) {
-  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
-    return false;
-  }
-  DividePattern pattern = magic_table[lit].pattern;
-  if (pattern == DivideNone) {
-    return false;
-  }
-  // Tuning: add rem patterns
-  if (!is_div) {
-    return false;
-  }
-
-  RegStorage r_magic = AllocTemp();
-  LoadConstant(r_magic, magic_table[lit].magic32);
-  rl_src = LoadValue(rl_src, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  RegStorage r_long_mul = AllocTemp();
-  NewLIR3(kA64Smull3xww, As64BitReg(r_long_mul).GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
-  switch (pattern) {
-    case Divide3:
-      OpRegRegImm(kOpLsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul), 32);
-      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
-      break;
-    case Divide5:
-      OpRegRegImm(kOpAsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul),
-                  32 + magic_table[lit].shift);
-      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
-      break;
-    case Divide7:
-      OpRegRegRegShift(kOpAdd, As64BitReg(r_long_mul), As64BitReg(rl_src.reg),
-                       As64BitReg(r_long_mul), EncodeShift(kA64Lsr, 32));
-      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
-      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
-      break;
-    default:
-      LOG(FATAL) << "Unexpected pattern: " << pattern;
-  }
-  StoreValue(rl_dest, rl_result);
-  return true;
-}
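-
-// Illustrative sketch, not part of the original file: the Divide3 pattern above
-// computes x / 3 as the high 32 bits of x * 0x55555556, minus the sign of x.
-// A host-side model of the generated instruction sequence:
-static int32_t ModelDiv3(int32_t x) {
-  int64_t product = static_cast<int64_t>(x) * INT64_C(0x55555556);  // smull
-  int32_t high = static_cast<int32_t>(product >> 32);               // shift right by 32
-  return high - (x >> 31);                                          // sub with asr #31
-}
-// For x = 7 this yields 2 and for x = -7 it yields -2, matching Java's
-// truncate-toward-zero division.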
-
-bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED,
-                                        bool is_div,
-                                        RegLocation rl_src,
-                                        RegLocation rl_dest,
-                                        int64_t lit) {
-  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
-    return false;
-  }
-  DividePattern pattern = magic_table[lit].pattern;
-  if (pattern == DivideNone) {
-    return false;
-  }
-  // Tuning: add rem patterns
-  if (!is_div) {
-    return false;
-  }
-
-  RegStorage r_magic = AllocTempWide();
-  rl_src = LoadValueWide(rl_src, kCoreReg);
-  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  RegStorage r_long_mul = AllocTempWide();
-
-  if (magic_table[lit].magic64_base >= 0) {
-    // Check that the entry in the table is correct.
-    if (kIsDebugBuild) {
-      uint64_t reconstructed_imm;
-      uint64_t base = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_base);
-      if (magic_table[lit].magic64_eor >= 0) {
-        uint64_t eor = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_eor);
-        reconstructed_imm = base ^ eor;
-      } else {
-        reconstructed_imm = base + 1;
-      }
-      DCHECK_EQ(reconstructed_imm, magic_table[lit].magic64) << " for literal " << lit;
-    }
-
-    // Load the magic constant in two instructions.
-    NewLIR3(WIDE(kA64Orr3Rrl), r_magic.GetReg(), rxzr, magic_table[lit].magic64_base);
-    if (magic_table[lit].magic64_eor >= 0) {
-      NewLIR3(WIDE(kA64Eor3Rrl), r_magic.GetReg(), r_magic.GetReg(),
-              magic_table[lit].magic64_eor);
-    } else {
-      NewLIR4(WIDE(kA64Add4RRdT), r_magic.GetReg(), r_magic.GetReg(), 1, 0);
-    }
-  } else {
-    LoadConstantWide(r_magic, magic_table[lit].magic64);
-  }
-
-  NewLIR3(kA64Smulh3xxx, r_long_mul.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
-  switch (pattern) {
-    case Divide3:
-      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
-      break;
-    case Divide5:
-      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
-      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
-      break;
-    case Divide7:
-      OpRegRegReg(kOpAdd, r_long_mul, rl_src.reg, r_long_mul);
-      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
-      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
-      break;
-    default:
-      LOG(FATAL) << "Unexpected pattern: " << pattern;
-  }
-  StoreValueWide(rl_dest, rl_result);
-  return true;
-}
-
-// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
-// and store the result in 'rl_dest'.
-bool Arm64Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
-                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
-  return HandleEasyDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int>(lit));
-}
-
-// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
-// and store the result in 'rl_dest'.
-bool Arm64Mir2Lir::HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
-                                      RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
-  const bool is_64bit = rl_dest.wide;
-  const int nbits = (is_64bit) ? 64 : 32;
-
-  if (lit < 2) {
-    return false;
-  }
-  if (!IsPowerOfTwo(lit)) {
-    if (is_64bit) {
-      return SmallLiteralDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, lit);
-    } else {
-      return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int32_t>(lit));
-    }
-  }
-  int k = CTZ(lit);
-  if (k >= nbits - 2) {
-    // Avoid special cases.
-    return false;
-  }
-
-  RegLocation rl_result;
-  RegStorage t_reg;
-  if (is_64bit) {
-    rl_src = LoadValueWide(rl_src, kCoreReg);
-    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-    t_reg = AllocTempWide();
-  } else {
-    rl_src = LoadValue(rl_src, kCoreReg);
-    rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    t_reg = AllocTemp();
-  }
-
-  int shift = EncodeShift(kA64Lsr, nbits - k);
-  if (is_div) {
-    if (lit == 2) {
-      // Division by 2 is by far the most common division by constant.
-      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
-      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
-    } else {
-      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
-      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, t_reg, shift);
-      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
-    }
-  } else {
-    if (lit == 2) {
-      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
-      OpRegRegImm64(kOpAnd, t_reg, t_reg, lit - 1);
-      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg, rl_src.reg, shift);
-    } else {
-      RegStorage t_reg2 = (is_64bit) ? AllocTempWide() : AllocTemp();
-      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
-      OpRegRegRegShift(kOpAdd, t_reg2, rl_src.reg, t_reg, shift);
-      OpRegRegImm64(kOpAnd, t_reg2, t_reg2, lit - 1);
-      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg2, t_reg, shift);
-    }
-  }
-
-  if (is_64bit) {
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    StoreValue(rl_dest, rl_result);
-  }
-  return true;
-}
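-
-// Illustrative sketch, not part of the original file: the power-of-two path
-// above rounds toward zero by adding lit - 1 to negative dividends before the
-// arithmetic shift. A host-side model for a 32-bit division by 1 << k:
-static int32_t ModelDivPow2(int32_t x, int k) {
-  int32_t sign = x >> 31;                                    // kOpAsr by nbits - 1
-  uint32_t bias = static_cast<uint32_t>(sign) >> (32 - k);   // lsr shift from EncodeShift
-  int32_t biased = x + static_cast<int32_t>(bias);           // kOpAdd with shifted operand
-  return biased >> k;                                        // kOpAsr by k
-}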
-
-bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src ATTRIBUTE_UNUSED,
-                                RegLocation rl_dest ATTRIBUTE_UNUSED,
-                                int lit ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
-  UNREACHABLE();
-}
-
-RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED,
-                                       RegLocation rl_src1 ATTRIBUTE_UNUSED,
-                                       int lit ATTRIBUTE_UNUSED,
-                                       bool is_div ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
-  UNREACHABLE();
-}
-
-RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-
-  // Put the literal in a temp.
-  RegStorage lit_temp = AllocTemp();
-  LoadConstant(lit_temp, lit);
-  // Use the generic case for div/rem with arg2 in a register.
-  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
-  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
-  FreeTemp(lit_temp);
-
-  return rl_result;
-}
-
-RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
-                                    RegLocation rl_src1 ATTRIBUTE_UNUSED,
-                                    RegLocation rl_src2 ATTRIBUTE_UNUSED,
-                                    bool is_div ATTRIBUTE_UNUSED,
-                                    int flags ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
-  UNREACHABLE();
-}
-
-RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
-                                    bool is_div) {
-  CHECK_EQ(r_src1.Is64Bit(), r_src2.Is64Bit());
-
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  if (is_div) {
-    OpRegRegReg(kOpDiv, rl_result.reg, r_src1, r_src2);
-  } else {
-    // temp = r_src1 / r_src2
-    // dest = r_src1 - temp * r_src2
-    RegStorage temp;
-    A64Opcode wide;
-    if (rl_result.reg.Is64Bit()) {
-      temp = AllocTempWide();
-      wide = WIDE(0);
-    } else {
-      temp = AllocTemp();
-      wide = UNWIDE(0);
-    }
-    OpRegRegReg(kOpDiv, temp, r_src1, r_src2);
-    NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(),
-            r_src2.GetReg(), r_src1.GetReg());
-    FreeTemp(temp);
-  }
-  return rl_result;
-}
-
-bool Arm64Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
-  RegLocation rl_src = info->args[0];
-  rl_src = LoadValue(rl_src, kCoreReg);
-  RegLocation rl_dest = InlineTarget(info);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-
-  // Compare the source value with zero. Write the negated value to the result if
-  // negative, otherwise write the original value.
-  OpRegImm(kOpCmp, rl_src.reg, 0);
-  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg(),
-          kArmCondPl);
-  StoreValue(rl_dest, rl_result);
-  return true;
-}
-
-bool Arm64Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
-  RegLocation rl_src = info->args[0];
-  rl_src = LoadValueWide(rl_src, kCoreReg);
-  RegLocation rl_dest = InlineTargetWide(info);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-
-  // Compare the source value with zero. Write the negated value to the result if
-  // negative, otherwise write the original value.
-  OpRegImm(kOpCmp, rl_src.reg, 0);
-  NewLIR4(WIDE(kA64Csneg4rrrc), rl_result.reg.GetReg(), rl_src.reg.GetReg(),
-          rl_src.reg.GetReg(), kArmCondPl);
-  StoreValueWide(rl_dest, rl_result);
-  return true;
-}
-
-bool Arm64Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
-  DCHECK_EQ(cu_->instruction_set, kArm64);
-  RegLocation rl_src1 = info->args[0];
-  RegLocation rl_src2 = (is_long) ? info->args[2] : info->args[1];
-  rl_src1 = (is_long) ? LoadValueWide(rl_src1, kCoreReg) : LoadValue(rl_src1, kCoreReg);
-  rl_src2 = (is_long) ? LoadValueWide(rl_src2, kCoreReg) : LoadValue(rl_src2, kCoreReg);
-  RegLocation rl_dest = (is_long) ? InlineTargetWide(info) : InlineTarget(info);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
-  NewLIR4((is_long) ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc, rl_result.reg.GetReg(),
-          rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), (is_min) ? kArmCondLt : kArmCondGt);
-  (is_long) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
-  return true;
-}
-
-bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
-  RegLocation rl_src_address = info->args[0];  // long address
-  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);
-  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-
-  LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
-  if (size == k64) {
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
-    StoreValue(rl_dest, rl_result);
-  }
-  return true;
-}
-
-bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
-  RegLocation rl_src_address = info->args[0];  // long address
-  RegLocation rl_src_value = info->args[2];  // [size] value
-  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
-
-  RegLocation rl_value;
-  if (size == k64) {
-    rl_value = LoadValueWide(rl_src_value, kCoreReg);
-  } else {
-    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
-    rl_value = LoadValue(rl_src_value, kCoreReg);
-  }
-  StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
-  return true;
-}
-
-bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
-  DCHECK_EQ(cu_->instruction_set, kArm64);
-  // Unused - RegLocation rl_src_unsafe = info->args[0];
-  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
-  RegLocation rl_src_offset = info->args[2];  // long low
-  RegLocation rl_src_expected = info->args[4];  // int, long or Object
-  // If is_long, high half is in info->args[5]
-  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
-  // If is_long, high half is in info->args[7]
-  RegLocation rl_dest = InlineTarget(info);  // boolean place for result
-
-  // Load Object and offset
-  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
-  RegLocation rl_offset = LoadValueWide(rl_src_offset, kCoreReg);
-
-  RegLocation rl_new_value;
-  RegLocation rl_expected;
-  if (is_long) {
-    rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
-    rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
-  } else {
-    rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
-    rl_expected = LoadValue(rl_src_expected, is_object ? kRefReg : kCoreReg);
-  }
-
-  if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
-    // Mark card for object assuming new value is stored.
-    MarkGCCard(0, rl_new_value.reg, rl_object.reg);
-  }
-
-  RegStorage r_ptr = AllocTempRef();
-  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);
-
-  // Free now unneeded rl_object and rl_offset to give more temps.
-  ClobberSReg(rl_object.s_reg_low);
-  FreeTemp(rl_object.reg);
-  ClobberSReg(rl_offset.s_reg_low);
-  FreeTemp(rl_offset.reg);
-
-  // do {
-  //   tmp = [r_ptr] - expected;
-  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
-  // result = tmp != 0;
-
-  RegStorage r_tmp;
-  RegStorage r_tmp_stored;
-  RegStorage rl_new_value_stored = rl_new_value.reg;
-  A64Opcode wide = UNWIDE(0);
-  if (is_long) {
-    r_tmp_stored = r_tmp = AllocTempWide();
-    wide = WIDE(0);
-  } else if (is_object) {
-    // References use 64-bit registers, but are stored as compressed 32-bit values.
-    // This means r_tmp_stored != r_tmp.
-    r_tmp = AllocTempRef();
-    r_tmp_stored = As32BitReg(r_tmp);
-    rl_new_value_stored = As32BitReg(rl_new_value_stored);
-  } else {
-    r_tmp_stored = r_tmp = AllocTemp();
-  }
-
-  RegStorage r_tmp32 = (r_tmp.Is32Bit()) ? r_tmp : As32BitReg(r_tmp);
-  LIR* loop = NewLIR0(kPseudoTargetLabel);
-  NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
-  OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
-  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
-  LIR* early_exit = OpCondBranch(kCondNe, nullptr);
-  NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
-  NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
-  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
-  OpCondBranch(kCondNe, loop);
-
-  LIR* exit_loop = NewLIR0(kPseudoTargetLabel);
-  early_exit->target = exit_loop;
-
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondNe);
-
-  FreeTemp(r_tmp);  // Now unneeded.
-  FreeTemp(r_ptr);  // Now unneeded.
-
-  StoreValue(rl_dest, rl_result);
-
-  return true;
-}
-
-bool Arm64Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
-  constexpr int kLargeArrayThreshold = 512;
-
-  RegLocation rl_src = info->args[0];
-  RegLocation rl_src_pos = info->args[1];
-  RegLocation rl_dst = info->args[2];
-  RegLocation rl_dst_pos = info->args[3];
-  RegLocation rl_length = info->args[4];
-  // Compile-time check; let the non-inlined method handle the exception to reduce related metadata.
-  if ((rl_src_pos.is_const && (mir_graph_->ConstantValue(rl_src_pos) < 0)) ||
-      (rl_dst_pos.is_const && (mir_graph_->ConstantValue(rl_dst_pos) < 0)) ||
-      (rl_length.is_const && (mir_graph_->ConstantValue(rl_length) < 0))) {
-    return false;
-  }
-
-  ClobberCallerSave();
-  LockCallTemps();  // Prepare for explicit register usage.
-  RegStorage rs_src = rs_x0;
-  RegStorage rs_dst = rs_x1;
-  LoadValueDirectFixed(rl_src, rs_src);
-  LoadValueDirectFixed(rl_dst, rs_dst);
-
-  // Handle null pointer exception in slow-path.
-  LIR* src_check_branch = OpCmpImmBranch(kCondEq, rs_src, 0, nullptr);
-  LIR* dst_check_branch = OpCmpImmBranch(kCondEq, rs_dst, 0, nullptr);
-  // Handle potential overlapping in slow-path.
-  // TUNING: Support overlapping cases.
-  LIR* src_dst_same = OpCmpBranch(kCondEq, rs_src, rs_dst, nullptr);
-  // Handle exception or big length in slow-path.
-  RegStorage rs_length = rs_w2;
-  LoadValueDirectFixed(rl_length, rs_length);
-  LIR* len_neg_or_too_big = OpCmpImmBranch(kCondHi, rs_length, kLargeArrayThreshold, nullptr);
-  // Src bounds check.
-  RegStorage rs_src_pos = rs_w3;
-  RegStorage rs_arr_length = rs_w4;
-  LoadValueDirectFixed(rl_src_pos, rs_src_pos);
-  LIR* src_pos_negative = OpCmpImmBranch(kCondLt, rs_src_pos, 0, nullptr);
-  Load32Disp(rs_src, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
-  OpRegReg(kOpSub, rs_arr_length, rs_src_pos);
-  LIR* src_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);
-  // Dst bounds check.
-  RegStorage rs_dst_pos = rs_w5;
-  LoadValueDirectFixed(rl_dst_pos, rs_dst_pos);
-  LIR* dst_pos_negative = OpCmpImmBranch(kCondLt, rs_dst_pos, 0, nullptr);
-  Load32Disp(rs_dst, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
-  OpRegReg(kOpSub, rs_arr_length, rs_dst_pos);
-  LIR* dst_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);
-
-  // Everything is checked now.
-  // Set rs_src to the address of the first element to be copied.
-  rs_src_pos = As64BitReg(rs_src_pos);
-  OpRegImm(kOpAdd, rs_src, mirror::Array::DataOffset(2).Int32Value());
-  OpRegRegImm(kOpLsl, rs_src_pos, rs_src_pos, 1);
-  OpRegReg(kOpAdd, rs_src, rs_src_pos);
-  // Set rs_dst to the address of the first element to be copied.
-  rs_dst_pos = As64BitReg(rs_dst_pos);
-  OpRegImm(kOpAdd, rs_dst, mirror::Array::DataOffset(2).Int32Value());
-  OpRegRegImm(kOpLsl, rs_dst_pos, rs_dst_pos, 1);
-  OpRegReg(kOpAdd, rs_dst, rs_dst_pos);
-
-  // rs_arr_length won't be used anymore.
-  RegStorage rs_tmp = rs_arr_length;
-  // Use 64-bit view since rs_length will be used as index.
-  rs_length = As64BitReg(rs_length);
-  OpRegRegImm(kOpLsl, rs_length, rs_length, 1);
-
-  // Copy one element.
-  LIR* jmp_to_copy_two = NewLIR3(WIDE(kA64Tbz3rht), rs_length.GetReg(), 1, 0);
-  OpRegImm(kOpSub, rs_length, 2);
-  LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, kSignedHalf);
-  StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, kSignedHalf);
-
-  // Copy two elements.
-  LIR *copy_two = NewLIR0(kPseudoTargetLabel);
-  LIR* jmp_to_copy_four = NewLIR3(WIDE(kA64Tbz3rht), rs_length.GetReg(), 2, 0);
-  OpRegImm(kOpSub, rs_length, 4);
-  LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k32);
-  StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k32);
-
-  // Copy four elements.
-  LIR *copy_four = NewLIR0(kPseudoTargetLabel);
-  LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_length, 0, nullptr);
-  LIR *begin_loop = NewLIR0(kPseudoTargetLabel);
-  OpRegImm(kOpSub, rs_length, 8);
-  rs_tmp = As64BitReg(rs_tmp);
-  LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k64);
-  StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k64);
-  LIR* jmp_to_loop = OpCmpImmBranch(kCondNe, rs_length, 0, nullptr);
-  LIR* loop_finished = OpUnconditionalBranch(nullptr);
-
-  LIR *check_failed = NewLIR0(kPseudoTargetLabel);
-  LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
-  LIR* return_point = NewLIR0(kPseudoTargetLabel);
-
-  src_check_branch->target = check_failed;
-  dst_check_branch->target = check_failed;
-  src_dst_same->target = check_failed;
-  len_neg_or_too_big->target = check_failed;
-  src_pos_negative->target = check_failed;
-  src_bad_len->target = check_failed;
-  dst_pos_negative->target = check_failed;
-  dst_bad_len->target = check_failed;
-  jmp_to_copy_two->target = copy_two;
-  jmp_to_copy_four->target = copy_four;
-  jmp_to_ret->target = return_point;
-  jmp_to_loop->target = begin_loop;
-  loop_finished->target = return_point;
-
-  AddIntrinsicSlowPath(info, launchpad_branch, return_point);
-  ClobberCallerSave();  // We must clobber everything because the slow path will return here.
-
-  return true;
-}
-
-void Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
-  LIR* lir = NewLIR2(kA64Ldr2rp, As32BitReg(reg).GetReg(), 0);
-  lir->target = target;
-}
-
-bool Arm64Mir2Lir::CanUseOpPcRelDexCacheArrayLoad() const {
-  return dex_cache_arrays_layout_.Valid();
-}
-
-void Arm64Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest,
-                                            bool wide) {
-  LIR* adrp = NewLIR2(kA64Adrp2xd, r_dest.GetReg(), 0);
-  adrp->operands[2] = WrapPointer(dex_file);
-  adrp->operands[3] = offset;
-  adrp->operands[4] = WrapPointer(adrp);
-  dex_cache_access_insns_.push_back(adrp);
-  if (wide) {
-    DCHECK(r_dest.Is64Bit());
-  }
-  LIR* ldr = LoadBaseDisp(r_dest, 0, r_dest, wide ? k64 : kReference, kNotVolatile);
-  ldr->operands[4] = adrp->operands[4];
-  ldr->flags.fixup = kFixupLabel;
-  dex_cache_access_insns_.push_back(ldr);
-}
-
-LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
-  UNREACHABLE();
-}
-
-LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
-  UNREACHABLE();
-}
-
-void Arm64Mir2Lir::GenMaddMsubInt(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
-                                  RegLocation rl_src3, bool is_sub) {
-  rl_src1 = LoadValue(rl_src1, kCoreReg);
-  rl_src2 = LoadValue(rl_src2, kCoreReg);
-  rl_src3 = LoadValue(rl_src3, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  NewLIR4(is_sub ? kA64Msub4rrrr : kA64Madd4rrrr, rl_result.reg.GetReg(), rl_src1.reg.GetReg(),
-          rl_src2.reg.GetReg(), rl_src3.reg.GetReg());
-  StoreValue(rl_dest, rl_result);
-}
-
-void Arm64Mir2Lir::GenMaddMsubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
-                                   RegLocation rl_src3, bool is_sub) {
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  rl_src3 = LoadValueWide(rl_src3, kCoreReg);
-  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  NewLIR4(is_sub ? WIDE(kA64Msub4rrrr) : WIDE(kA64Madd4rrrr), rl_result.reg.GetReg(),
-          rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), rl_src3.reg.GetReg());
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
-                                                 RegLocation rl_result, int lit ATTRIBUTE_UNUSED,
-                                                 int first_bit, int second_bit) {
-  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
-                   EncodeShift(kA64Lsl, second_bit - first_bit));
-  if (first_bit != 0) {
-    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
-  }
-}
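-
-// Illustrative example, not from the original sources: for lit = 10 (bits 1 and
-// 3 set, so first_bit = 1 and second_bit = 3), the sequence above computes
-// ((x + (x << 2)) << 1) == x * 10 without a multiply instruction.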
-
-void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of GenDivZero for Arm64";
-}
-
-// Test suspend flag, return target of taken suspend branch
-LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
-  RegStorage r_tmp = AllocTemp();
-  LoadBaseDisp(rs_xSELF, Thread::ThreadFlagsOffset<kArm64PointerSize>().Int32Value(), r_tmp,
-      kUnsignedHalf, kNotVolatile);
-  LIR* cmp_branch = OpCmpImmBranch(target == nullptr ? kCondNe : kCondEq, r_tmp, 0, target);
-  FreeTemp(r_tmp);
-  return cmp_branch;
-}
-
-// Decrement register and branch on condition
-LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
-  // Combine sub & test using sub setflags encoding here.  We need to make sure a
-  // subtract form that sets carry is used, so generate explicitly.
-  // TODO: might be best to add a new op, kOpSubs, and handle it generically.
-  A64Opcode opcode = reg.Is64Bit() ? WIDE(kA64Subs3rRd) : UNWIDE(kA64Subs3rRd);
-  NewLIR3(opcode, reg.GetReg(), reg.GetReg(), 1);  // For value == 1, this should set flags.
-  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
-  return OpCondBranch(c_code, target);
-}
-
-bool Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
-  if (!cu_->compiler_driver->GetInstructionSetFeatures()->IsSmp()) {
-    return false;
-  }
-  // Start off using the last LIR as the barrier. If it is not sufficient, generate a new one.
-  LIR* barrier = last_lir_insn_;
-
-  int dmb_flavor;
-  // TODO: revisit Arm barrier kinds
-  switch (barrier_kind) {
-    case kAnyStore: dmb_flavor = kISH; break;
-    case kLoadAny: dmb_flavor = kISH; break;
-        // We conjecture that kISHLD is insufficient.  It is documented
-        // to provide LoadLoad | StoreStore ordering.  But if this were used
-        // to implement volatile loads, we suspect that the lack of store
-        // atomicity on ARM would cause us to allow incorrect results for
-        // the canonical IRIW example.  But we're not sure.
-        // We should be using acquire loads instead.
-    case kStoreStore: dmb_flavor = kISHST; break;
-    case kAnyAny: dmb_flavor = kISH; break;
-    default:
-      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
-      dmb_flavor = kSY;  // quiet gcc.
-      break;
-  }
-
-  bool ret = false;
-
-  // If the same barrier already exists, don't generate another.
-  if (barrier == nullptr
-      || (barrier->opcode != kA64Dmb1B || barrier->operands[0] != dmb_flavor)) {
-    barrier = NewLIR1(kA64Dmb1B, dmb_flavor);
-    ret = true;
-  }
-
-  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
-  DCHECK(!barrier->flags.use_def_invalid);
-  barrier->u.m.def_mask = &kEncodeAll;
-  return ret;
-}
-
-void Arm64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
-  RegLocation rl_result;
-
-  rl_src = LoadValue(rl_src, kCoreReg);
-  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
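-  // Sbfm with immr = 0 and imms = 31 is the canonical sxtw alias: sign-extend
-  // the 32-bit source into the 64-bit destination.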
-  NewLIR4(WIDE(kA64Sbfm4rrdd), rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0, 31);
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void Arm64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
-                                 RegLocation rl_src1, RegLocation rl_src2, bool is_div, int flags) {
-  if (rl_src2.is_const) {
-    DCHECK(rl_src2.wide);
-    int64_t lit = mir_graph_->ConstantValueWide(rl_src2);
-    if (HandleEasyDivRem64(opcode, is_div, rl_src1, rl_dest, lit)) {
-      return;
-    }
-  }
-
-  RegLocation rl_result;
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
-    GenDivZeroCheck(rl_src2.reg);
-  }
-  rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, is_div);
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void Arm64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
-                             RegLocation rl_src2) {
-  RegLocation rl_result;
-
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  OpRegRegRegShift(op, rl_result.reg, rl_src1.reg, rl_src2.reg, ENCODE_NO_SHIFT);
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
-  RegLocation rl_result;
-
-  rl_src = LoadValueWide(rl_src, kCoreReg);
-  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  OpRegRegShift(kOpNeg, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void Arm64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
-  RegLocation rl_result;
-
-  rl_src = LoadValueWide(rl_src, kCoreReg);
-  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  OpRegRegShift(kOpMvn, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void Arm64Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                                  RegLocation rl_src1, RegLocation rl_src2, int flags) {
-  switch (opcode) {
-    case Instruction::NOT_LONG:
-      GenNotLong(rl_dest, rl_src2);
-      return;
-    case Instruction::ADD_LONG:
-    case Instruction::ADD_LONG_2ADDR:
-      GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
-      return;
-    case Instruction::SUB_LONG:
-    case Instruction::SUB_LONG_2ADDR:
-      GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
-      return;
-    case Instruction::MUL_LONG:
-    case Instruction::MUL_LONG_2ADDR:
-      GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
-      return;
-    case Instruction::DIV_LONG:
-    case Instruction::DIV_LONG_2ADDR:
-      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true, flags);
-      return;
-    case Instruction::REM_LONG:
-    case Instruction::REM_LONG_2ADDR:
-      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false, flags);
-      return;
-    case Instruction::AND_LONG_2ADDR:
-    case Instruction::AND_LONG:
-      GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
-      return;
-    case Instruction::OR_LONG:
-    case Instruction::OR_LONG_2ADDR:
-      GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
-      return;
-    case Instruction::XOR_LONG:
-    case Instruction::XOR_LONG_2ADDR:
-      GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
-      return;
-    case Instruction::NEG_LONG: {
-      GenNegLong(rl_dest, rl_src2);
-      return;
-    }
-    default:
-      LOG(FATAL) << "Invalid long arith op";
-      return;
-  }
-}
-
-/*
- * Generate array load
- */
-void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
-                             RegLocation rl_index, RegLocation rl_dest, int scale) {
-  RegisterClass reg_class = RegClassBySize(size);
-  int len_offset = mirror::Array::LengthOffset().Int32Value();
-  int data_offset;
-  RegLocation rl_result;
-  bool constant_index = rl_index.is_const;
-  rl_array = LoadValue(rl_array, kRefReg);
-  if (!constant_index) {
-    rl_index = LoadValue(rl_index, kCoreReg);
-  }
-
-  if (rl_dest.wide) {
-    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
-  } else {
-    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
-  }
-
-  /* null object? */
-  GenNullCheck(rl_array.reg, opt_flags);
-
-  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
-  RegStorage reg_len;
-  if (needs_range_check) {
-    reg_len = AllocTemp();
-    /* Get len */
-    Load32Disp(rl_array.reg, len_offset, reg_len);
-    MarkPossibleNullPointerException(opt_flags);
-  } else {
-    ForceImplicitNullCheck(rl_array.reg, opt_flags);
-  }
-  if (constant_index) {
-    rl_result = EvalLoc(rl_dest, reg_class, true);
-
-    if (needs_range_check) {
-      GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
-      FreeTemp(reg_len);
-    }
-    // Fold the constant index into the data offset.
-    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
-    if (rl_result.ref) {
-      LoadRefDisp(rl_array.reg, data_offset, rl_result.reg, kNotVolatile);
-    } else {
-      LoadBaseDisp(rl_array.reg, data_offset, rl_result.reg, size, kNotVolatile);
-    }
-  } else {
-    // Offset base, then use indexed load.
-    RegStorage reg_ptr = AllocTempRef();
-    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
-    FreeTemp(rl_array.reg);
-    rl_result = EvalLoc(rl_dest, reg_class, true);
-
-    if (needs_range_check) {
-      GenArrayBoundsCheck(rl_index.reg, reg_len);
-      FreeTemp(reg_len);
-    }
-    if (rl_result.ref) {
-      LoadRefIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale);
-    } else {
-      LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
-    }
-    FreeTemp(reg_ptr);
-  }
-  if (rl_dest.wide) {
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    StoreValue(rl_dest, rl_result);
-  }
-}
-
-/*
- * Generate array store
- */
-void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
-                             RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
-  RegisterClass reg_class = RegClassBySize(size);
-  int len_offset = mirror::Array::LengthOffset().Int32Value();
-  bool constant_index = rl_index.is_const;
-
-  int data_offset;
-  if (size == k64 || size == kDouble) {
-    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
-  } else {
-    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
-  }
-
-  rl_array = LoadValue(rl_array, kRefReg);
-  if (!constant_index) {
-    rl_index = LoadValue(rl_index, kCoreReg);
-  }
-
-  RegStorage reg_ptr;
-  bool allocated_reg_ptr_temp = false;
-  if (constant_index) {
-    reg_ptr = rl_array.reg;
-  } else if (IsTemp(rl_array.reg) && !card_mark) {
-    Clobber(rl_array.reg);
-    reg_ptr = rl_array.reg;
-  } else {
-    allocated_reg_ptr_temp = true;
-    reg_ptr = AllocTempRef();
-  }
-
-  /* null object? */
-  GenNullCheck(rl_array.reg, opt_flags);
-
-  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
-  RegStorage reg_len;
-  if (needs_range_check) {
-    reg_len = AllocTemp();
-    // NOTE: max live temps (4) here.
-    /* Get len */
-    Load32Disp(rl_array.reg, len_offset, reg_len);
-    MarkPossibleNullPointerException(opt_flags);
-  } else {
-    ForceImplicitNullCheck(rl_array.reg, opt_flags);
-  }
-  /* at this point, reg_ptr points to array, 2 live temps */
-  if (rl_src.wide) {
-    rl_src = LoadValueWide(rl_src, reg_class);
-  } else {
-    rl_src = LoadValue(rl_src, reg_class);
-  }
-  if (constant_index) {
-    if (needs_range_check) {
-      GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
-      FreeTemp(reg_len);
-    }
-    // Fold the constant index into the data offset.
-    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
-    if (rl_src.ref) {
-      StoreRefDisp(reg_ptr, data_offset, rl_src.reg, kNotVolatile);
-    } else {
-      StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
-    }
-  } else {
-    /* reg_ptr -> array data */
-    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
-    if (needs_range_check) {
-      GenArrayBoundsCheck(rl_index.reg, reg_len);
-      FreeTemp(reg_len);
-    }
-    if (rl_src.ref) {
-      StoreRefIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale);
-    } else {
-      StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
-    }
-  }
-  if (allocated_reg_ptr_temp) {
-    FreeTemp(reg_ptr);
-  }
-  if (card_mark) {
-    MarkGCCard(opt_flags, rl_src.reg, rl_array.reg);
-  }
-}
-
-void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
-                                     RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift,
-                                     int flags ATTRIBUTE_UNUSED) {
-  OpKind op = kOpBkpt;
-  // Per spec, we only care about low 6 bits of shift amount.
-  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
-  rl_src = LoadValueWide(rl_src, kCoreReg);
-  if (shift_amount == 0) {
-    StoreValueWide(rl_dest, rl_src);
-    return;
-  }
-
-  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  switch (opcode) {
-    case Instruction::SHL_LONG:
-    case Instruction::SHL_LONG_2ADDR:
-      op = kOpLsl;
-      break;
-    case Instruction::SHR_LONG:
-    case Instruction::SHR_LONG_2ADDR:
-      op = kOpAsr;
-      break;
-    case Instruction::USHR_LONG:
-    case Instruction::USHR_LONG_2ADDR:
-      op = kOpLsr;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected case";
-  }
-  OpRegRegImm(op, rl_result.reg, rl_src.reg, shift_amount);
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                                     RegLocation rl_src1, RegLocation rl_src2, int flags) {
-  OpKind op = kOpBkpt;
-  switch (opcode) {
-    case Instruction::ADD_LONG:
-    case Instruction::ADD_LONG_2ADDR:
-      op = kOpAdd;
-      break;
-    case Instruction::SUB_LONG:
-    case Instruction::SUB_LONG_2ADDR:
-      op = kOpSub;
-      break;
-    case Instruction::AND_LONG:
-    case Instruction::AND_LONG_2ADDR:
-      op = kOpAnd;
-      break;
-    case Instruction::OR_LONG:
-    case Instruction::OR_LONG_2ADDR:
-      op = kOpOr;
-      break;
-    case Instruction::XOR_LONG:
-    case Instruction::XOR_LONG_2ADDR:
-      op = kOpXor;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected opcode";
-  }
-
-  if (op == kOpSub) {
-    if (!rl_src2.is_const) {
-      return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
-    }
-  } else {
-    // Associativity.
-    if (!rl_src2.is_const) {
-      DCHECK(rl_src1.is_const);
-      std::swap(rl_src1, rl_src2);
-    }
-  }
-  DCHECK(rl_src2.is_const);
-  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
-
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  OpRegRegImm64(op, rl_result.reg, rl_src1.reg, val);
-  StoreValueWide(rl_dest, rl_result);
-}
-
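-// Pops the lowest set bit off reg_mask: advances *reg (a running register
-// number, seeded with -1 by the caller) to the register of that bit and
-// returns the mask shifted past it.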
-static uint32_t ExtractReg(uint32_t reg_mask, int* reg) {
-  // Find first register.
-  int first_bit_set = CTZ(reg_mask) + 1;
-  *reg = *reg + first_bit_set;
-  reg_mask >>= first_bit_set;
-  return reg_mask;
-}
-
-/**
- * @brief Split a register list into pairs or single registers.
- *
- * Given a list of registers in @p reg_mask, split the list into pairs. Use as follows:
- * @code
- *   int reg1 = -1, reg2 = -1;
- *   while (reg_mask) {
- *     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
- *     if (UNLIKELY(reg2 < 0)) {
- *       // Single register in reg1.
- *     } else {
- *       // Pair in reg1, reg2.
- *     }
- *   }
- * @endcode
- */
-static uint32_t GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
-  // Find first register.
-  int first_bit_set = CTZ(reg_mask) + 1;
-  int reg = *reg1 + first_bit_set;
-  reg_mask >>= first_bit_set;
-
-  if (LIKELY(reg_mask)) {
-    // Save the first register, find the second and use the pair opcode.
-    int second_bit_set = CTZ(reg_mask) + 1;
-    *reg2 = reg;
-    reg_mask >>= second_bit_set;
-    *reg1 = reg + second_bit_set;
-    return reg_mask;
-  }
-
-  // Use the single opcode, as we just have one register.
-  *reg1 = reg;
-  *reg2 = -1;
-  return reg_mask;
-}
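-
-// Illustrative trace, not from the original sources: for reg_mask = 0b1011
-// (registers 0, 1 and 3), the first call returns the pair reg2 = 0, reg1 = 1;
-// the second call returns the single register reg1 = 3 with reg2 = -1.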
-
-static dwarf::Reg DwarfCoreReg(int num) {
-  return dwarf::Reg::Arm64Core(num);
-}
-
-static dwarf::Reg DwarfFpReg(int num) {
-  return dwarf::Reg::Arm64Fp(num);
-}
-
-static void SpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
-  int reg1 = -1, reg2 = -1;
-  const int reg_log2_size = 3;
-
-  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
-    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
-    if (UNLIKELY(reg2 < 0)) {
-      m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
-      m2l->cfi().RelOffset(DwarfCoreReg(reg1), offset << reg_log2_size);
-    } else {
-      m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
-                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
-      m2l->cfi().RelOffset(DwarfCoreReg(reg2), offset << reg_log2_size);
-      m2l->cfi().RelOffset(DwarfCoreReg(reg1), (offset + 1) << reg_log2_size);
-    }
-  }
-}
-
-// TODO(Arm64): consider using ld1 and st1?
-static void SpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
-  int reg1 = -1, reg2 = -1;
-  const int reg_log2_size = 3;
-
-  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
-    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
-    if (UNLIKELY(reg2 < 0)) {
-      m2l->NewLIR3(WIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
-                   offset);
-      m2l->cfi().RelOffset(DwarfFpReg(reg1), offset << reg_log2_size);
-    } else {
-      m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
-                   RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
-      m2l->cfi().RelOffset(DwarfFpReg(reg2), offset << reg_log2_size);
-      m2l->cfi().RelOffset(DwarfFpReg(reg1), (offset + 1) << reg_log2_size);
-    }
-  }
-}
-
-static int SpillRegsPreSub(Arm64Mir2Lir* m2l, uint32_t core_reg_mask, uint32_t fp_reg_mask,
-                           int frame_size) {
-  m2l->OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size);
-  m2l->cfi().AdjustCFAOffset(frame_size);
-
-  int core_count = POPCOUNT(core_reg_mask);
-
-  if (fp_reg_mask != 0) {
-    // Spill FP regs.
-    int fp_count = POPCOUNT(fp_reg_mask);
-    int spill_offset = frame_size - (core_count + fp_count) * kArm64PointerSize;
-    SpillFPRegs(m2l, rs_sp, spill_offset, fp_reg_mask);
-  }
-
-  if (core_reg_mask != 0) {
-    // Spill core regs.
-    int spill_offset = frame_size - (core_count * kArm64PointerSize);
-    SpillCoreRegs(m2l, rs_sp, spill_offset, core_reg_mask);
-  }
-
-  return frame_size;
-}
-
-static int SpillRegsPreIndexed(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
-                               uint32_t fp_reg_mask) {
-  // Spill both core and fp regs at the same time.
-  // The very first instruction will be an stp with a pre-indexed address, moving the stack pointer
-  // down. From then on, we fill upwards. This generates overall the same number of instructions
-  // as the specialized code above in most cases (the exception being an odd number of core and an
-  // even, non-zero number of fp spills), but is more flexible, as the offsets are guaranteed small.
-  //
-  // Some demonstrative fill cases : (c) = core, (f) = fp
-  // cc    44   cc    44   cc    22   cc    33   fc => 1[1/2]
-  // fc => 23   fc => 23   ff => 11   ff => 22
-  // ff    11    f    11               f    11
-  //
-  int reg1 = -1, reg2 = -1;
-  int core_count = POPCOUNT(core_reg_mask);
-  int fp_count = POPCOUNT(fp_reg_mask);
-
-  int combined = fp_count + core_count;
-  int all_offset = RoundUp(combined, 2);  // Needs to be 16B = 2-reg aligned.
-
-  int cur_offset = 2;  // What's the starting offset after the first stp? We expect the base slot
-                       // to be filled.
-
-  // First figure out whether the bottom is FP or core.
-  if (fp_count > 0) {
-    // Some FP spills.
-    //
-    // Four cases: (d0 is dummy to fill up stp)
-    // 1) Single FP, even number of core -> stp d0, fp_reg
-    // 2) Single FP, odd number of core -> stp fp_reg, d0
-    // 3) More FP, even number combined -> stp fp_reg1, fp_reg2
-    // 4) More FP, odd number combined -> stp d0, fp_reg
-    if (fp_count == 1) {
-      fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
-      DCHECK_EQ(fp_reg_mask, 0U);
-      if (core_count % 2 == 0) {
-        m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
-                     RegStorage::FloatSolo64(reg1).GetReg(),
-                     RegStorage::FloatSolo64(reg1).GetReg(),
-                     base.GetReg(), -all_offset);
-        m2l->cfi().AdjustCFAOffset(all_offset * kArm64PointerSize);
-        m2l->cfi().RelOffset(DwarfFpReg(reg1), kArm64PointerSize);
-      } else {
-        m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
-                     RegStorage::FloatSolo64(reg1).GetReg(),
-                     RegStorage::FloatSolo64(reg1).GetReg(),
-                     base.GetReg(), -all_offset);
-        m2l->cfi().AdjustCFAOffset(all_offset * kArm64PointerSize);
-        m2l->cfi().RelOffset(DwarfFpReg(reg1), 0);
-        cur_offset = 0;  // That core reg needs to go into the upper half.
-      }
-    } else {
-      if (combined % 2 == 0) {
-        fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
-        m2l->NewLIR4(WIDE(kA64StpPre4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
-                     RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), -all_offset);
-        m2l->cfi().AdjustCFAOffset(all_offset * kArm64PointerSize);
-        m2l->cfi().RelOffset(DwarfFpReg(reg2), 0);
-        m2l->cfi().RelOffset(DwarfFpReg(reg1), kArm64PointerSize);
-      } else {
-        fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
-        m2l->NewLIR4(WIDE(kA64StpPre4ffXD), rs_d0.GetReg(), RegStorage::FloatSolo64(reg1).GetReg(),
-                     base.GetReg(), -all_offset);
-        m2l->cfi().AdjustCFAOffset(all_offset * kArm64PointerSize);
-        m2l->cfi().RelOffset(DwarfFpReg(reg1), kArm64PointerSize);
-      }
-    }
-  } else {
-    // No FP spills.
-    //
-    // Two cases:
-    // 1) Even number of core -> stp core1, core2
-    // 2) Odd number of core -> stp xzr, core1
-    if (core_count % 2 == 1) {
-      core_reg_mask = ExtractReg(core_reg_mask, &reg1);
-      m2l->NewLIR4(WIDE(kA64StpPre4rrXD), rs_xzr.GetReg(),
-                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
-      m2l->cfi().AdjustCFAOffset(all_offset * kArm64PointerSize);
-      m2l->cfi().RelOffset(DwarfCoreReg(reg1), kArm64PointerSize);
-    } else {
-      core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
-      m2l->NewLIR4(WIDE(kA64StpPre4rrXD), RegStorage::Solo64(reg2).GetReg(),
-                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
-      m2l->cfi().AdjustCFAOffset(all_offset * kArm64PointerSize);
-      m2l->cfi().RelOffset(DwarfCoreReg(reg2), 0);
-      m2l->cfi().RelOffset(DwarfCoreReg(reg1), kArm64PointerSize);
-    }
-  }
-  DCHECK_EQ(m2l->cfi().GetCurrentCFAOffset(),
-            static_cast<int>(all_offset * kArm64PointerSize));
-
-  if (fp_count != 0) {
-    for (; fp_reg_mask != 0;) {
-      // Have some FP regs to do.
-      fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
-      if (UNLIKELY(reg2 < 0)) {
-        m2l->NewLIR3(WIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
-                     cur_offset);
-        m2l->cfi().RelOffset(DwarfFpReg(reg1), cur_offset * kArm64PointerSize);
-        // Do not increment offset here, as the second half will be filled by a core reg.
-      } else {
-        m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
-                     RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), cur_offset);
-        m2l->cfi().RelOffset(DwarfFpReg(reg2), cur_offset * kArm64PointerSize);
-        m2l->cfi().RelOffset(DwarfFpReg(reg1), (cur_offset + 1) * kArm64PointerSize);
-        cur_offset += 2;
-      }
-    }
-
-    // Reset counting.
-    reg1 = -1;
-
-    // If there is an odd number of core registers, we need to store the bottom now.
-    if (core_count % 2 == 1) {
-      core_reg_mask = ExtractReg(core_reg_mask, &reg1);
-      m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(),
-                   cur_offset + 1);
-      m2l->cfi().RelOffset(DwarfCoreReg(reg1), (cur_offset + 1) * kArm64PointerSize);
-      cur_offset += 2;  // Half-slot filled now.
-    }
-  }
-
-  // Spill the rest of the core regs. There is guaranteed to be an even number of them.
-  DCHECK_EQ(POPCOUNT(core_reg_mask) % 2, 0);
-  for (; core_reg_mask != 0; cur_offset += 2) {
-    core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
-    m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
-                 RegStorage::Solo64(reg1).GetReg(), base.GetReg(), cur_offset);
-    m2l->cfi().RelOffset(DwarfCoreReg(reg2), cur_offset * kArm64PointerSize);
-    m2l->cfi().RelOffset(DwarfCoreReg(reg1), (cur_offset + 1) * kArm64PointerSize);
-  }
-
-  DCHECK_EQ(cur_offset, all_offset);
-
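-  // all_offset counts 8-byte register slots; convert back to a byte-sized frame.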
-  return all_offset * 8;
-}
-
-int Arm64Mir2Lir::SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
-                            int frame_size) {
-  // If the frame size is small enough that all offsets would fit into the immediates, use that
-  // setup, as it decrements sp early (a kind of instruction scheduling), and is no worse
-  // instruction-count-wise than the more complicated code below.
-  //
-  // This case is also optimal when we have an odd number of core spills, and an even (non-zero)
-  // number of fp spills.
-  if (RoundUp(frame_size, 8) / 8 <= 63) {
-    return SpillRegsPreSub(this, core_reg_mask, fp_reg_mask, frame_size);
-  } else {
-    return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask);
-  }
-}
-
-static void UnSpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
-  int reg1 = -1, reg2 = -1;
-  const int reg_log2_size = 3;
-
-  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
-    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
-    if (UNLIKELY(reg2 < 0)) {
-      m2l->NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
-      m2l->cfi().Restore(DwarfCoreReg(reg1));
-    } else {
-      DCHECK_LE(offset, 63);
-      m2l->NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
-                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
-      m2l->cfi().Restore(DwarfCoreReg(reg2));
-      m2l->cfi().Restore(DwarfCoreReg(reg1));
-    }
-  }
-}
-
-static void UnSpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
-  int reg1 = -1, reg2 = -1;
-  const int reg_log2_size = 3;
-
-  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
-    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
-    if (UNLIKELY(reg2 < 0)) {
-      m2l->NewLIR3(WIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
-                   offset);
-      m2l->cfi().Restore(DwarfFpReg(reg1));
-    } else {
-      m2l->NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
-                   RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
-      m2l->cfi().Restore(DwarfFpReg(reg2));
-      m2l->cfi().Restore(DwarfFpReg(reg1));
-    }
-  }
-}
-
-void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
-                               int frame_size) {
-  DCHECK_EQ(base, rs_sp);
-  // Restore saves and drop stack frame.
-  // 2 versions:
-  //
-  // 1. (Original): Try to address directly, then drop the whole frame.
-  //                Limitation: ldp takes a 7-bit signed scaled immediate.
-  //
-  // 2. (New): Drop the non-save-part. Then do similar to original, which is now guaranteed to be
-  //           in range. Then drop the rest.
-  //
-  // TODO: In methods with few spills but huge frame, it would be better to do non-immediate loads
-  //       in variant 1.
-
-  // "Magic" constant, 63 (max signed 7b) * 8.
-  static constexpr int kMaxFramesizeForOffset = 63 * kArm64PointerSize;
-
-  const int num_core_spills = POPCOUNT(core_reg_mask);
-  const int num_fp_spills = POPCOUNT(fp_reg_mask);
-
-  int early_drop = 0;
-
-  if (frame_size > kMaxFramesizeForOffset) {
-    // Second variant. Drop the frame part.
-
-    // TODO: Always use the first formula, as num_fp_spills would be zero?
-    if (fp_reg_mask != 0) {
-      early_drop = frame_size - kArm64PointerSize * (num_fp_spills + num_core_spills);
-    } else {
-      early_drop = frame_size - kArm64PointerSize * num_core_spills;
-    }
-
-    // Drop needs to be 16B aligned, so that SP keeps aligned.
-    early_drop = RoundDown(early_drop, 16);
-
-    OpRegImm64(kOpAdd, rs_sp, early_drop);
-    cfi_.AdjustCFAOffset(-early_drop);
-  }
-
-  // Unspill.
-  if (fp_reg_mask != 0) {
-    int offset = frame_size - early_drop - kArm64PointerSize * (num_fp_spills + num_core_spills);
-    UnSpillFPRegs(this, rs_sp, offset, fp_reg_mask);
-  }
-  if (core_reg_mask != 0) {
-    int offset = frame_size - early_drop - kArm64PointerSize * num_core_spills;
-    UnSpillCoreRegs(this, rs_sp, offset, core_reg_mask);
-  }
-
-  // Drop the (rest of) the frame.
-  int adjust = frame_size - early_drop;
-  OpRegImm64(kOpAdd, rs_sp, adjust);
-  cfi_.AdjustCFAOffset(-adjust);
-}
-
-bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
-  A64Opcode wide = IsWide(size) ? WIDE(0) : UNWIDE(0);
-  RegLocation rl_src_i = info->args[0];
-  RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  RegLocation rl_i = IsWide(size) ?
-      LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
-  NewLIR2(kA64Rbit2rr | wide, rl_result.reg.GetReg(), rl_i.reg.GetReg());
-  IsWide(size) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
-  return true;
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
deleted file mode 100644
index 691bfd9..0000000
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ /dev/null
@@ -1,912 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_arm64.h"
-
-#include <inttypes.h>
-
-#include <string>
-#include <sstream>
-
-#include "backend_arm64.h"
-#include "base/logging.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "dex/reg_storage_eq.h"
-
-namespace art {
-
-static constexpr RegStorage core_regs_arr[] =
-    {rs_w0, rs_w1, rs_w2, rs_w3, rs_w4, rs_w5, rs_w6, rs_w7,
-     rs_w8, rs_w9, rs_w10, rs_w11, rs_w12, rs_w13, rs_w14, rs_w15,
-     rs_w16, rs_w17, rs_w18, rs_w19, rs_w20, rs_w21, rs_w22, rs_w23,
-     rs_w24, rs_w25, rs_w26, rs_w27, rs_w28, rs_w29, rs_w30, rs_w31,
-     rs_wzr};
-static constexpr RegStorage core64_regs_arr[] =
-    {rs_x0, rs_x1, rs_x2, rs_x3, rs_x4, rs_x5, rs_x6, rs_x7,
-     rs_x8, rs_x9, rs_x10, rs_x11, rs_x12, rs_x13, rs_x14, rs_x15,
-     rs_x16, rs_x17, rs_x18, rs_x19, rs_x20, rs_x21, rs_x22, rs_x23,
-     rs_x24, rs_x25, rs_x26, rs_x27, rs_x28, rs_x29, rs_x30, rs_x31,
-     rs_xzr};
-static constexpr RegStorage sp_regs_arr[] =
-    {rs_f0, rs_f1, rs_f2, rs_f3, rs_f4, rs_f5, rs_f6, rs_f7,
-     rs_f8, rs_f9, rs_f10, rs_f11, rs_f12, rs_f13, rs_f14, rs_f15,
-     rs_f16, rs_f17, rs_f18, rs_f19, rs_f20, rs_f21, rs_f22, rs_f23,
-     rs_f24, rs_f25, rs_f26, rs_f27, rs_f28, rs_f29, rs_f30, rs_f31};
-static constexpr RegStorage dp_regs_arr[] =
-    {rs_d0, rs_d1, rs_d2, rs_d3, rs_d4, rs_d5, rs_d6, rs_d7,
-     rs_d8, rs_d9, rs_d10, rs_d11, rs_d12, rs_d13, rs_d14, rs_d15,
-     rs_d16, rs_d17, rs_d18, rs_d19, rs_d20, rs_d21, rs_d22, rs_d23,
-     rs_d24, rs_d25, rs_d26, rs_d27, rs_d28, rs_d29, rs_d30, rs_d31};
-static constexpr RegStorage reserved_regs_arr[] = {rs_wSELF, rs_wsp, rs_wLR, rs_wzr};
-static constexpr RegStorage reserved64_regs_arr[] = {rs_xSELF, rs_sp, rs_xLR, rs_xzr};
-
-static constexpr RegStorage core_temps_arr[] =
-    {rs_w0, rs_w1, rs_w2, rs_w3, rs_w4, rs_w5, rs_w6, rs_w7,
-     rs_w8, rs_w9, rs_w10, rs_w11, rs_w12, rs_w13, rs_w14, rs_w15, rs_w16,
-     rs_w17, rs_w18};
-static constexpr RegStorage core64_temps_arr[] =
-    {rs_x0, rs_x1, rs_x2, rs_x3, rs_x4, rs_x5, rs_x6, rs_x7,
-     rs_x8, rs_x9, rs_x10, rs_x11, rs_x12, rs_x13, rs_x14, rs_x15, rs_x16,
-     rs_x17, rs_x18};
-static constexpr RegStorage sp_temps_arr[] =
-    {rs_f0, rs_f1, rs_f2, rs_f3, rs_f4, rs_f5, rs_f6, rs_f7,
-     rs_f16, rs_f17, rs_f18, rs_f19, rs_f20, rs_f21, rs_f22, rs_f23,
-     rs_f24, rs_f25, rs_f26, rs_f27, rs_f28, rs_f29, rs_f30, rs_f31};
-static constexpr RegStorage dp_temps_arr[] =
-    {rs_d0, rs_d1, rs_d2, rs_d3, rs_d4, rs_d5, rs_d6, rs_d7,
-     rs_d16, rs_d17, rs_d18, rs_d19, rs_d20, rs_d21, rs_d22, rs_d23,
-     rs_d24, rs_d25, rs_d26, rs_d27, rs_d28, rs_d29, rs_d30, rs_d31};
-
-static constexpr ArrayRef<const RegStorage> core_regs(core_regs_arr);
-static constexpr ArrayRef<const RegStorage> core64_regs(core64_regs_arr);
-static constexpr ArrayRef<const RegStorage> sp_regs(sp_regs_arr);
-static constexpr ArrayRef<const RegStorage> dp_regs(dp_regs_arr);
-static constexpr ArrayRef<const RegStorage> reserved_regs(reserved_regs_arr);
-static constexpr ArrayRef<const RegStorage> reserved64_regs(reserved64_regs_arr);
-static constexpr ArrayRef<const RegStorage> core_temps(core_temps_arr);
-static constexpr ArrayRef<const RegStorage> core64_temps(core64_temps_arr);
-static constexpr ArrayRef<const RegStorage> sp_temps(sp_temps_arr);
-static constexpr ArrayRef<const RegStorage> dp_temps(dp_temps_arr);
-
-RegLocation Arm64Mir2Lir::LocCReturn() {
-  return a64_loc_c_return;
-}
-
-RegLocation Arm64Mir2Lir::LocCReturnRef() {
-  return a64_loc_c_return_ref;
-}
-
-RegLocation Arm64Mir2Lir::LocCReturnWide() {
-  return a64_loc_c_return_wide;
-}
-
-RegLocation Arm64Mir2Lir::LocCReturnFloat() {
-  return a64_loc_c_return_float;
-}
-
-RegLocation Arm64Mir2Lir::LocCReturnDouble() {
-  return a64_loc_c_return_double;
-}
-
-// Return a target-dependent special register.
-RegStorage Arm64Mir2Lir::TargetReg(SpecialTargetRegister reg) {
-  RegStorage res_reg = RegStorage::InvalidReg();
-  switch (reg) {
-    case kSelf: res_reg = rs_wSELF; break;
-    case kSuspend: res_reg = RegStorage::InvalidReg(); break;
-    case kLr: res_reg =  rs_wLR; break;
-    case kPc: res_reg = RegStorage::InvalidReg(); break;
-    case kSp: res_reg =  rs_wsp; break;
-    case kArg0: res_reg = rs_w0; break;
-    case kArg1: res_reg = rs_w1; break;
-    case kArg2: res_reg = rs_w2; break;
-    case kArg3: res_reg = rs_w3; break;
-    case kArg4: res_reg = rs_w4; break;
-    case kArg5: res_reg = rs_w5; break;
-    case kArg6: res_reg = rs_w6; break;
-    case kArg7: res_reg = rs_w7; break;
-    case kFArg0: res_reg = rs_f0; break;
-    case kFArg1: res_reg = rs_f1; break;
-    case kFArg2: res_reg = rs_f2; break;
-    case kFArg3: res_reg = rs_f3; break;
-    case kFArg4: res_reg = rs_f4; break;
-    case kFArg5: res_reg = rs_f5; break;
-    case kFArg6: res_reg = rs_f6; break;
-    case kFArg7: res_reg = rs_f7; break;
-    case kRet0: res_reg = rs_w0; break;
-    case kRet1: res_reg = rs_w1; break;
-    case kInvokeTgt: res_reg = rs_wLR; break;
-    case kHiddenArg: res_reg = rs_wIP1; break;
-    case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
-    case kCount: res_reg = RegStorage::InvalidReg(); break;
-    default: res_reg = RegStorage::InvalidReg();
-  }
-  return res_reg;
-}
-
-/*
- * Decode the register id. This routine makes assumptions on the encoding made by RegStorage.
- */
-ResourceMask Arm64Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
-  // TODO(Arm64): this function depends too much on the internal RegStorage encoding. Refactor.
-
-  // Check if the shape mask is zero (i.e. invalid).
-  if (UNLIKELY(reg == rs_wzr || reg == rs_xzr)) {
-    // The zero register is not a true register. It is just an immediate zero.
-    return kEncodeNone;
-  }
-
-  return ResourceMask::Bit(
-      // FP register starts at bit position 32.
-      (reg.IsFloat() ? kA64FPReg0 : 0) + reg.GetRegNum());
-}
-
-ResourceMask Arm64Mir2Lir::GetPCUseDefEncoding() const {
-  // Note: On arm64, the pc cannot be written except by branch instructions, which are regarded as
-  //       a kind of barrier. All other instructions only read the pc and have no dependencies on
-  //       one another. So it is fine to just return kEncodeNone here.
-  return kEncodeNone;
-}
-
-// Arm64-specific setup.  TODO: inline?
-void Arm64Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
-                                            ResourceMask* use_mask, ResourceMask* def_mask) {
-  DCHECK_EQ(cu_->instruction_set, kArm64);
-  DCHECK(!lir->flags.use_def_invalid);
-
-  // Note: REG_USE_PC is ignored, the reason is the same with what we do in GetPCUseDefEncoding().
-  // These flags are somewhat uncommon - bypass if we can.
-  if ((flags & (REG_DEF_SP | REG_USE_SP | REG_DEF_LR)) != 0) {
-    if (flags & REG_DEF_SP) {
-      def_mask->SetBit(kA64RegSP);
-    }
-
-    if (flags & REG_USE_SP) {
-      use_mask->SetBit(kA64RegSP);
-    }
-
-    if (flags & REG_DEF_LR) {
-      def_mask->SetBit(kA64RegLR);
-    }
-  }
-}
-
-ArmConditionCode Arm64Mir2Lir::ArmConditionEncoding(ConditionCode ccode) {
-  ArmConditionCode res;
-  switch (ccode) {
-    case kCondEq: res = kArmCondEq; break;
-    case kCondNe: res = kArmCondNe; break;
-    case kCondCs: res = kArmCondCs; break;
-    case kCondCc: res = kArmCondCc; break;
-    case kCondUlt: res = kArmCondCc; break;
-    case kCondUge: res = kArmCondCs; break;
-    case kCondMi: res = kArmCondMi; break;
-    case kCondPl: res = kArmCondPl; break;
-    case kCondVs: res = kArmCondVs; break;
-    case kCondVc: res = kArmCondVc; break;
-    case kCondHi: res = kArmCondHi; break;
-    case kCondLs: res = kArmCondLs; break;
-    case kCondGe: res = kArmCondGe; break;
-    case kCondLt: res = kArmCondLt; break;
-    case kCondGt: res = kArmCondGt; break;
-    case kCondLe: res = kArmCondLe; break;
-    case kCondAl: res = kArmCondAl; break;
-    case kCondNv: res = kArmCondNv; break;
-    default:
-      LOG(FATAL) << "Bad condition code " << ccode;
-      res = static_cast<ArmConditionCode>(0);  // Quiet gcc
-  }
-  return res;
-}
-
-static const char *shift_names[4] = {
-  "lsl",
-  "lsr",
-  "asr",
-  "ror"
-};
-
-static const char* extend_names[8] = {
-  "uxtb",
-  "uxth",
-  "uxtw",
-  "uxtx",
-  "sxtb",
-  "sxth",
-  "sxtw",
-  "sxtx",
-};
-
-/* Decode and print a register extension (e.g. ", uxtb #1") */
-static void DecodeRegExtendOrShift(int operand, char *buf, size_t buf_size) {
-  if ((operand & (1 << 6)) == 0) {
-    const char *shift_name = shift_names[(operand >> 7) & 0x3];
-    int amount = operand & 0x3f;
-    snprintf(buf, buf_size, ", %s #%d", shift_name, amount);
-  } else {
-    const char *extend_name = extend_names[(operand >> 3) & 0x7];
-    int amount = operand & 0x7;
-    if (amount == 0) {
-      snprintf(buf, buf_size, ", %s", extend_name);
-    } else {
-      snprintf(buf, buf_size, ", %s #%d", extend_name, amount);
-    }
-  }
-}
-
-static uint64_t bit_mask(unsigned width) {
-  DCHECK_LE(width, 64U);
-  return (width == 64) ? static_cast<uint64_t>(-1) : ((UINT64_C(1) << (width)) - UINT64_C(1));
-}
-
-static uint64_t RotateRight(uint64_t value, unsigned rotate, unsigned width) {
-  DCHECK_LE(width, 64U);
-  rotate &= 63;
-  value = value & bit_mask(width);
-  return ((value & bit_mask(rotate)) << (width - rotate)) | (value >> rotate);
-}
-
-static uint64_t RepeatBitsAcrossReg(bool is_wide, uint64_t value, unsigned width) {
-  unsigned i;
-  unsigned reg_size = (is_wide) ? 64 : 32;
-  uint64_t result = value & bit_mask(width);
-  for (i = width; i < reg_size; i *= 2) {
-    result |= (result << i);
-  }
-  DCHECK_EQ(i, reg_size);
-  return result;
-}
-
-/**
- * @brief Decode an immediate in the form required by logical instructions.
- *
- * @param is_wide Whether @p value encodes a 64-bit (as opposed to 32-bit) immediate.
- * @param value The encoded logical immediate that is to be decoded.
- * @return The decoded logical immediate.
- * @note This is the inverse of Arm64Mir2Lir::EncodeLogicalImmediate().
- */
-uint64_t Arm64Mir2Lir::DecodeLogicalImmediate(bool is_wide, int value) {
-  unsigned n     = (value >> 12) & 0x01;
-  unsigned imm_r = (value >>  6) & 0x3f;
-  unsigned imm_s = (value >>  0) & 0x3f;
-
-  // An integer is constructed from the n, imm_s and imm_r bits according to
-  // the following table:
-  //
-  // N   imms immr  size S             R
-  // 1 ssssss rrrrrr 64  UInt(ssssss) UInt(rrrrrr)
-  // 0 0sssss xrrrrr 32  UInt(sssss)  UInt(rrrrr)
-  // 0 10ssss xxrrrr 16  UInt(ssss)   UInt(rrrr)
-  // 0 110sss xxxrrr 8   UInt(sss)    UInt(rrr)
-  // 0 1110ss xxxxrr 4   UInt(ss)     UInt(rr)
-  // 0 11110s xxxxxr 2   UInt(s)      UInt(r)
-  // (s bits must not be all set)
-  //
-  // A pattern is constructed of size bits, where the least significant S+1
-  // bits are set. The pattern is rotated right by R, and repeated across a
-  // 32 or 64-bit value, depending on destination register width.
-
-  if (n == 1) {
-    DCHECK_NE(imm_s, 0x3fU);
-    uint64_t bits = bit_mask(imm_s + 1);
-    return RotateRight(bits, imm_r, 64);
-  } else {
-    DCHECK_NE((imm_s >> 1), 0x1fU);
-    for (unsigned width = 0x20; width >= 0x2; width >>= 1) {
-      if ((imm_s & width) == 0) {
-        unsigned mask = (unsigned)(width - 1);
-        DCHECK_NE((imm_s & mask), mask);
-        uint64_t bits = bit_mask((imm_s & mask) + 1);
-        return RepeatBitsAcrossReg(is_wide, RotateRight(bits, imm_r & mask, width), width);
-      }
-    }
-  }
-  return 0;
-}
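-
-// Worked examples for the decoding above (a sketch, derived by hand from the table):
-//   DecodeLogicalImmediate(true, 0x1007) == 0xff
-//       (n=1, imm_s=7, imm_r=0: eight set bits, no rotation)
-//   DecodeLogicalImmediate(true, 0x1107) == 0xf00000000000000f
-//       (n=1, imm_s=7, imm_r=4: eight set bits rotated right by 4)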
-
-/**
- * @brief Decode an 8-bit single-precision float immediate encoded with EncodeImmSingle().
- */
-static float DecodeImmSingle(uint8_t small_float) {
-  int mantissa = (small_float & 0x0f) + 0x10;
-  int sign = ((small_float & 0x80) == 0) ? 1 : -1;
-  float signed_mantissa = static_cast<float>(sign * mantissa);
-  int exponent = (((small_float >> 4) & 0x7) + 4) & 0x7;
-  return signed_mantissa * static_cast<float>(1 << exponent) * 0.0078125f;
-}
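-
-// Worked examples (a sketch, evaluated by hand from the formula above):
-//   DecodeImmSingle(0x00) == 2.0f   (mantissa 16, exponent 4: 16 * 16 / 128)
-//   DecodeImmSingle(0x60) == 0.5f   (mantissa 16, exponent 2: 16 * 4 / 128)
-//   DecodeImmSingle(0x80) == -2.0f  (as 0x00, but with the sign bit set)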
-
-static const char* cc_names[] = {"eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
-                                 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"};
-/*
- * Interpret a format string and build a human-readable instruction string.
- * See format key in assemble_arm64.cc.
- */
-std::string Arm64Mir2Lir::BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) {
-  std::string buf;
-  const char* fmt_end = &fmt[strlen(fmt)];
-  char tbuf[256];
-  const char* name;
-  char nc;
-  while (fmt < fmt_end) {
-    int operand;
-    if (*fmt == '!') {
-      fmt++;
-      DCHECK_LT(fmt, fmt_end);
-      nc = *fmt++;
-      if (nc == '!') {
-        strcpy(tbuf, "!");
-      } else {
-         DCHECK_LT(fmt, fmt_end);
-         DCHECK_LT(static_cast<unsigned>(nc-'0'), 4U);
-         operand = lir->operands[nc-'0'];
-         switch (*fmt++) {
-           case 'e':  {
-               // Omit ", uxtw #0" in strings like "add w0, w1, w3, uxtw #0" and
-               // ", uxtx #0" in strings like "add x0, x1, x3, uxtx #0"
-               int omittable = ((IS_WIDE(lir->opcode)) ? EncodeExtend(kA64Uxtx, 0) :
-                                EncodeExtend(kA64Uxtw, 0));
-               if (LIKELY(operand == omittable)) {
-                 strcpy(tbuf, "");
-               } else {
-                 DecodeRegExtendOrShift(operand, tbuf, arraysize(tbuf));
-               }
-             }
-             break;
-           case 'o':
-             // Omit ", lsl #0"
-             if (LIKELY(operand == EncodeShift(kA64Lsl, 0))) {
-               strcpy(tbuf, "");
-             } else {
-               DecodeRegExtendOrShift(operand, tbuf, arraysize(tbuf));
-             }
-             break;
-           case 'B':
-             switch (operand) {
-               case kSY:
-                 name = "sy";
-                 break;
-               case kST:
-                 name = "st";
-                 break;
-               case kISH:
-                 name = "ish";
-                 break;
-               case kISHST:
-                 name = "ishst";
-                 break;
-               case kNSH:
-                 name = "nsh";
-                 break;
-               case kNSHST:
-                 name = "shst";
-                 break;
-               default:
-                 name = "DecodeError2";
-                 break;
-             }
-             strcpy(tbuf, name);
-             break;
-           case 's':
-             snprintf(tbuf, arraysize(tbuf), "s%d", operand & RegStorage::kRegNumMask);
-             break;
-           case 'S':
-             snprintf(tbuf, arraysize(tbuf), "d%d", operand & RegStorage::kRegNumMask);
-             break;
-           case 'f':
-             snprintf(tbuf, arraysize(tbuf), "%c%d", (IS_WIDE(lir->opcode)) ? 'd' : 's',
-                      operand & RegStorage::kRegNumMask);
-             break;
-           case 'l': {
-               bool is_wide = IS_WIDE(lir->opcode);
-               uint64_t imm = DecodeLogicalImmediate(is_wide, operand);
-               snprintf(tbuf, arraysize(tbuf), "%" PRId64 " (%#" PRIx64 ")", imm, imm);
-             }
-             break;
-           case 'I':
-             snprintf(tbuf, arraysize(tbuf), "%f", DecodeImmSingle(operand));
-             break;
-           case 'M':
-             if (LIKELY(operand == 0))
-               strcpy(tbuf, "");
-             else
-               snprintf(tbuf, arraysize(tbuf), ", lsl #%d", 16*operand);
-             break;
-           case 'd':
-             snprintf(tbuf, arraysize(tbuf), "%d", operand);
-             break;
-           case 'w':
-             if (LIKELY(operand != rwzr))
-               snprintf(tbuf, arraysize(tbuf), "w%d", operand & RegStorage::kRegNumMask);
-             else
-               strcpy(tbuf, "wzr");
-             break;
-           case 'W':
-             if (LIKELY(operand != rwsp))
-               snprintf(tbuf, arraysize(tbuf), "w%d", operand & RegStorage::kRegNumMask);
-             else
-               strcpy(tbuf, "wsp");
-             break;
-           case 'x':
-             if (LIKELY(operand != rxzr))
-               snprintf(tbuf, arraysize(tbuf), "x%d", operand & RegStorage::kRegNumMask);
-             else
-               strcpy(tbuf, "xzr");
-             break;
-           case 'X':
-             if (LIKELY(operand != rsp))
-               snprintf(tbuf, arraysize(tbuf), "x%d", operand & RegStorage::kRegNumMask);
-             else
-               strcpy(tbuf, "sp");
-             break;
-           case 'D':
-             snprintf(tbuf, arraysize(tbuf), "%d", operand*((IS_WIDE(lir->opcode)) ? 8 : 4));
-             break;
-           case 'E':
-             snprintf(tbuf, arraysize(tbuf), "%d", operand*4);
-             break;
-           case 'F':
-             snprintf(tbuf, arraysize(tbuf), "%d", operand*2);
-             break;
-           case 'G':
-             if (LIKELY(operand == 0))
-               strcpy(tbuf, "");
-             else
-               strcpy(tbuf, (IS_WIDE(lir->opcode)) ? ", lsl #3" : ", lsl #2");
-             break;
-           case 'c':
-             strcpy(tbuf, cc_names[operand]);
-             break;
-           case 't':
-             snprintf(tbuf, arraysize(tbuf), "0x%08" PRIxPTR " (L%p)",
-                 reinterpret_cast<uintptr_t>(base_addr) + lir->offset + (operand << 2),
-                 lir->target);
-             break;
-           case 'r': {
-               bool is_wide = IS_WIDE(lir->opcode);
-               if (LIKELY(operand != rwzr && operand != rxzr)) {
-                 snprintf(tbuf, arraysize(tbuf), "%c%d", (is_wide) ? 'x' : 'w',
-                          operand & RegStorage::kRegNumMask);
-               } else {
-                 strcpy(tbuf, (is_wide) ? "xzr" : "wzr");
-               }
-             }
-             break;
-           case 'R': {
-               bool is_wide = IS_WIDE(lir->opcode);
-               if (LIKELY(operand != rwsp && operand != rsp)) {
-                 snprintf(tbuf, arraysize(tbuf), "%c%d", (is_wide) ? 'x' : 'w',
-                          operand & RegStorage::kRegNumMask);
-               } else {
-                 strcpy(tbuf, (is_wide) ? "sp" : "wsp");
-               }
-             }
-             break;
-           case 'p':
-             snprintf(tbuf, arraysize(tbuf), ".+%d (addr %#" PRIxPTR ")", 4*operand,
-                      reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4*operand);
-             break;
-           case 'T':
-             if (LIKELY(operand == 0))
-               strcpy(tbuf, "");
-             else if (operand == 1)
-               strcpy(tbuf, ", lsl #12");
-             else
-               strcpy(tbuf, ", DecodeError3");
-             break;
-           case 'h':
-             snprintf(tbuf, arraysize(tbuf), "%d", operand);
-             break;
-           default:
-             strcpy(tbuf, "DecodeError1");
-             break;
-        }
-        buf += tbuf;
-      }
-    } else {
-       buf += *fmt++;
-    }
-  }
-  // Dump thread offset.
-  std::string fmt_str = GetTargetInstFmt(lir->opcode);
-  if (std::string::npos != fmt_str.find(", [!1X, #!2") && rxSELF == lir->operands[1] &&
-      std::string::npos != buf.find(", [")) {
-    int offset = lir->operands[2];
-    if (std::string::npos != fmt_str.find("#!2d")) {
-      // Unscaled offset; use as-is.
-    } else if (std::string::npos != fmt_str.find("#!2D")) {
-      offset *= (IS_WIDE(lir->opcode)) ? 8 : 4;
-    } else if (std::string::npos != fmt_str.find("#!2F")) {
-      offset *= 2;
-    } else {
-      LOG(FATAL) << "Should not reach here";
-    }
-    std::ostringstream tmp_stream;
-    Thread::DumpThreadOffset<8>(tmp_stream, offset);
-    buf += "  ; ";
-    buf += tmp_stream.str();
-  }
-  return buf;
-}
-
-void Arm64Mir2Lir::DumpResourceMask(LIR* arm_lir, const ResourceMask& mask, const char* prefix) {
-  char buf[256];
-  buf[0] = 0;
-
-  if (mask.Equals(kEncodeAll)) {
-    strcpy(buf, "all");
-  } else {
-    char num[8];
-    int i;
-
-    for (i = 0; i < kA64RegEnd; i++) {
-      if (mask.HasBit(i)) {
-        snprintf(num, arraysize(num), "%d ", i);
-        strcat(buf, num);
-      }
-    }
-
-    if (mask.HasBit(ResourceMask::kCCode)) {
-      strcat(buf, "cc ");
-    }
-    if (mask.HasBit(ResourceMask::kFPStatus)) {
-      strcat(buf, "fpcc ");
-    }
-
-    /* Memory bits */
-    if (arm_lir && (mask.HasBit(ResourceMask::kDalvikReg))) {
-      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
-               DECODE_ALIAS_INFO_REG(arm_lir->flags.alias_info),
-               DECODE_ALIAS_INFO_WIDE(arm_lir->flags.alias_info) ? "(+1)" : "");
-    }
-    if (mask.HasBit(ResourceMask::kLiteral)) {
-      strcat(buf, "lit ");
-    }
-
-    if (mask.HasBit(ResourceMask::kHeapRef)) {
-      strcat(buf, "heap ");
-    }
-    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
-      strcat(buf, "noalias ");
-    }
-  }
-  if (buf[0]) {
-    LOG(INFO) << prefix << ": " << buf;
-  }
-}
-
-bool Arm64Mir2Lir::IsUnconditionalBranch(LIR* lir) {
-  return (lir->opcode == kA64B1t);
-}
-
-RegisterClass Arm64Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
-  if (UNLIKELY(is_volatile)) {
-    // On arm64, fp register load/store is atomic only for single bytes.
-    if (size != kSignedByte && size != kUnsignedByte) {
-      return (size == kReference) ? kRefReg : kCoreReg;
-    }
-  }
-  return RegClassBySize(size);
-}
-
-Arm64Mir2Lir::Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
-    : Mir2Lir(cu, mir_graph, arena),
-      call_method_insns_(arena->Adapter()),
-      dex_cache_access_insns_(arena->Adapter()) {
-  // Sanity check - make sure encoding map lines up.
-  for (int i = 0; i < kA64Last; i++) {
-    DCHECK_EQ(UNWIDE(Arm64Mir2Lir::EncodingMap[i].opcode), i)
-        << "Encoding order for " << Arm64Mir2Lir::EncodingMap[i].name
-        << " is wrong: expecting " << i << ", seeing "
-        << static_cast<int>(Arm64Mir2Lir::EncodingMap[i].opcode);
-  }
-}
-
-Mir2Lir* Arm64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
-                            ArenaAllocator* const arena) {
-  return new Arm64Mir2Lir(cu, mir_graph, arena);
-}
-
-void Arm64Mir2Lir::CompilerInitializeRegAlloc() {
-  reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs, core64_regs, sp_regs, dp_regs,
-                                            reserved_regs, reserved64_regs,
-                                            core_temps, core64_temps, sp_temps, dp_temps));
-
-  // Target-specific adjustments.
-  // Alias single precision float registers to corresponding double registers.
-  for (RegisterInfo* info : reg_pool_->sp_regs_) {
-    int fp_reg_num = info->GetReg().GetRegNum();
-    RegStorage dp_reg = RegStorage::FloatSolo64(fp_reg_num);
-    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
-    // Double precision register's master storage should refer to itself.
-    DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
-    // Redirect single precision's master storage to master.
-    info->SetMaster(dp_reg_info);
-    // Singles should show a single 32-bit mask bit, at first referring to the low half.
-    DCHECK_EQ(info->StorageMask(), 0x1U);
-  }
-
-  // Alias 32bit W registers to corresponding 64bit X registers.
-  for (RegisterInfo* info : reg_pool_->core_regs_) {
-    int x_reg_num = info->GetReg().GetRegNum();
-    RegStorage x_reg = RegStorage::Solo64(x_reg_num);
-    RegisterInfo* x_reg_info = GetRegInfo(x_reg);
-    // 64bit X register's master storage should refer to itself.
-    DCHECK_EQ(x_reg_info, x_reg_info->Master());
-    // Redirect 32bit W master storage to 64bit X.
-    info->SetMaster(x_reg_info);
-    // 32bit W should show a single 32-bit mask bit, at first referring to the low half.
-    DCHECK_EQ(info->StorageMask(), 0x1U);
-  }
-
-  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
-  // TODO: adjust when we roll to hard float calling convention.
-  reg_pool_->next_core_reg_ = 2;
-  reg_pool_->next_sp_reg_ = 0;
-  reg_pool_->next_dp_reg_ = 0;
-}
-
-/*
- * TUNING: is this a true leaf?  We can't just use METHOD_IS_LEAF to determine that, as some
- * instructions might call out to C/assembly helper functions.  Until such machinery is in
- * place, always spill lr.
- */
-
-void Arm64Mir2Lir::AdjustSpillMask() {
-  core_spill_mask_ |= (1 << rs_xLR.GetRegNum());
-  num_core_spills_++;
-}
-
-/* Clobber all regs that might be used by an external C call */
-void Arm64Mir2Lir::ClobberCallerSave() {
-  Clobber(rs_x0);
-  Clobber(rs_x1);
-  Clobber(rs_x2);
-  Clobber(rs_x3);
-  Clobber(rs_x4);
-  Clobber(rs_x5);
-  Clobber(rs_x6);
-  Clobber(rs_x7);
-  Clobber(rs_x8);
-  Clobber(rs_x9);
-  Clobber(rs_x10);
-  Clobber(rs_x11);
-  Clobber(rs_x12);
-  Clobber(rs_x13);
-  Clobber(rs_x14);
-  Clobber(rs_x15);
-  Clobber(rs_x16);
-  Clobber(rs_x17);
-  Clobber(rs_x18);
-  Clobber(rs_x30);
-
-  Clobber(rs_f0);
-  Clobber(rs_f1);
-  Clobber(rs_f2);
-  Clobber(rs_f3);
-  Clobber(rs_f4);
-  Clobber(rs_f5);
-  Clobber(rs_f6);
-  Clobber(rs_f7);
-  Clobber(rs_f16);
-  Clobber(rs_f17);
-  Clobber(rs_f18);
-  Clobber(rs_f19);
-  Clobber(rs_f20);
-  Clobber(rs_f21);
-  Clobber(rs_f22);
-  Clobber(rs_f23);
-  Clobber(rs_f24);
-  Clobber(rs_f25);
-  Clobber(rs_f26);
-  Clobber(rs_f27);
-  Clobber(rs_f28);
-  Clobber(rs_f29);
-  Clobber(rs_f30);
-  Clobber(rs_f31);
-}
-
-RegLocation Arm64Mir2Lir::GetReturnWideAlt() {
-  RegLocation res = LocCReturnWide();
-  res.reg.SetReg(rx2);
-  res.reg.SetHighReg(rx3);
-  Clobber(rs_x2);
-  Clobber(rs_x3);
-  MarkInUse(rs_x2);
-  MarkInUse(rs_x3);
-  MarkWide(res.reg);
-  return res;
-}
-
-RegLocation Arm64Mir2Lir::GetReturnAlt() {
-  RegLocation res = LocCReturn();
-  res.reg.SetReg(rx1);
-  Clobber(rs_x1);
-  MarkInUse(rs_x1);
-  return res;
-}
-
-/* To be used when explicitly managing register use */
-void Arm64Mir2Lir::LockCallTemps() {
-  // TODO: needs cleanup.
-  LockTemp(rs_x0);
-  LockTemp(rs_x1);
-  LockTemp(rs_x2);
-  LockTemp(rs_x3);
-  LockTemp(rs_x4);
-  LockTemp(rs_x5);
-  LockTemp(rs_x6);
-  LockTemp(rs_x7);
-  LockTemp(rs_f0);
-  LockTemp(rs_f1);
-  LockTemp(rs_f2);
-  LockTemp(rs_f3);
-  LockTemp(rs_f4);
-  LockTemp(rs_f5);
-  LockTemp(rs_f6);
-  LockTemp(rs_f7);
-}
-
-/* To be used when explicitly managing register use */
-void Arm64Mir2Lir::FreeCallTemps() {
-  // TODO: needs cleanup.
-  FreeTemp(rs_x0);
-  FreeTemp(rs_x1);
-  FreeTemp(rs_x2);
-  FreeTemp(rs_x3);
-  FreeTemp(rs_x4);
-  FreeTemp(rs_x5);
-  FreeTemp(rs_x6);
-  FreeTemp(rs_x7);
-  FreeTemp(rs_f0);
-  FreeTemp(rs_f1);
-  FreeTemp(rs_f2);
-  FreeTemp(rs_f3);
-  FreeTemp(rs_f4);
-  FreeTemp(rs_f5);
-  FreeTemp(rs_f6);
-  FreeTemp(rs_f7);
-  FreeTemp(TargetReg(kHiddenArg));
-}
-
-RegStorage Arm64Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
-  // TODO(Arm64): use LoadWordDisp instead.
-  //   e.g. LoadWordDisp(rs_rA64_SELF, offset.Int32Value(), rs_rA64_LR);
-  LoadBaseDisp(rs_xSELF, GetThreadOffset<8>(trampoline).Int32Value(), rs_xLR, k64, kNotVolatile);
-  return rs_xLR;
-}
-
-LIR* Arm64Mir2Lir::CheckSuspendUsingLoad() {
-  RegStorage tmp = rs_x0;
-  LoadWordDisp(rs_xSELF, Thread::ThreadSuspendTriggerOffset<8>().Int32Value(), tmp);
-  LIR* load2 = LoadWordDisp(tmp, 0, tmp);
-  return load2;
-}
-
-uint64_t Arm64Mir2Lir::GetTargetInstFlags(int opcode) {
-  DCHECK(!IsPseudoLirOp(opcode));
-  return Arm64Mir2Lir::EncodingMap[UNWIDE(opcode)].flags;
-}
-
-const char* Arm64Mir2Lir::GetTargetInstName(int opcode) {
-  DCHECK(!IsPseudoLirOp(opcode));
-  return Arm64Mir2Lir::EncodingMap[UNWIDE(opcode)].name;
-}
-
-const char* Arm64Mir2Lir::GetTargetInstFmt(int opcode) {
-  DCHECK(!IsPseudoLirOp(opcode));
-  return Arm64Mir2Lir::EncodingMap[UNWIDE(opcode)].fmt;
-}
-
-RegStorage Arm64Mir2Lir::InToRegStorageArm64Mapper::GetNextReg(ShortyArg arg) {
-  const RegStorage coreArgMappingToPhysicalReg[] =
-      {rs_x1, rs_x2, rs_x3, rs_x4, rs_x5, rs_x6, rs_x7};
-  const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
-  const RegStorage fpArgMappingToPhysicalReg[] =
-      {rs_f0, rs_f1, rs_f2, rs_f3, rs_f4, rs_f5, rs_f6, rs_f7};
-  const size_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
-
-  RegStorage result = RegStorage::InvalidReg();
-  if (arg.IsFP()) {
-    if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
-      DCHECK(!arg.IsRef());
-      result = fpArgMappingToPhysicalReg[cur_fp_reg_++];
-      if (result.Valid()) {
-        // TODO: switching between widths remains a bit ugly.  Better way?
-        int res_reg = result.GetReg();
-        result = arg.IsWide() ? RegStorage::FloatSolo64(res_reg) : RegStorage::FloatSolo32(res_reg);
-      }
-    }
-  } else {
-    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
-      result = coreArgMappingToPhysicalReg[cur_core_reg_++];
-      if (result.Valid()) {
-        // TODO: switching between widths remains a bit ugly.  Better way?
-        int res_reg = result.GetReg();
-        DCHECK(!(arg.IsWide() && arg.IsRef()));
-        result = (arg.IsWide() || arg.IsRef()) ?
-                 RegStorage::Solo64(res_reg) : RegStorage::Solo32(res_reg);
-      }
-    }
-  }
-  return result;
-}
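-
-// A worked example of the mapping above (a sketch) for a hypothetical shorty "VFDI", i.e. args
-// (float, double, int): the float maps to s0, the double to d1 (the FP counter advances once per
-// FP arg, not per 32-bit slot), and the int to w1 (core arguments start at x1; x0 is absent from
-// the mapping array above).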
-
-void Arm64Mir2Lir::InstallLiteralPools() {
-  patches_.reserve(call_method_insns_.size() + dex_cache_access_insns_.size());
-
-  // PC-relative calls to methods.
-  for (LIR* p : call_method_insns_) {
-    DCHECK_EQ(p->opcode, kA64Bl1t);
-    uint32_t target_method_idx = p->operands[1];
-    const DexFile* target_dex_file = UnwrapPointer<DexFile>(p->operands[2]);
-    patches_.push_back(LinkerPatch::RelativeCodePatch(p->offset,
-                                                      target_dex_file, target_method_idx));
-  }
-
-  // PC-relative references to dex cache arrays.
-  for (LIR* p : dex_cache_access_insns_) {
-    auto non_wide = UNWIDE(p->opcode);  // May be a wide load for ArtMethod*.
-    DCHECK(non_wide == kA64Adrp2xd || non_wide == kA64Ldr3rXD) << p->opcode << " " << non_wide;
-    const LIR* adrp = UnwrapPointer<LIR>(p->operands[4]);
-    DCHECK_EQ(adrp->opcode, kA64Adrp2xd);
-    const DexFile* dex_file = UnwrapPointer<DexFile>(adrp->operands[2]);
-    uint32_t offset = adrp->operands[3];
-    DCHECK(!p->flags.is_nop);
-    DCHECK(!adrp->flags.is_nop);
-    patches_.push_back(LinkerPatch::DexCacheArrayPatch(p->offset, dex_file, adrp->offset, offset));
-  }
-
-  // And do the normal processing.
-  Mir2Lir::InstallLiteralPools();
-}
-
-int Arm64Mir2Lir::GenDalvikArgsBulkCopy(CallInfo* /*info*/, int /*first*/, int count) {
-  /*
-   * TODO: Improve by adding block copy for large number of arguments.  For now, just
-   * copy a Dalvik vreg at a time.
-   */
-  return count;
-}
-
-void Arm64Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
-  DCHECK(MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode));
-  RegLocation rl_src[3];
-  RegLocation rl_dest = mir_graph_->GetBadLoc();
-  rl_src[0] = rl_src[1] = rl_src[2] = mir_graph_->GetBadLoc();
-  ExtendedMIROpcode opcode = static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode);
-  switch (opcode) {
-    case kMirOpMaddInt:
-    case kMirOpMsubInt:
-      rl_dest = mir_graph_->GetDest(mir);
-      rl_src[0] = mir_graph_->GetSrc(mir, 0);
-      rl_src[1] = mir_graph_->GetSrc(mir, 1);
-      rl_src[2] = mir_graph_->GetSrc(mir, 2);
-      GenMaddMsubInt(rl_dest, rl_src[0], rl_src[1], rl_src[2], opcode == kMirOpMsubInt);
-      break;
-    case kMirOpMaddLong:
-    case kMirOpMsubLong:
-      rl_dest = mir_graph_->GetDestWide(mir);
-      rl_src[0] = mir_graph_->GetSrcWide(mir, 0);
-      rl_src[1] = mir_graph_->GetSrcWide(mir, 2);
-      rl_src[2] = mir_graph_->GetSrcWide(mir, 4);
-      GenMaddMsubLong(rl_dest, rl_src[0], rl_src[1], rl_src[2], opcode == kMirOpMsubLong);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << static_cast<int>(opcode);
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
deleted file mode 100644
index 58769ea..0000000
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ /dev/null
@@ -1,1407 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_arm64.h"
-
-#include "arm64_lir.h"
-#include "base/logging.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "dex/reg_storage_eq.h"
-
-namespace art {
-
-/* This file contains codegen for the A64 ISA. */
-
-int32_t Arm64Mir2Lir::EncodeImmSingle(uint32_t bits) {
-  /*
-   * Valid values will have the form:
-   *
-   *   aBbb.bbbc.defg.h000.0000.0000.0000.0000
-   *
- * where B = not(b). In other words, if b == 1, then B == 0 and vice versa.
-   */
-
-  // bits[18..0] are cleared.
-  if ((bits & 0x0007ffff) != 0)
-    return -1;
-
-  // bits[29..25] are all set or all cleared.
-  uint32_t b_pattern = (bits >> 16) & 0x3e00;
-  if (b_pattern != 0 && b_pattern != 0x3e00)
-    return -1;
-
-  // bit[30] and bit[29] are opposite.
-  if (((bits ^ (bits << 1)) & 0x40000000) == 0)
-    return -1;
-
-  // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
-  // bit7: a000.0000
-  uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
-  // bit6: 0b00.0000
-  uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
-  // bit5_to_0: 00cd.efgh
-  uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
-  return (bit7 | bit6 | bit5_to_0);
-}
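-
-// Worked examples (a sketch): for 2.0f (bits 0x40000000) every check passes and the result is
-// 0x00; for 0.5f (bits 0x3f000000) it is 0x60. Both round-trip through DecodeImmSingle() in
-// target_arm64.cc.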
-
-int32_t Arm64Mir2Lir::EncodeImmDouble(uint64_t bits) {
-  /*
-   * Valid values will have the form:
-   *
-   *   aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
-   *   0000.0000.0000.0000.0000.0000.0000.0000
-   *
-   * where B = not(b).
-   */
-
-  // bits[47..0] are cleared.
-  if ((bits & UINT64_C(0xffffffffffff)) != 0)
-    return -1;
-
-  // bits[61..54] are all set or all cleared.
-  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
-  if (b_pattern != 0 && b_pattern != 0x3fc0)
-    return -1;
-
-  // bit[62] and bit[61] are opposite.
-  if (((bits ^ (bits << 1)) & UINT64_C(0x4000000000000000)) == 0)
-    return -1;
-
-  // bit7: a000.0000
-  uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
-  // bit6: 0b00.0000
-  uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
-  // bit5_to_0: 00cd.efgh
-  uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
-  return (bit7 | bit6 | bit5_to_0);
-}
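-
-// Worked example (a sketch): for 2.0 (bits 0x4000000000000000) the result is 0x00, mirroring
-// the single-precision case above.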
-
-size_t Arm64Mir2Lir::GetLoadStoreSize(LIR* lir) {
-  bool opcode_is_wide = IS_WIDE(lir->opcode);
-  A64Opcode opcode = UNWIDE(lir->opcode);
-  DCHECK(!IsPseudoLirOp(opcode));
-  const A64EncodingMap *encoder = &EncodingMap[opcode];
-  uint32_t bits = opcode_is_wide ? encoder->xskeleton : encoder->wskeleton;
-  return (bits >> 30);
-}
-
-size_t Arm64Mir2Lir::GetInstructionOffset(LIR* lir) {
-  size_t offset = lir->operands[2];
-  uint64_t check_flags = GetTargetInstFlags(lir->opcode);
-  DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
-  if (check_flags & SCALED_OFFSET_X0) {
-    DCHECK(check_flags & IS_TERTIARY_OP);
-    offset = offset * (1 << GetLoadStoreSize(lir));
-  }
-  return offset;
-}
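-
-// A sketch of the rescaling above: for a 64-bit "ldr x0, [x1, #16]" the LIR operand holds the
-// offset divided by the access size (16 / 8 = 2). GetLoadStoreSize() reads the size field from
-// the top two bits of the skeleton (3 for a 64-bit access), so the byte offset is rebuilt as
-// 2 * (1 << 3) = 16.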
-
-LIR* Arm64Mir2Lir::LoadFPConstantValue(RegStorage r_dest, int32_t value) {
-  DCHECK(r_dest.IsSingle());
-  if (value == 0) {
-    return NewLIR2(kA64Fmov2sw, r_dest.GetReg(), rwzr);
-  } else {
-    int32_t encoded_imm = EncodeImmSingle((uint32_t)value);
-    if (encoded_imm >= 0) {
-      return NewLIR2(kA64Fmov2fI, r_dest.GetReg(), encoded_imm);
-    }
-  }
-
-  LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
-  if (data_target == nullptr) {
-    // Wide, as we need 8B alignment.
-    data_target = AddWideData(&literal_list_, value, 0);
-  }
-
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
-  LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kA64Ldr2fp,
-                            r_dest.GetReg(), 0, 0, 0, 0, data_target);
-  AppendLIR(load_pc_rel);
-  return load_pc_rel;
-}
-
-LIR* Arm64Mir2Lir::LoadFPConstantValueWide(RegStorage r_dest, int64_t value) {
-  DCHECK(r_dest.IsDouble());
-  if (value == 0) {
-    return NewLIR2(kA64Fmov2Sx, r_dest.GetReg(), rxzr);
-  } else {
-    int32_t encoded_imm = EncodeImmDouble(value);
-    if (encoded_imm >= 0) {
-      return NewLIR2(WIDE(kA64Fmov2fI), r_dest.GetReg(), encoded_imm);
-    }
-  }
-
-  // No short form - load from the literal pool.
-  int32_t val_lo = Low32Bits(value);
-  int32_t val_hi = High32Bits(value);
-  LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
-  if (data_target == nullptr) {
-    data_target = AddWideData(&literal_list_, val_lo, val_hi);
-  }
-
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
-  LIR* load_pc_rel = RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2fp),
-                            r_dest.GetReg(), 0, 0, 0, 0, data_target);
-  AppendLIR(load_pc_rel);
-  return load_pc_rel;
-}
-
-static int CountLeadingZeros(bool is_wide, uint64_t value) {
-  return (is_wide) ? __builtin_clzll(value) : __builtin_clz((uint32_t)value);
-}
-
-static int CountTrailingZeros(bool is_wide, uint64_t value) {
-  return (is_wide) ? __builtin_ctzll(value) : __builtin_ctz((uint32_t)value);
-}
-
-static int CountSetBits(bool is_wide, uint64_t value) {
-  return ((is_wide) ?
-          __builtin_popcountll(value) : __builtin_popcount((uint32_t)value));
-}
-
-/**
- * @brief Try encoding an immediate in the form required by logical instructions.
- *
- * @param is_wide Whether @p value is a 64-bit (as opposed to 32-bit) value.
- * @param value An integer to be encoded. This is interpreted as 64-bit if @p is_wide is true and as
- *   32-bit if @p is_wide is false.
- * @return A non-negative integer containing the encoded immediate or -1 if the encoding failed.
- * @note This is the inverse of Arm64Mir2Lir::DecodeLogicalImmediate().
- */
-int Arm64Mir2Lir::EncodeLogicalImmediate(bool is_wide, uint64_t value) {
-  unsigned n, imm_s, imm_r;
-
-  // Logical immediates are encoded using parameters n, imm_s and imm_r using
-  // the following table:
-  //
-  //  N   imms    immr    size        S             R
-  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
-  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
-  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
-  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
-  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
-  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
-  // (s bits must not be all set)
-  //
-  // A pattern is constructed of size bits, where the least significant S+1
-  // bits are set. The pattern is rotated right by R, and repeated across a
-  // 32 or 64-bit value, depending on destination register width.
-  //
-  // To test if an arbitrary immediate can be encoded using this scheme, an
-  // iterative algorithm is used.
-  //
-
-  // 1. If the value has all set or all clear bits, it can't be encoded.
-  if (value == 0 || value == ~UINT64_C(0) ||
-      (!is_wide && (uint32_t)value == ~UINT32_C(0))) {
-    return -1;
-  }
-
-  unsigned lead_zero  = CountLeadingZeros(is_wide, value);
-  unsigned lead_one   = CountLeadingZeros(is_wide, ~value);
-  unsigned trail_zero = CountTrailingZeros(is_wide, value);
-  unsigned trail_one  = CountTrailingZeros(is_wide, ~value);
-  unsigned set_bits   = CountSetBits(is_wide, value);
-
-  // The fixed bits in the immediate s field.
-  // If width == 64 (X reg), start at 0xFFFFFF80.
-  // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
-  // widths won't be executed.
-  unsigned width = (is_wide) ? 64 : 32;
-  int imm_s_fixed = (is_wide) ? -128 : -64;
-  int imm_s_mask = 0x3f;
-
-  for (;;) {
-    // 2. If the value is two bits wide, it can be encoded.
-    if (width == 2) {
-      n = 0;
-      imm_s = 0x3C;
-      imm_r = (value & 3) - 1;
-      break;
-    }
-
-    n = (width == 64) ? 1 : 0;
-    imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
-    if ((lead_zero + set_bits) == width) {
-      imm_r = 0;
-    } else {
-      imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
-    }
-
-    // 3. If the sum of leading zeros, trailing zeros and set bits is
-    //    equal to the bit width of the value, it can be encoded.
-    if (lead_zero + trail_zero + set_bits == width) {
-      break;
-    }
-
-    // 4. If the sum of leading ones, trailing ones and unset bits in the
-    //    value is equal to the bit width of the value, it can be encoded.
-    if (lead_one + trail_one + (width - set_bits) == width) {
-      break;
-    }
-
-    // 5. If the most-significant half of the bitwise value is equal to
-    //    the least-significant half, return to step 2 using the
-    //    least-significant half of the value.
-    uint64_t mask = (UINT64_C(1) << (width >> 1)) - 1;
-    if ((value & mask) == ((value >> (width >> 1)) & mask)) {
-      width >>= 1;
-      set_bits >>= 1;
-      imm_s_fixed >>= 1;
-      continue;
-    }
-
-    // 6. Otherwise, the value can't be encoded.
-    return -1;
-  }
-
-  return (n << 12 | imm_r << 6 | imm_s);
-}
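-
-// Worked examples for the algorithm above (a sketch; each round-trips through
-// DecodeLogicalImmediate()):
-//   EncodeLogicalImmediate(true, 0xff) == 0x1007              (step 3: n=1, imm_s=7, imm_r=0)
-//   EncodeLogicalImmediate(false, 0x00ff00ff) == 0x27         (halved once to a 16-bit pattern)
-//   EncodeLogicalImmediate(true, 0x5555555555555555) == 0x3c  (halved down to width 2)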
-
-// Maximum number of instructions to use for encoding the immediate.
-static const int max_num_ops_per_const_load = 2;
-
-/**
- * @brief Return the number of fast halfwords in the given uint64_t integer.
- * @details The input integer is split into 4 halfwords (bits 0-15, 16-31, 32-47, 48-63). The
- *   number of fast halfwords (halfwords that are either 0 or 0xffff) is returned. See below for
- *   a more accurate description.
- * @param value The input 64-bit integer.
- * @return Return @c retval such that (retval & 0x7) is the maximum between n and m, where n is
- *   the number of halfwords with all bits unset (0) and m is the number of halfwords with all bits
- *   set (0xffff). Additionally (retval & 0x8) is set when m > n.
- */
-static int GetNumFastHalfWords(uint64_t value) {
-  unsigned int num_0000_halfwords = 0;
-  unsigned int num_ffff_halfwords = 0;
-  for (int shift = 0; shift < 64; shift += 16) {
-    uint16_t halfword = static_cast<uint16_t>(value >> shift);
-    if (halfword == 0)
-      num_0000_halfwords++;
-    else if (halfword == UINT16_C(0xffff))
-      num_ffff_halfwords++;
-  }
-  if (num_0000_halfwords >= num_ffff_halfwords) {
-    DCHECK_LE(num_0000_halfwords, 4U);
-    return num_0000_halfwords;
-  } else {
-    DCHECK_LE(num_ffff_halfwords, 4U);
-    return num_ffff_halfwords | 0x8;
-  }
-}
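-
-// Worked examples (a sketch): GetNumFastHalfWords(0x00000000ffff1234) == 2 (two zero halfwords
-// outnumber the single 0xffff one), while GetNumFastHalfWords(0xffffffff0000ffff) == 0xb (three
-// 0xffff halfwords, with bit 3 set to signal that 0xffff dominates).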
-
-// The InexpensiveConstantXXX variants below are used in the promotion algorithm to determine how a
-// constant is considered for promotion. If the constant is "inexpensive" then the promotion
-// algorithm will give it a low priority for promotion, even when it is referenced many times in
-// the code.
-
-bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value ATTRIBUTE_UNUSED) {
-  // A 32-bit int can always be loaded with 2 instructions (and without using the literal pool).
-  // We therefore return true and give it a low priority for promotion.
-  return true;
-}
-
-bool Arm64Mir2Lir::InexpensiveConstantFloat(int32_t value) {
-  return EncodeImmSingle(value) >= 0;
-}
-
-bool Arm64Mir2Lir::InexpensiveConstantLong(int64_t value) {
-  int num_slow_halfwords = 4 - (GetNumFastHalfWords(value) & 0x7);
-  if (num_slow_halfwords <= max_num_ops_per_const_load) {
-    return true;
-  }
-  return (EncodeLogicalImmediate(/*is_wide=*/true, value) >= 0);
-}
-
-bool Arm64Mir2Lir::InexpensiveConstantDouble(int64_t value) {
-  return EncodeImmDouble(value) >= 0;
-}
-
-// The InexpensiveConstantXXX variants below are used to determine which A64 instructions to use
-// when one of the operands is an immediate (e.g. register version or immediate version of add).
-
-bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
-  switch (opcode) {
-  case Instruction::IF_EQ:
-  case Instruction::IF_NE:
-  case Instruction::IF_LT:
-  case Instruction::IF_GE:
-  case Instruction::IF_GT:
-  case Instruction::IF_LE:
-  case Instruction::ADD_INT:
-  case Instruction::ADD_INT_2ADDR:
-  case Instruction::SUB_INT:
-  case Instruction::SUB_INT_2ADDR:
-    // The code below is consistent with the implementation of OpRegRegImm().
-    {
-      uint32_t abs_value = (value == INT_MIN) ? value : std::abs(value);
-      if (abs_value < 0x1000) {
-        return true;
-      } else if ((abs_value & UINT64_C(0xfff)) == 0 && ((abs_value >> 12) < 0x1000)) {
-        return true;
-      }
-      return false;
-    }
-  case Instruction::SHL_INT:
-  case Instruction::SHL_INT_2ADDR:
-  case Instruction::SHR_INT:
-  case Instruction::SHR_INT_2ADDR:
-  case Instruction::USHR_INT:
-  case Instruction::USHR_INT_2ADDR:
-    return true;
-  case Instruction::AND_INT:
-  case Instruction::AND_INT_2ADDR:
-  case Instruction::AND_INT_LIT16:
-  case Instruction::AND_INT_LIT8:
-  case Instruction::OR_INT:
-  case Instruction::OR_INT_2ADDR:
-  case Instruction::OR_INT_LIT16:
-  case Instruction::OR_INT_LIT8:
-  case Instruction::XOR_INT:
-  case Instruction::XOR_INT_2ADDR:
-  case Instruction::XOR_INT_LIT16:
-  case Instruction::XOR_INT_LIT8:
-    if (value == 0 || value == INT32_C(-1)) {
-      return true;
-    }
-    return (EncodeLogicalImmediate(/*is_wide=*/false, value) >= 0);
-  default:
-    return false;
-  }
-}
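-
-// A sketch of the ADD/SUB immediate rule above: 0xfff is encodable directly, 0x1000 as a
-// 12-bit-shifted immediate (0x1 << 12), while 0x1001 is neither and would need a separate
-// constant load.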
-
-/*
- * Load an immediate using a single instruction when possible; otherwise
- * use a pair of movz and movk instructions.
- *
- * No additional register clobbering operations are performed. Use this version when
- * 1) r_dest is freshly returned from AllocTemp or
- * 2) The codegen is under fixed register usage
- */
-LIR* Arm64Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
-  LIR* res;
-
-  if (r_dest.IsFloat()) {
-    return LoadFPConstantValue(r_dest, value);
-  }
-
-  if (r_dest.Is64Bit()) {
-    return LoadConstantWide(r_dest, value);
-  }
-
-  // Loading SP/ZR with an immediate is not supported.
-  DCHECK(!A64_REG_IS_SP(r_dest.GetReg()));
-  DCHECK(!A64_REG_IS_ZR(r_dest.GetReg()));
-
-  // Compute how many movk, movz instructions are needed to load the value.
-  uint16_t high_bits = High16Bits(value);
-  uint16_t low_bits = Low16Bits(value);
-
-  // A halfword is "fast" when it is 0x0000 or 0xffff: the whole value can then be materialized
-  // with a single movz or movn.
-  bool low_fast = ((uint16_t)(low_bits + 1) <= 1);
-  bool high_fast = ((uint16_t)(high_bits + 1) <= 1);
-
-  if (LIKELY(low_fast || high_fast)) {
-    // 1 instruction is enough to load the immediate.
-    if (LIKELY(low_bits == high_bits)) {
-      // Value is either 0 or -1: we can just use wzr.
-      A64Opcode opcode = LIKELY(low_bits == 0) ? kA64Mov2rr : kA64Mvn2rr;
-      res = NewLIR2(opcode, r_dest.GetReg(), rwzr);
-    } else {
-      uint16_t uniform_bits, useful_bits;
-      int shift;
-
-      if (LIKELY(high_fast)) {
-        shift = 0;
-        uniform_bits = high_bits;
-        useful_bits = low_bits;
-      } else {
-        shift = 1;
-        uniform_bits = low_bits;
-        useful_bits = high_bits;
-      }
-
-      if (UNLIKELY(uniform_bits != 0)) {
-        res = NewLIR3(kA64Movn3rdM, r_dest.GetReg(), ~useful_bits, shift);
-      } else {
-        res = NewLIR3(kA64Movz3rdM, r_dest.GetReg(), useful_bits, shift);
-      }
-    }
-  } else {
-    // movk, movz require 2 instructions. Try detecting logical immediates.
-    int log_imm = EncodeLogicalImmediate(/*is_wide=*/false, value);
-    if (log_imm >= 0) {
-      res = NewLIR3(kA64Orr3Rrl, r_dest.GetReg(), rwzr, log_imm);
-    } else {
-      // Use 2 instructions.
-      res = NewLIR3(kA64Movz3rdM, r_dest.GetReg(), low_bits, 0);
-      NewLIR3(kA64Movk3rdM, r_dest.GetReg(), high_bits, 1);
-    }
-  }
-
-  return res;
-}
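-
-// A sketch of the decisions above for some sample values (using w0 for r_dest):
-//   0           -> mov  w0, wzr
-//   0x12340000  -> movz w0, #0x1234, lsl #16      (the low halfword is "fast")
-//   0x00ff00ff  -> orr  w0, wzr, #0x00ff00ff      (logical immediate, encoding 0x27)
-//   0x12345678  -> movz w0, #0x5678 ; movk w0, #0x1234, lsl #16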
-
-// TODO: clean up the names. LoadConstantWide() should really be LoadConstantNoClobberWide().
-LIR* Arm64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
-  if (r_dest.IsFloat()) {
-    return LoadFPConstantValueWide(r_dest, value);
-  }
-
-  DCHECK(r_dest.Is64Bit());
-
-  // Loading SP/ZR with an immediate is not supported.
-  DCHECK(!A64_REG_IS_SP(r_dest.GetReg()));
-  DCHECK(!A64_REG_IS_ZR(r_dest.GetReg()));
-
-  if (LIKELY(value == INT64_C(0) || value == INT64_C(-1))) {
-    // value is either 0 or -1: we can just use xzr.
-    A64Opcode opcode = LIKELY(value == 0) ? WIDE(kA64Mov2rr) : WIDE(kA64Mvn2rr);
-    return NewLIR2(opcode, r_dest.GetReg(), rxzr);
-  }
-
-  // At least one of value's halfwords is neither 0x0 nor 0xffff: find out how many.
-  uint64_t uvalue = static_cast<uint64_t>(value);
-  int num_fast_halfwords = GetNumFastHalfWords(uvalue);
-  int num_slow_halfwords = 4 - (num_fast_halfwords & 0x7);
-  bool more_ffff_halfwords = (num_fast_halfwords & 0x8) != 0;
-
-  if (num_slow_halfwords > 1) {
-    // A single movz/movn is not enough. Try the logical immediate route.
-    int log_imm = EncodeLogicalImmediate(/*is_wide=*/true, value);
-    if (log_imm >= 0) {
-      return NewLIR3(WIDE(kA64Orr3Rrl), r_dest.GetReg(), rxzr, log_imm);
-    }
-  }
-
-  if (num_slow_halfwords <= max_num_ops_per_const_load) {
-    // We can encode the number using a movz/movn followed by one or more movk.
-    A64Opcode op;
-    uint16_t background;
-    LIR* res = nullptr;
-
-    // Decide whether to use a movz or a movn.
-    if (more_ffff_halfwords) {
-      op = WIDE(kA64Movn3rdM);
-      background = 0xffff;
-    } else {
-      op = WIDE(kA64Movz3rdM);
-      background = 0;
-    }
-
-    // Emit the first instruction (movz, movn).
-    int shift;
-    for (shift = 0; shift < 4; shift++) {
-      uint16_t halfword = static_cast<uint16_t>(uvalue >> (shift << 4));
-      if (halfword != background) {
-        res = NewLIR3(op, r_dest.GetReg(), halfword ^ background, shift);
-        break;
-      }
-    }
-
-    // Emit the movk instructions.
-    for (shift++; shift < 4; shift++) {
-      uint16_t halfword = static_cast<uint16_t>(uvalue >> (shift << 4));
-      if (halfword != background) {
-        NewLIR3(WIDE(kA64Movk3rdM), r_dest.GetReg(), halfword, shift);
-      }
-    }
-    return res;
-  }
-
-  // Use the literal pool.
-  int32_t val_lo = Low32Bits(value);
-  int32_t val_hi = High32Bits(value);
-  LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
-  if (data_target == nullptr) {
-    data_target = AddWideData(&literal_list_, val_lo, val_hi);
-  }
-
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
-  LIR *res = RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp),
-                    r_dest.GetReg(), 0, 0, 0, 0, data_target);
-  AppendLIR(res);
-  return res;
-}
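-
-// A sketch of the wide paths above (using x0 for r_dest):
-//   0x0000ffff00001234 -> movz x0, #0x1234 ; movk x0, #0xffff, lsl #32  (two slow halfwords)
-//   0x5555555555555555 -> orr  x0, xzr, #0x5555555555555555             (logical immediate)
-// Anything needing three or more movz/movk instructions falls back to the literal pool.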
-
-LIR* Arm64Mir2Lir::OpUnconditionalBranch(LIR* target) {
-  LIR* res = NewLIR1(kA64B1t, 0 /* offset to be patched during assembly */);
-  res->target = target;
-  return res;
-}
-
-LIR* Arm64Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
-  LIR* branch = NewLIR2(kA64B2ct, ArmConditionEncoding(cc),
-                        0 /* offset to be patched */);
-  branch->target = target;
-  return branch;
-}
-
-LIR* Arm64Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
-  A64Opcode opcode = kA64Brk1d;
-  switch (op) {
-    case kOpBlx:
-      opcode = kA64Blr1x;
-      break;
-    default:
-      LOG(FATAL) << "Bad opcode " << op;
-  }
-  return NewLIR1(opcode, r_dest_src.GetReg());
-}
-
-LIR* Arm64Mir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift) {
-  A64Opcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
-  CHECK_EQ(r_dest_src1.Is64Bit(), r_src2.Is64Bit());
-  A64Opcode opcode = kA64Brk1d;
-
-  switch (op) {
-    case kOpCmn:
-      opcode = kA64Cmn3rro;
-      break;
-    case kOpCmp:
-      opcode = kA64Cmp3rro;
-      break;
-    case kOpMov:
-      opcode = kA64Mov2rr;
-      break;
-    case kOpMvn:
-      opcode = kA64Mvn2rr;
-      break;
-    case kOpNeg:
-      opcode = kA64Neg3rro;
-      break;
-    case kOpTst:
-      opcode = kA64Tst3rro;
-      break;
-    case kOpRev:
-      DCHECK_EQ(shift, 0);
-      // Binary, but rm is encoded twice.
-      return NewLIR2(kA64Rev2rr | wide, r_dest_src1.GetReg(), r_src2.GetReg());
-    case kOpRevsh:
-      // Binary, but rm is encoded twice.
-      NewLIR2(kA64Rev162rr | wide, r_dest_src1.GetReg(), r_src2.GetReg());
-      // "sxth r1, r2" is "sbfm r1, r2, #0, #15"
-      return NewLIR4(kA64Sbfm4rrdd | wide, r_dest_src1.GetReg(), r_dest_src1.GetReg(), 0, 15);
-    case kOp2Byte:
-      DCHECK_EQ(shift, ENCODE_NO_SHIFT);
-      // "sbfx r1, r2, #imm1, #imm2" is "sbfm r1, r2, #imm1, #(imm1 + imm2 - 1)".
-      // For now we use sbfm directly.
-      return NewLIR4(kA64Sbfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 7);
-    case kOp2Short:
-      DCHECK_EQ(shift, ENCODE_NO_SHIFT);
-      // For now we use sbfm rather than its alias, sbfx.
-      return NewLIR4(kA64Sbfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 15);
-    case kOp2Char:
-      // "ubfx r1, r2, #imm1, #imm2" is "ubfm r1, r2, #imm1, #(imm1 + imm2 - 1)".
-      // For now we use ubfm directly.
-      DCHECK_EQ(shift, ENCODE_NO_SHIFT);
-      return NewLIR4(kA64Ubfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 15);
-    default:
-      return OpRegRegRegShift(op, r_dest_src1, r_dest_src1, r_src2, shift);
-  }
-
-  DCHECK(!IsPseudoLirOp(opcode));
-  if (EncodingMap[opcode].flags & IS_BINARY_OP) {
-    DCHECK_EQ(shift, ENCODE_NO_SHIFT);
-    return NewLIR2(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg());
-  } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
-    A64EncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
-    if (kind == kFmtShift) {
-      return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(), shift);
-    }
-  }
-
-  LOG(FATAL) << "Unexpected encoding operand count";
-  return nullptr;
-}
-
-LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
-                                  A64RegExtEncodings ext, uint8_t amount) {
-  A64Opcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
-  A64Opcode opcode = kA64Brk1d;
-
-  switch (op) {
-    case kOpCmn:
-      opcode = kA64Cmn3Rre;
-      break;
-    case kOpCmp:
-      opcode = kA64Cmp3Rre;
-      break;
-    case kOpAdd:
-      // Note: intentional fallthrough
-    case kOpSub:
-      return OpRegRegRegExtend(op, r_dest_src1, r_dest_src1, r_src2, ext, amount);
-    default:
-      LOG(FATAL) << "Bad Opcode: " << opcode;
-      UNREACHABLE();
-  }
-
-  DCHECK(!IsPseudoLirOp(opcode));
-  if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
-    A64EncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
-    if (kind == kFmtExtend) {
-      return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(),
-                     EncodeExtend(ext, amount));
-    }
-  }
-
-  LOG(FATAL) << "Unexpected encoding operand count";
-  return nullptr;
-}
-
-LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
-  /* RegReg operations with SP in first parameter need extended register instruction form.
-   * Only CMN, CMP, ADD & SUB instructions are implemented.
-   */
-  if (r_dest_src1 == rs_sp) {
-    return OpRegRegExtend(op, r_dest_src1, r_src2, kA64Uxtx, 0);
-  } else {
-    return OpRegRegShift(op, r_dest_src1, r_src2, ENCODE_NO_SHIFT);
-  }
-}
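
A note on why SP is special here: in most AArch64 data-processing encodings register number 31 means XZR, but the extended-register forms of ADD/SUB/CMN/CMP interpret it as SP. Something like `add sp, sp, x1` is therefore only expressible as the extended form `add sp, sp, x1, uxtx #0`, which is exactly what the rs_sp branch above emits.
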
-
-LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest ATTRIBUTE_UNUSED,
-                               RegStorage r_base ATTRIBUTE_UNUSED,
-                               int offset ATTRIBUTE_UNUSED,
-                               MoveType move_type ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL);
-  UNREACHABLE();
-}
-
-LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base ATTRIBUTE_UNUSED,
-                               int offset ATTRIBUTE_UNUSED,
-                               RegStorage r_src ATTRIBUTE_UNUSED,
-                               MoveType move_type ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL);
-  return nullptr;
-}
-
-LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op ATTRIBUTE_UNUSED,
-                                ConditionCode cc ATTRIBUTE_UNUSED,
-                                RegStorage r_dest ATTRIBUTE_UNUSED,
-                                RegStorage r_src ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm64";
-  UNREACHABLE();
-}
-
-LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
-                                    RegStorage r_src2, int shift) {
-  A64Opcode opcode = kA64Brk1d;
-
-  switch (op) {
-    case kOpAdd:
-      opcode = kA64Add4rrro;
-      break;
-    case kOpSub:
-      opcode = kA64Sub4rrro;
-      break;
-    // case kOpRsub:
-    //   opcode = kA64RsubWWW;
-    //   break;
-    case kOpAdc:
-      opcode = kA64Adc3rrr;
-      break;
-    case kOpAnd:
-      opcode = kA64And4rrro;
-      break;
-    case kOpXor:
-      opcode = kA64Eor4rrro;
-      break;
-    case kOpMul:
-      opcode = kA64Mul3rrr;
-      break;
-    case kOpDiv:
-      opcode = kA64Sdiv3rrr;
-      break;
-    case kOpOr:
-      opcode = kA64Orr4rrro;
-      break;
-    case kOpSbc:
-      opcode = kA64Sbc3rrr;
-      break;
-    case kOpLsl:
-      opcode = kA64Lsl3rrr;
-      break;
-    case kOpLsr:
-      opcode = kA64Lsr3rrr;
-      break;
-    case kOpAsr:
-      opcode = kA64Asr3rrr;
-      break;
-    case kOpRor:
-      opcode = kA64Ror3rrr;
-      break;
-    default:
-      LOG(FATAL) << "Bad opcode: " << op;
-      break;
-  }
-
-  // The instructions above belong to two kinds:
-  // - 4-operand instructions, where the last operand is a shift/extend immediate,
-  // - 3-operand instructions with no shift/extend.
-  A64Opcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;
-  CHECK_EQ(r_dest.Is64Bit(), r_src1.Is64Bit());
-  CHECK_EQ(r_dest.Is64Bit(), r_src2.Is64Bit());
-  if (EncodingMap[opcode].flags & IS_QUAD_OP) {
-    DCHECK(!IsExtendEncoding(shift));
-    return NewLIR4(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift);
-  } else {
-    DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
-    DCHECK_EQ(shift, ENCODE_NO_SHIFT);
-    return NewLIR3(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
-  }
-}
-
-LIR* Arm64Mir2Lir::OpRegRegRegExtend(OpKind op, RegStorage r_dest, RegStorage r_src1,
-                                     RegStorage r_src2, A64RegExtEncodings ext, uint8_t amount) {
-  A64Opcode opcode = kA64Brk1d;
-
-  switch (op) {
-    case kOpAdd:
-      opcode = kA64Add4RRre;
-      break;
-    case kOpSub:
-      opcode = kA64Sub4RRre;
-      break;
-    default:
-      UNIMPLEMENTED(FATAL) << "Unimplemented opcode: " << op;
-      UNREACHABLE();
-  }
-  A64Opcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;
-
-  if (r_dest.Is64Bit()) {
-    CHECK(r_src1.Is64Bit());
-
-    // dest determines whether the op is wide or not. Up-convert src2 when necessary.
-    // Note: this does not follow the AArch64 specification; it is a convention of our encoding.
-    if (!r_src2.Is64Bit()) {
-      r_src2 = As64BitReg(r_src2);
-    }
-  } else {
-    CHECK(!r_src1.Is64Bit());
-    CHECK(!r_src2.Is64Bit());
-  }
-
-  // Sanity check: the extend amount must be in the range 0..4.
-  CHECK_LE(amount, 4);
-
-  return NewLIR4(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(),
-                 EncodeExtend(ext, amount));
-}
-
-LIR* Arm64Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
-  return OpRegRegRegShift(op, r_dest, r_src1, r_src2, ENCODE_NO_SHIFT);
-}
-
-LIR* Arm64Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
-  return OpRegRegImm64(op, r_dest, r_src1, static_cast<int64_t>(value));
-}
-
-LIR* Arm64Mir2Lir::OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1, int64_t value) {
-  LIR* res;
-  bool neg = (value < 0);
-  uint64_t abs_value = (neg && value != LLONG_MIN) ? -value : value;
-  A64Opcode opcode = kA64Brk1d;
-  A64Opcode alt_opcode = kA64Brk1d;
-  bool is_logical = false;
-  bool is_wide = r_dest.Is64Bit();
-  A64Opcode wide = (is_wide) ? WIDE(0) : UNWIDE(0);
-  int info = 0;
-
-  switch (op) {
-    case kOpLsl: {
-      // "lsl w1, w2, #imm" is an alias of "ubfm w1, w2, #(-imm MOD 32), #(31-imm)"
-      // and "lsl x1, x2, #imm" of "ubfm x1, x2, #(-imm MOD 64), #(63-imm)".
-      // For now, we just use ubfm directly.
-      int max_value = (is_wide) ? 63 : 31;
-      return NewLIR4(kA64Ubfm4rrdd | wide, r_dest.GetReg(), r_src1.GetReg(),
-                     (-value) & max_value, max_value - value);
-    }
-    case kOpLsr:
-      return NewLIR3(kA64Lsr3rrd | wide, r_dest.GetReg(), r_src1.GetReg(), value);
-    case kOpAsr:
-      return NewLIR3(kA64Asr3rrd | wide, r_dest.GetReg(), r_src1.GetReg(), value);
-    case kOpRor:
-      // "ror r1, r2, #imm" is an alias of "extr r1, r2, r2, #imm".
-      // For now, we just use extr directly.
-      return NewLIR4(kA64Extr4rrrd | wide, r_dest.GetReg(), r_src1.GetReg(), r_src1.GetReg(),
-                     value);
-    case kOpAdd:
-      neg = !neg;
-      FALLTHROUGH_INTENDED;
-    case kOpSub:
-      // Add and sub below read/write sp rather than xzr.
-      if (abs_value < 0x1000) {
-        opcode = (neg) ? kA64Add4RRdT : kA64Sub4RRdT;
-        return NewLIR4(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), abs_value, 0);
-      } else if ((abs_value & UINT64_C(0xfff)) == 0 && ((abs_value >> 12) < 0x1000)) {
-        opcode = (neg) ? kA64Add4RRdT : kA64Sub4RRdT;
-        return NewLIR4(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), abs_value >> 12, 1);
-      } else {
-        alt_opcode = (op == kOpAdd) ? kA64Add4RRre : kA64Sub4RRre;
-        info = EncodeExtend(is_wide ? kA64Uxtx : kA64Uxtw, 0);
-      }
-      break;
-    case kOpAdc:
-      alt_opcode = kA64Adc3rrr;
-      break;
-    case kOpSbc:
-      alt_opcode = kA64Sbc3rrr;
-      break;
-    case kOpOr:
-      is_logical = true;
-      opcode = kA64Orr3Rrl;
-      alt_opcode = kA64Orr4rrro;
-      break;
-    case kOpAnd:
-      is_logical = true;
-      opcode = kA64And3Rrl;
-      alt_opcode = kA64And4rrro;
-      break;
-    case kOpXor:
-      is_logical = true;
-      opcode = kA64Eor3Rrl;
-      alt_opcode = kA64Eor4rrro;
-      break;
-    case kOpMul:
-      // TUNING: power of 2, shift & add
-      alt_opcode = kA64Mul3rrr;
-      break;
-    default:
-      LOG(FATAL) << "Bad opcode: " << op;
-  }
-
-  if (is_logical) {
-    int log_imm = EncodeLogicalImmediate(is_wide, value);
-    if (log_imm >= 0) {
-      return NewLIR3(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), log_imm);
-    } else {
-      // When the immediate is either 0 or ~0, the logical operation can be trivially reduced
-      // to a (possibly negated) assignment.
-      if (value == 0) {
-        switch (op) {
-          case kOpOr:
-          case kOpXor:
-            // Or/Xor by zero reduces to an assignment.
-            return NewLIR2(kA64Mov2rr | wide, r_dest.GetReg(), r_src1.GetReg());
-          default:
-            // And by zero reduces to a `mov rdest, xzr'.
-            DCHECK(op == kOpAnd);
-            return NewLIR2(kA64Mov2rr | wide, r_dest.GetReg(), (is_wide) ? rxzr : rwzr);
-        }
-      } else if (value == INT64_C(-1)
-                 || (!is_wide && static_cast<uint32_t>(value) == ~UINT32_C(0))) {
-        switch (op) {
-          case kOpAnd:
-            // And by -1 reduces to an assignment.
-            return NewLIR2(kA64Mov2rr | wide, r_dest.GetReg(), r_src1.GetReg());
-          case kOpXor:
-            // Xor by -1 reduces to an `mvn rdest, rsrc'.
-            return NewLIR2(kA64Mvn2rr | wide, r_dest.GetReg(), r_src1.GetReg());
-          default:
-            // Or by -1 reduces to a `mvn rdest, xzr'.
-            DCHECK(op == kOpOr);
-            return NewLIR2(kA64Mvn2rr | wide, r_dest.GetReg(), (is_wide) ? rxzr : rwzr);
-        }
-      }
-    }
-  }
-
-  RegStorage r_scratch;
-  if (is_wide) {
-    r_scratch = AllocTempWide();
-    LoadConstantWide(r_scratch, value);
-  } else {
-    r_scratch = AllocTemp();
-    LoadConstant(r_scratch, value);
-  }
-  if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
-    res = NewLIR4(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), info);
-  else
-    res = NewLIR3(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
-  FreeTemp(r_scratch);
-  return res;
-}
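
A quick standalone check (hypothetical, not part of the backend) of the UBFM operand computation used for kOpLsl above: for a 32-bit `lsl` by #imm the alias is `ubfm wd, wn, #((-imm) MOD 32), #(31 - imm)`, e.g. `lsl w1, w2, #3` becomes `ubfm w1, w2, #29, #28`.

#include <cassert>

int main() {
  const int max_value = 31;  // 63 for the 64-bit form
  for (int imm = 1; imm <= max_value; ++imm) {
    const int immr = (-imm) & max_value;  // same expression as in the code above
    const int imms = max_value - imm;
    assert(immr == (32 - imm) % 32);
    assert(imms == 31 - imm);
  }
  return 0;
}
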
-
-LIR* Arm64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
-  return OpRegImm64(op, r_dest_src1, static_cast<int64_t>(value));
-}
-
-LIR* Arm64Mir2Lir::OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value) {
-  A64Opcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
-  A64Opcode opcode = kA64Brk1d;
-  A64Opcode neg_opcode = kA64Brk1d;
-  bool shift;
-  bool neg = (value < 0);
-  uint64_t abs_value = (neg && value != LLONG_MIN) ? -value : value;
-
-  if (LIKELY(abs_value < 0x1000)) {
-    // abs_value is a 12-bit immediate.
-    shift = false;
-  } else if ((abs_value & UINT64_C(0xfff)) == 0 && ((abs_value >> 12) < 0x1000)) {
-    // abs_value is a shifted 12-bit immediate.
-    shift = true;
-    abs_value >>= 12;
-  } else if (LIKELY(abs_value < 0x1000000 && (op == kOpAdd || op == kOpSub))) {
-    // Note: It is better to use two ADD/SUB instructions than to load the value into a temp register.
-    // This works for both normal registers and SP.
-    // For a frame size == 0x2468, it will be encoded as:
-    //   sub sp, #0x2000
-    //   sub sp, #0x468
-    if (neg) {
-      op = (op == kOpAdd) ? kOpSub : kOpAdd;
-    }
-    OpRegImm64(op, r_dest_src1, abs_value & (~INT64_C(0xfff)));
-    return OpRegImm64(op, r_dest_src1, abs_value & 0xfff);
-  } else {
-    RegStorage r_tmp;
-    LIR* res;
-    if (IS_WIDE(wide)) {
-      r_tmp = AllocTempWide();
-      res = LoadConstantWide(r_tmp, value);
-    } else {
-      r_tmp = AllocTemp();
-      res = LoadConstant(r_tmp, value);
-    }
-    OpRegReg(op, r_dest_src1, r_tmp);
-    FreeTemp(r_tmp);
-    return res;
-  }
-
-  switch (op) {
-    case kOpAdd:
-      neg_opcode = kA64Sub4RRdT;
-      opcode = kA64Add4RRdT;
-      break;
-    case kOpSub:
-      neg_opcode = kA64Add4RRdT;
-      opcode = kA64Sub4RRdT;
-      break;
-    case kOpCmp:
-      neg_opcode = kA64Cmn3RdT;
-      opcode = kA64Cmp3RdT;
-      break;
-    default:
-      LOG(FATAL) << "Bad op-kind in OpRegImm: " << op;
-      break;
-  }
-
-  if (UNLIKELY(neg))
-    opcode = neg_opcode;
-
-  if (EncodingMap[opcode].flags & IS_QUAD_OP)
-    return NewLIR4(opcode | wide, r_dest_src1.GetReg(), r_dest_src1.GetReg(), abs_value,
-                   (shift) ? 1 : 0);
-  else
-    return NewLIR3(opcode | wide, r_dest_src1.GetReg(), abs_value, (shift) ? 1 : 0);
-}
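
The two-instruction split in the kOpAdd/kOpSub path above is easy to sanity-check in isolation. A hypothetical standalone sketch using the frame-size example from the comment:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t abs_value = 0x2468;                 // < 0x1000000
  const uint64_t hi = abs_value & ~UINT64_C(0xfff);  // 0x2000: imm12 with LSL #12
  const uint64_t lo = abs_value & UINT64_C(0xfff);   // 0x468:  plain imm12
  assert(hi + lo == abs_value);
  assert((hi >> 12) < 0x1000 && lo < 0x1000);        // each fits one ADD/SUB
  return 0;
}
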
-
-int Arm64Mir2Lir::EncodeShift(int shift_type, int amount) {
-  DCHECK_EQ(shift_type & 0x3, shift_type);
-  DCHECK_EQ(amount & 0x3f, amount);
-  return ((shift_type & 0x3) << 7) | (amount & 0x3f);
-}
-
-int Arm64Mir2Lir::EncodeExtend(int extend_type, int amount) {
-  DCHECK_EQ(extend_type & 0x7, extend_type);
-  DCHECK_EQ(amount & 0x7, amount);
-  return  (1 << 6) | ((extend_type & 0x7) << 3) | (amount & 0x7);
-}
-
-bool Arm64Mir2Lir::IsExtendEncoding(int encoded_value) {
-  return ((1 << 6) & encoded_value) != 0;
-}
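
The bit layout behind these three helpers: a shift operand uses bits [8:7] for the shift type and [5:0] for the amount, while an extend operand sets bit 6 as a discriminator and uses bits [5:3]/[2:0]. Since a shift amount never reaches bit 6, the two encodings cannot collide. A minimal sketch (standalone copies of the expressions above):

#include <cassert>

int EncodeShiftDemo(int type, int amount)  { return ((type & 0x3) << 7) | (amount & 0x3f); }
int EncodeExtendDemo(int type, int amount) { return (1 << 6) | ((type & 0x7) << 3) | (amount & 0x7); }
bool IsExtendDemo(int encoded)             { return (encoded & (1 << 6)) != 0; }

int main() {
  assert(!IsExtendDemo(EncodeShiftDemo(0x3, 63)));  // even the max shift leaves bit 6 clear
  assert(IsExtendDemo(EncodeExtendDemo(0x7, 4)));
  return 0;
}
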
-
-LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
-                                   int scale, OpSize size) {
-  LIR* load;
-  int expected_scale = 0;
-  A64Opcode opcode = kA64Brk1d;
-  r_base = Check64BitReg(r_base);
-
-  // TODO(Arm64): The sign extension of r_index should be carried out by using an extended
-  //   register offset load (rather than doing the sign extension in a separate instruction).
-  if (r_index.Is32Bit()) {
-    // Assemble: ``sxtw xN, wN''.
-    r_index = As64BitReg(r_index);
-    NewLIR4(WIDE(kA64Sbfm4rrdd), r_index.GetReg(), r_index.GetReg(), 0, 31);
-  }
-
-  if (r_dest.IsFloat()) {
-    if (r_dest.IsDouble()) {
-      DCHECK(size == k64 || size == kDouble);
-      expected_scale = 3;
-      opcode = WIDE(kA64Ldr4fXxG);
-    } else {
-      DCHECK(r_dest.IsSingle());
-      DCHECK(size == k32 || size == kSingle);
-      expected_scale = 2;
-      opcode = kA64Ldr4fXxG;
-    }
-
-    DCHECK(scale == 0 || scale == expected_scale);
-    return NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
-                   (scale != 0) ? 1 : 0);
-  }
-
-  switch (size) {
-    case kDouble:
-    case kWord:
-    case k64:
-      r_dest = Check64BitReg(r_dest);
-      opcode = WIDE(kA64Ldr4rXxG);
-      expected_scale = 3;
-      break;
-    case kReference:
-      r_dest = As32BitReg(r_dest);
-      FALLTHROUGH_INTENDED;
-    case kSingle:     // Intentional fall-through.
-    case k32:
-      r_dest = Check32BitReg(r_dest);
-      opcode = kA64Ldr4rXxG;
-      expected_scale = 2;
-      break;
-    case kUnsignedHalf:
-      r_dest = Check32BitReg(r_dest);
-      opcode = kA64Ldrh4wXxd;
-      expected_scale = 1;
-      break;
-    case kSignedHalf:
-      r_dest = Check32BitReg(r_dest);
-      opcode = kA64Ldrsh4rXxd;
-      expected_scale = 1;
-      break;
-    case kUnsignedByte:
-      r_dest = Check32BitReg(r_dest);
-      opcode = kA64Ldrb3wXx;
-      break;
-    case kSignedByte:
-      r_dest = Check32BitReg(r_dest);
-      opcode = kA64Ldrsb3rXx;
-      break;
-    default:
-      LOG(FATAL) << "Bad size: " << size;
-  }
-
-  if (UNLIKELY(expected_scale == 0)) {
-    // This is a tertiary op (e.g. ldrb, ldrsb); it does not support a scale operand.
-    DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
-    DCHECK_EQ(scale, 0);
-    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
-  } else {
-    DCHECK(scale == 0 || scale == expected_scale);
-    load = NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
-                   (scale != 0) ? 1 : 0);
-  }
-
-  return load;
-}
-
-LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
-                                    int scale, OpSize size) {
-  LIR* store;
-  int expected_scale = 0;
-  A64Opcode opcode = kA64Brk1d;
-  r_base = Check64BitReg(r_base);
-
-  // TODO(Arm64): The sign extension of r_index should be carried out by using an extended
-  //   register offset store (rather than doing the sign extension in a separate instruction).
-  if (r_index.Is32Bit()) {
-    // Assemble: ``sxtw xN, wN''.
-    r_index = As64BitReg(r_index);
-    NewLIR4(WIDE(kA64Sbfm4rrdd), r_index.GetReg(), r_index.GetReg(), 0, 31);
-  }
-
-  if (r_src.IsFloat()) {
-    if (r_src.IsDouble()) {
-      DCHECK(size == k64 || size == kDouble);
-      expected_scale = 3;
-      opcode = WIDE(kA64Str4fXxG);
-    } else {
-      DCHECK(r_src.IsSingle());
-      DCHECK(size == k32 || size == kSingle);
-      expected_scale = 2;
-      opcode = kA64Str4fXxG;
-    }
-
-    DCHECK(scale == 0 || scale == expected_scale);
-    return NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(),
-                   (scale != 0) ? 1 : 0);
-  }
-
-  switch (size) {
-    case kDouble:     // Intentional fall-through.
-    case kWord:       // Intentional fall-through.
-    case k64:
-      r_src = Check64BitReg(r_src);
-      opcode = WIDE(kA64Str4rXxG);
-      expected_scale = 3;
-      break;
-    case kReference:
-      r_src = As32BitReg(r_src);
-      FALLTHROUGH_INTENDED;
-    case kSingle:     // Intentional fall-through.
-    case k32:
-      r_src = Check32BitReg(r_src);
-      opcode = kA64Str4rXxG;
-      expected_scale = 2;
-      break;
-    case kUnsignedHalf:
-    case kSignedHalf:
-      r_src = Check32BitReg(r_src);
-      opcode = kA64Strh4wXxd;
-      expected_scale = 1;
-      break;
-    case kUnsignedByte:
-    case kSignedByte:
-      r_src = Check32BitReg(r_src);
-      opcode = kA64Strb3wXx;
-      break;
-    default:
-      LOG(FATAL) << "Bad size: " << size;
-  }
-
-  if (UNLIKELY(expected_scale == 0)) {
-    // This is a tertiary op (e.g. strb); it does not support a scale operand.
-    DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
-    DCHECK_EQ(scale, 0);
-    store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg());
-  } else {
-    store = NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(),
-                    (scale != 0) ? 1 : 0);
-  }
-
-  return store;
-}
-
-/*
- * Load value from base + displacement.  Optionally perform null check
- * on base (which must have an associated s_reg and MIR).  If not
- * performing null check, incoming MIR can be null.
- */
-LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
-                                    OpSize size) {
-  LIR* load = nullptr;
-  A64Opcode opcode = kA64Brk1d;
-  A64Opcode alt_opcode = kA64Brk1d;
-  int scale = 0;
-
-  switch (size) {
-    case kDouble:     // Intentional fall-through.
-    case kWord:       // Intentional fall-through.
-    case k64:
-      r_dest = Check64BitReg(r_dest);
-      scale = 3;
-      if (r_dest.IsFloat()) {
-        DCHECK(r_dest.IsDouble());
-        opcode = WIDE(kA64Ldr3fXD);
-        alt_opcode = WIDE(kA64Ldur3fXd);
-      } else {
-        opcode = WIDE(kA64Ldr3rXD);
-        alt_opcode = WIDE(kA64Ldur3rXd);
-      }
-      break;
-    case kReference:
-      r_dest = As32BitReg(r_dest);
-      FALLTHROUGH_INTENDED;
-    case kSingle:     // Intentional fall-through.
-    case k32:
-      r_dest = Check32BitReg(r_dest);
-      scale = 2;
-      if (r_dest.IsFloat()) {
-        DCHECK(r_dest.IsSingle());
-        opcode = kA64Ldr3fXD;
-      } else {
-        opcode = kA64Ldr3rXD;
-      }
-      break;
-    case kUnsignedHalf:
-      scale = 1;
-      opcode = kA64Ldrh3wXF;
-      break;
-    case kSignedHalf:
-      scale = 1;
-      opcode = kA64Ldrsh3rXF;
-      break;
-    case kUnsignedByte:
-      opcode = kA64Ldrb3wXd;
-      break;
-    case kSignedByte:
-      opcode = kA64Ldrsb3rXd;
-      break;
-    default:
-      LOG(FATAL) << "Bad size: " << size;
-  }
-
-  bool displacement_is_aligned = (displacement & ((1 << scale) - 1)) == 0;
-  int scaled_disp = displacement >> scale;
-  if (displacement_is_aligned && scaled_disp >= 0 && scaled_disp < 4096) {
-    // Can use scaled load.
-    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), scaled_disp);
-  } else if (alt_opcode != kA64Brk1d && IS_SIGNED_IMM9(displacement)) {
-    // Can use unscaled load.
-    load = NewLIR3(alt_opcode, r_dest.GetReg(), r_base.GetReg(), displacement);
-  } else {
-    // Use long sequence.
-    // TODO: cleaner support for index/displacement registers?  Not a reference, but must match width.
-    RegStorage r_scratch = AllocTempWide();
-    LoadConstantWide(r_scratch, displacement);
-    load = LoadBaseIndexed(r_base, r_scratch,
-                           (size == kReference) ? As64BitReg(r_dest) : r_dest,
-                           0, size);
-    FreeTemp(r_scratch);
-  }
-
-  // TODO: in future may need to differentiate Dalvik accesses w/ spills
-  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
-    DCHECK_EQ(r_base, rs_sp);
-    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
-  }
-  return load;
-}
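
The displacement handling above picks between three addressing modes. A hypothetical standalone model of the decision (assuming IS_SIGNED_IMM9 checks the usual [-256, 255] range):

#include <cassert>

enum class Mode { kScaled, kUnscaled, kLong };

Mode ChooseMode(int displacement, int scale) {
  const bool aligned = (displacement & ((1 << scale) - 1)) == 0;
  const int scaled_disp = displacement >> scale;
  if (aligned && scaled_disp >= 0 && scaled_disp < 4096) {
    return Mode::kScaled;                      // LDR with unsigned, scaled imm12
  }
  if (displacement >= -256 && displacement <= 255) {
    return Mode::kUnscaled;                    // LDUR with signed imm9
  }
  return Mode::kLong;                          // load displacement into a scratch reg
}

int main() {
  assert(ChooseMode(8, 3) == Mode::kScaled);     // ldr x0, [x1, #8]
  assert(ChooseMode(-8, 3) == Mode::kUnscaled);  // ldur x0, [x1, #-8]
  assert(ChooseMode(40000, 3) == Mode::kLong);   // falls back to LoadBaseIndexed
  return 0;
}
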
-
-LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
-                                OpSize size, VolatileKind is_volatile) {
-  // LoadBaseDisp() will emit correct insn for atomic load on arm64
-  // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
-
-  LIR* load = LoadBaseDispBody(r_base, displacement, r_dest, size);
-
-  if (UNLIKELY(is_volatile == kVolatile)) {
-    // TODO: This should generate an acquire load instead of the barrier.
-    GenMemBarrier(kLoadAny);
-  }
-
-  return load;
-}
-
-LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
-                                     OpSize size) {
-  LIR* store = nullptr;
-  A64Opcode opcode = kA64Brk1d;
-  A64Opcode alt_opcode = kA64Brk1d;
-  int scale = 0;
-
-  switch (size) {
-    case kDouble:     // Intentional fall-through.
-    case kWord:       // Intentional fall-through.
-    case k64:
-      r_src = Check64BitReg(r_src);
-      scale = 3;
-      if (r_src.IsFloat()) {
-        DCHECK(r_src.IsDouble());
-        opcode = WIDE(kA64Str3fXD);
-        alt_opcode = WIDE(kA64Stur3fXd);
-      } else {
-        opcode = WIDE(kA64Str3rXD);
-        alt_opcode = WIDE(kA64Stur3rXd);
-      }
-      break;
-    case kReference:
-      r_src = As32BitReg(r_src);
-      FALLTHROUGH_INTENDED;
-    case kSingle:     // Intentional fall-through.
-    case k32:
-      r_src = Check32BitReg(r_src);
-      scale = 2;
-      if (r_src.IsFloat()) {
-        DCHECK(r_src.IsSingle());
-        opcode = kA64Str3fXD;
-      } else {
-        opcode = kA64Str3rXD;
-      }
-      break;
-    case kUnsignedHalf:
-    case kSignedHalf:
-      scale = 1;
-      opcode = kA64Strh3wXF;
-      break;
-    case kUnsignedByte:
-    case kSignedByte:
-      opcode = kA64Strb3wXd;
-      break;
-    default:
-      LOG(FATAL) << "Bad size: " << size;
-  }
-
-  bool displacement_is_aligned = (displacement & ((1 << scale) - 1)) == 0;
-  int scaled_disp = displacement >> scale;
-  if (displacement_is_aligned && scaled_disp >= 0 && scaled_disp < 4096) {
-    // Can use scaled store.
-    store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), scaled_disp);
-  } else if (alt_opcode != kA64Brk1d && IS_SIGNED_IMM9(displacement)) {
-    // Can use unscaled store.
-    store = NewLIR3(alt_opcode, r_src.GetReg(), r_base.GetReg(), displacement);
-  } else {
-    // Use long sequence.
-    RegStorage r_scratch = AllocTempWide();
-    LoadConstantWide(r_scratch, displacement);
-    store = StoreBaseIndexed(r_base, r_scratch,
-                             (size == kReference) ? As64BitReg(r_src) : r_src,
-                             0, size);
-    FreeTemp(r_scratch);
-  }
-
-  // TODO: In future, may need to differentiate Dalvik & spill accesses.
-  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
-    DCHECK_EQ(r_base, rs_sp);
-    AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
-  }
-  return store;
-}
-
-LIR* Arm64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
-                                 OpSize size, VolatileKind is_volatile) {
-  // TODO: This should generate a release store and no barriers.
-  if (UNLIKELY(is_volatile == kVolatile)) {
-    // Ensure that prior accesses become visible to other threads first.
-    GenMemBarrier(kAnyStore);
-  }
-
-  // StoreBaseDisp() will emit correct insn for atomic store on arm64
-  // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
-
-  LIR* store = StoreBaseDispBody(r_base, displacement, r_src, size);
-
-  if (UNLIKELY(is_volatile == kVolatile)) {
-    // Preserve order with respect to any subsequent volatile loads.
-    // We need StoreLoad, but that generally requires the most expensive barrier.
-    GenMemBarrier(kAnyAny);
-  }
-
-  return store;
-}
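
For context on the TODOs here and in LoadBaseDisp: ARMv8 provides load-acquire/store-release instructions (ldar/stlr) whose semantics are strong enough that a volatile (sequentially consistent) load/store pair can in principle be mapped to them with no standalone dmb barriers, which is what the comments are asking for. The barrier-based sequence above is correct, just conservative.
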
-
-LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest ATTRIBUTE_UNUSED,
-                               RegStorage r_src ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpFpRegCopy for Arm64";
-  UNREACHABLE();
-}
-
-LIR* Arm64Mir2Lir::OpMem(OpKind op ATTRIBUTE_UNUSED,
-                         RegStorage r_base ATTRIBUTE_UNUSED,
-                         int disp ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpMem for Arm64";
-  UNREACHABLE();
-}
-
-LIR* Arm64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt,
-                                    QuickEntrypointEnum trampoline ATTRIBUTE_UNUSED) {
-  // The address of the trampoline is already loaded into r_tgt.
-  return OpReg(op, r_tgt);
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
deleted file mode 100644
index af6f91f..0000000
--- a/compiler/dex/quick/codegen_util.cc
+++ /dev/null
@@ -1,1451 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "mir_to_lir-inl.h"
-
-// Mac does not provide endian.h, so we'll use byte order agnostic code.
-#ifndef __APPLE__
-#include <endian.h>
-#endif
-
-#include "base/bit_vector-inl.h"
-#include "base/stringprintf.h"
-#include "dex/mir_graph.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
-#include "driver/dex_compilation_unit.h"
-#include "dex_file-inl.h"
-#include "gc_map.h"
-#include "gc_map_builder.h"
-#include "mapping_table.h"
-#include "dex/quick/dex_file_method_inliner.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
-#include "dex/verification_results.h"
-#include "dex/verified_method.h"
-#include "utils/dex_cache_arrays_layout-inl.h"
-#include "verifier/dex_gc_map.h"
-#include "verifier/method_verifier.h"
-#include "vmap_table.h"
-
-namespace art {
-
-namespace {
-
-/* Dump a mapping table */
-template <typename It>
-void DumpMappingTable(const char* table_name, const char* descriptor, const char* name,
-                      const Signature& signature, uint32_t size, It first) {
-  if (size != 0) {
-    std::string line(StringPrintf("\n  %s %s%s_%s_table[%u] = {", table_name,
-                     descriptor, name, signature.ToString().c_str(), size));
-    std::replace(line.begin(), line.end(), ';', '_');
-    LOG(INFO) << line;
-    for (uint32_t i = 0; i != size; ++i) {
-      line = StringPrintf("    {0x%05x, 0x%04x},", first.NativePcOffset(), first.DexPc());
-      ++first;
-      LOG(INFO) << line;
-    }
-    LOG(INFO) <<"  };\n\n";
-  }
-}
-
-}  // anonymous namespace
-
-bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) {
-  bool res = false;
-  if (rl_src.is_const) {
-    if (rl_src.wide) {
-      // For wide registers, check whether we're the high partner. In that case we need to switch
-      // to the lower one for the correct value.
-      if (rl_src.high_word) {
-        rl_src.high_word = false;
-        rl_src.s_reg_low--;
-        rl_src.orig_sreg--;
-      }
-      if (rl_src.fp) {
-        res = InexpensiveConstantDouble(mir_graph_->ConstantValueWide(rl_src));
-      } else {
-        res = InexpensiveConstantLong(mir_graph_->ConstantValueWide(rl_src));
-      }
-    } else {
-      if (rl_src.fp) {
-        res = InexpensiveConstantFloat(mir_graph_->ConstantValue(rl_src));
-      } else {
-        res = InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src));
-      }
-    }
-  }
-  return res;
-}
-
-void Mir2Lir::MarkSafepointPC(LIR* inst) {
-  DCHECK(!inst->flags.use_def_invalid);
-  inst->u.m.def_mask = &kEncodeAll;
-  LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
-  DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
-  DCHECK(current_mir_ != nullptr || (current_dalvik_offset_ == 0 && safepoints_.empty()));
-  safepoints_.emplace_back(safepoint_pc, current_mir_);
-}
-
-void Mir2Lir::MarkSafepointPCAfter(LIR* after) {
-  DCHECK(!after->flags.use_def_invalid);
-  after->u.m.def_mask = &kEncodeAll;
-  // As NewLIR0 uses Append, we need to create the LIR by hand.
-  LIR* safepoint_pc = RawLIR(current_dalvik_offset_, kPseudoSafepointPC);
-  if (after->next == nullptr) {
-    DCHECK_EQ(after, last_lir_insn_);
-    AppendLIR(safepoint_pc);
-  } else {
-    InsertLIRAfter(after, safepoint_pc);
-  }
-  DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
-  DCHECK(current_mir_ != nullptr || (current_dalvik_offset_ == 0 && safepoints_.empty()));
-  safepoints_.emplace_back(safepoint_pc, current_mir_);
-}
-
-/* Remove a LIR from the list. */
-void Mir2Lir::UnlinkLIR(LIR* lir) {
-  if (UNLIKELY(lir == first_lir_insn_)) {
-    first_lir_insn_ = lir->next;
-    if (lir->next != nullptr) {
-      lir->next->prev = nullptr;
-    } else {
-      DCHECK(lir->next == nullptr);
-      DCHECK(lir == last_lir_insn_);
-      last_lir_insn_ = nullptr;
-    }
-  } else if (lir == last_lir_insn_) {
-    last_lir_insn_ = lir->prev;
-    lir->prev->next = nullptr;
-  } else if ((lir->prev != nullptr) && (lir->next != nullptr)) {
-    lir->prev->next = lir->next;
-    lir->next->prev = lir->prev;
-  }
-}
-
-/* Convert an instruction to a NOP */
-void Mir2Lir::NopLIR(LIR* lir) {
-  lir->flags.is_nop = true;
-  if (!cu_->verbose) {
-    UnlinkLIR(lir);
-  }
-}
-
-void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) {
-  DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
-  DCHECK(!lir->flags.use_def_invalid);
-  // TODO: Avoid the extra Arena allocation!
-  const ResourceMask** mask_ptr;
-  ResourceMask mask;
-  if (is_load) {
-    mask_ptr = &lir->u.m.use_mask;
-  } else {
-    mask_ptr = &lir->u.m.def_mask;
-  }
-  mask = **mask_ptr;
-  /* Clear out the memref flags */
-  mask.ClearBits(kEncodeMem);
-  /* ..and then add back the one we need */
-  switch (mem_type) {
-    case ResourceMask::kLiteral:
-      DCHECK(is_load);
-      mask.SetBit(ResourceMask::kLiteral);
-      break;
-    case ResourceMask::kDalvikReg:
-      mask.SetBit(ResourceMask::kDalvikReg);
-      break;
-    case ResourceMask::kHeapRef:
-      mask.SetBit(ResourceMask::kHeapRef);
-      break;
-    case ResourceMask::kMustNotAlias:
-      /* Currently only loads can be marked as kMustNotAlias */
-      DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE));
-      mask.SetBit(ResourceMask::kMustNotAlias);
-      break;
-    default:
-      LOG(FATAL) << "Oat: invalid memref kind - " << mem_type;
-  }
-  *mask_ptr = mask_cache_.GetMask(mask);
-}
-
-/*
- * Mark load/store instructions that access Dalvik registers through the stack.
- */
-void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load,
-                                      bool is64bit) {
-  DCHECK((is_load ? lir->u.m.use_mask : lir->u.m.def_mask)->Intersection(kEncodeMem).Equals(
-      kEncodeDalvikReg));
-
-  /*
-   * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit
-   * access.
-   */
-  lir->flags.alias_info = ENCODE_ALIAS_INFO(reg_id, is64bit);
-}
-
-/*
- * Debugging macros
- */
-#define DUMP_RESOURCE_MASK(X)
-
-/* Pretty-print a LIR instruction */
-void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) {
-  int offset = lir->offset;
-  int dest = lir->operands[0];
-  const bool dump_nop = (cu_->enable_debug & (1 << kDebugShowNops));
-
-  /* Handle pseudo-ops individually, and all regular insns as a group */
-  switch (lir->opcode) {
-    case kPseudoPrologueBegin:
-      LOG(INFO) << "-------- PrologueBegin";
-      break;
-    case kPseudoPrologueEnd:
-      LOG(INFO) << "-------- PrologueEnd";
-      break;
-    case kPseudoEpilogueBegin:
-      LOG(INFO) << "-------- EpilogueBegin";
-      break;
-    case kPseudoEpilogueEnd:
-      LOG(INFO) << "-------- EpilogueEnd";
-      break;
-    case kPseudoBarrier:
-      LOG(INFO) << "-------- BARRIER";
-      break;
-    case kPseudoEntryBlock:
-      LOG(INFO) << "-------- entry offset: 0x" << std::hex << dest;
-      break;
-    case kPseudoDalvikByteCodeBoundary:
-      if (lir->operands[0] == 0) {
-         // NOTE: only used for debug listings.
-         lir->operands[0] = WrapPointer(ArenaStrdup("No instruction string"));
-      }
-      LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
-                << lir->dalvik_offset << " @ "
-                << UnwrapPointer<char>(lir->operands[0]);
-      break;
-    case kPseudoExitBlock:
-      LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest;
-      break;
-    case kPseudoPseudoAlign4:
-      LOG(INFO) << reinterpret_cast<uintptr_t>(base_addr) + offset << " (0x" << std::hex
-                << offset << "): .align4";
-      break;
-    case kPseudoEHBlockLabel:
-      LOG(INFO) << "Exception_Handling:";
-      break;
-    case kPseudoTargetLabel:
-    case kPseudoNormalBlockLabel:
-      LOG(INFO) << "L" << reinterpret_cast<void*>(lir) << ":";
-      break;
-    case kPseudoThrowTarget:
-      LOG(INFO) << "LT" << reinterpret_cast<void*>(lir) << ":";
-      break;
-    case kPseudoIntrinsicRetry:
-      LOG(INFO) << "IR" << reinterpret_cast<void*>(lir) << ":";
-      break;
-    case kPseudoSuspendTarget:
-      LOG(INFO) << "LS" << reinterpret_cast<void*>(lir) << ":";
-      break;
-    case kPseudoSafepointPC:
-      LOG(INFO) << "LsafepointPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
-      break;
-    case kPseudoExportedPC:
-      LOG(INFO) << "LexportedPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
-      break;
-    case kPseudoCaseLabel:
-      LOG(INFO) << "LC" << reinterpret_cast<void*>(lir) << ": Case target 0x"
-                << std::hex << lir->operands[0] << "|" << std::dec <<
-        lir->operands[0];
-      break;
-    default:
-      if (lir->flags.is_nop && !dump_nop) {
-        break;
-      } else {
-        std::string op_name(BuildInsnString(GetTargetInstName(lir->opcode),
-                                               lir, base_addr));
-        std::string op_operands(BuildInsnString(GetTargetInstFmt(lir->opcode),
-                                                    lir, base_addr));
-        LOG(INFO) << StringPrintf("%5p|0x%02x: %-9s%s%s",
-                                  base_addr + offset,
-                                  lir->dalvik_offset,
-                                  op_name.c_str(), op_operands.c_str(),
-                                  lir->flags.is_nop ? "(nop)" : "");
-      }
-      break;
-  }
-
-  if (lir->u.m.use_mask && (!lir->flags.is_nop || dump_nop)) {
-    DUMP_RESOURCE_MASK(DumpResourceMask(lir, *lir->u.m.use_mask, "use"));
-  }
-  if (lir->u.m.def_mask && (!lir->flags.is_nop || dump_nop)) {
-    DUMP_RESOURCE_MASK(DumpResourceMask(lir, *lir->u.m.def_mask, "def"));
-  }
-}
-
-void Mir2Lir::DumpPromotionMap() {
-  uint32_t num_regs = mir_graph_->GetNumOfCodeAndTempVRs();
-  for (uint32_t i = 0; i < num_regs; i++) {
-    PromotionMap v_reg_map = promotion_map_[i];
-    std::string buf;
-    if (v_reg_map.fp_location == kLocPhysReg) {
-      StringAppendF(&buf, " : s%d", RegStorage::RegNum(v_reg_map.fp_reg));
-    }
-
-    std::string buf3;
-    if (i < mir_graph_->GetNumOfCodeVRs()) {
-      StringAppendF(&buf3, "%02d", i);
-    } else if (i == mir_graph_->GetNumOfCodeVRs()) {
-      buf3 = "Method*";
-    } else {
-      uint32_t diff = i - mir_graph_->GetNumOfCodeVRs();
-      StringAppendF(&buf3, "ct%d", diff);
-    }
-
-    LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(),
-                              v_reg_map.core_location == kLocPhysReg ?
-                              "r" : "SP+", v_reg_map.core_location == kLocPhysReg ?
-                              v_reg_map.core_reg : SRegOffset(i),
-                              buf.c_str());
-  }
-}
-
-void Mir2Lir::UpdateLIROffsets() {
-  // Only used for code listings.
-  size_t offset = 0;
-  for (LIR* lir = first_lir_insn_; lir != nullptr; lir = lir->next) {
-    lir->offset = offset;
-    if (!lir->flags.is_nop && !IsPseudoLirOp(lir->opcode)) {
-      offset += GetInsnSize(lir);
-    } else if (lir->opcode == kPseudoPseudoAlign4) {
-      offset += (offset & 0x2);
-    }
-  }
-}
-
-void Mir2Lir::MarkGCCard(int opt_flags, RegStorage val_reg, RegStorage tgt_addr_reg) {
-  DCHECK(val_reg.Valid());
-  DCHECK_EQ(val_reg.Is64Bit(), cu_->target64);
-  if ((opt_flags & MIR_STORE_NON_NULL_VALUE) != 0) {
-    UnconditionallyMarkGCCard(tgt_addr_reg);
-  } else {
-    LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, nullptr);
-    UnconditionallyMarkGCCard(tgt_addr_reg);
-    LIR* target = NewLIR0(kPseudoTargetLabel);
-    branch_over->target = target;
-  }
-}
-
-/* Dump instructions and constant pool contents */
-void Mir2Lir::CodegenDump() {
-  LOG(INFO) << "Dumping LIR insns for "
-            << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-  LIR* lir_insn;
-  int insns_size = mir_graph_->GetNumDalvikInsns();
-
-  LOG(INFO) << "Regs (excluding ins) : " << mir_graph_->GetNumOfLocalCodeVRs();
-  LOG(INFO) << "Ins          : " << mir_graph_->GetNumOfInVRs();
-  LOG(INFO) << "Outs         : " << mir_graph_->GetNumOfOutVRs();
-  LOG(INFO) << "CoreSpills       : " << num_core_spills_;
-  LOG(INFO) << "FPSpills       : " << num_fp_spills_;
-  LOG(INFO) << "CompilerTemps    : " << mir_graph_->GetNumUsedCompilerTemps();
-  LOG(INFO) << "Frame size       : " << frame_size_;
-  LOG(INFO) << "code size is " << total_size_ <<
-    " bytes, Dalvik size is " << insns_size * 2;
-  LOG(INFO) << "expansion factor: "
-            << static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
-  DumpPromotionMap();
-  UpdateLIROffsets();
-  for (lir_insn = first_lir_insn_; lir_insn != nullptr; lir_insn = lir_insn->next) {
-    DumpLIRInsn(lir_insn, nullptr);
-  }
-  for (lir_insn = literal_list_; lir_insn != nullptr; lir_insn = lir_insn->next) {
-    LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
-                              lir_insn->operands[0]);
-  }
-
-  const DexFile::MethodId& method_id =
-      cu_->dex_file->GetMethodId(cu_->method_idx);
-  const Signature signature = cu_->dex_file->GetMethodSignature(method_id);
-  const char* name = cu_->dex_file->GetMethodName(method_id);
-  const char* descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id));
-
-  // Dump mapping tables
-  if (!encoded_mapping_table_.empty()) {
-    MappingTable table(&encoded_mapping_table_[0]);
-    DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature,
-                     table.PcToDexSize(), table.PcToDexBegin());
-    DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature,
-                     table.DexToPcSize(), table.DexToPcBegin());
-  }
-}
-
-/*
- * Search the existing constants in the literal pool for an exact or close match
- * within the specified delta (which must be greater than or equal to 0).
- */
-LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) {
-  while (data_target) {
-    if ((static_cast<unsigned>(value - data_target->operands[0])) <= delta)
-      return data_target;
-    data_target = data_target->next;
-  }
-  return nullptr;
-}
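
The unsigned comparison above is a compact interval check: casting `value - operands[0]` to unsigned rejects any pool entry greater than value (the difference wraps to a huge number) and accepts exactly the entries in [value - delta, value], all in a single compare.
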
-
-/* Search the existing constants in the literal pool for an exact wide match */
-LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) {
-  bool lo_match = false;
-  LIR* lo_target = nullptr;
-  while (data_target) {
-    if (lo_match && (data_target->operands[0] == val_hi)) {
-      // Record high word in case we need to expand this later.
-      lo_target->operands[1] = val_hi;
-      return lo_target;
-    }
-    lo_match = false;
-    if (data_target->operands[0] == val_lo) {
-      lo_match = true;
-      lo_target = data_target;
-    }
-    data_target = data_target->next;
-  }
-  return nullptr;
-}
-
-/* Search the existing constants in the literal pool for an exact method match */
-LIR* Mir2Lir::ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method) {
-  while (data_target) {
-    if (static_cast<uint32_t>(data_target->operands[0]) == method.dex_method_index &&
-        UnwrapPointer<DexFile>(data_target->operands[1]) == method.dex_file) {
-      return data_target;
-    }
-    data_target = data_target->next;
-  }
-  return nullptr;
-}
-
-/* Search the existing constants in the literal pool for an exact class match */
-LIR* Mir2Lir::ScanLiteralPoolClass(LIR* data_target, const DexFile& dex_file, uint32_t type_idx) {
-  while (data_target) {
-    if (static_cast<uint32_t>(data_target->operands[0]) == type_idx &&
-        UnwrapPointer<DexFile>(data_target->operands[1]) == &dex_file) {
-      return data_target;
-    }
-    data_target = data_target->next;
-  }
-  return nullptr;
-}
-
-/*
- * The following are building blocks to insert constants into the pool or
- * instruction streams.
- */
-
-/* Add a 32-bit constant to the constant pool */
-LIR* Mir2Lir::AddWordData(LIR** constant_list_p, int value) {
-  /* Add the constant to the literal pool */
-  if (constant_list_p) {
-    LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
-    new_value->operands[0] = value;
-    new_value->next = *constant_list_p;
-    *constant_list_p = new_value;
-    estimated_native_code_size_ += sizeof(value);
-    return new_value;
-  }
-  return nullptr;
-}
-
-/* Add a 64-bit constant to the constant pool or mixed with code */
-LIR* Mir2Lir::AddWideData(LIR** constant_list_p, int val_lo, int val_hi) {
-  AddWordData(constant_list_p, val_hi);
-  return AddWordData(constant_list_p, val_lo);
-}
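
Note the ordering contract these two functions establish: AddWordData prepends to the list, so pushing val_hi first and val_lo second leaves the low word immediately before the high word when walking the list. ScanLiteralPoolWide (above) depends on exactly this layout when it matches val_lo and then checks the next node for val_hi.
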
-
-/**
- * @brief Push a compressed reference which needs patching at link/patchoat-time.
- * @details This needs to be kept consistent with the code which actually does the patching in
- *   oat_writer.cc and in the patchoat tool.
- */
-static void PushUnpatchedReference(CodeBuffer* buf) {
-  // Note that we can safely initialize the patches to zero. The code deduplication mechanism takes
-  // the patches into account when determining whether two pieces of codes are functionally
-  // equivalent.
-  Push32(buf, UINT32_C(0));
-}
-
-static void AlignBuffer(CodeBuffer* buf, size_t offset) {
-  DCHECK_LE(buf->size(), offset);
-  buf->insert(buf->end(), offset - buf->size(), 0u);
-}
-
-/* Write the literal pool to the output stream */
-void Mir2Lir::InstallLiteralPools() {
-  AlignBuffer(&code_buffer_, data_offset_);
-  LIR* data_lir = literal_list_;
-  while (data_lir != nullptr) {
-    Push32(&code_buffer_, data_lir->operands[0]);
-    data_lir = NEXT_LIR(data_lir);
-  }
-  // TODO: patches_.reserve() as needed.
-  // Push code and method literals, record offsets for the compiler to patch.
-  data_lir = code_literal_list_;
-  while (data_lir != nullptr) {
-    uint32_t target_method_idx = data_lir->operands[0];
-    const DexFile* target_dex_file = UnwrapPointer<DexFile>(data_lir->operands[1]);
-    patches_.push_back(LinkerPatch::CodePatch(code_buffer_.size(),
-                                              target_dex_file, target_method_idx));
-    PushUnpatchedReference(&code_buffer_);
-    data_lir = NEXT_LIR(data_lir);
-  }
-  data_lir = method_literal_list_;
-  while (data_lir != nullptr) {
-    uint32_t target_method_idx = data_lir->operands[0];
-    const DexFile* target_dex_file = UnwrapPointer<DexFile>(data_lir->operands[1]);
-    patches_.push_back(LinkerPatch::MethodPatch(code_buffer_.size(),
-                                                target_dex_file, target_method_idx));
-    PushUnpatchedReference(&code_buffer_);
-    data_lir = NEXT_LIR(data_lir);
-  }
-  // Push class literals.
-  data_lir = class_literal_list_;
-  while (data_lir != nullptr) {
-    uint32_t target_type_idx = data_lir->operands[0];
-    const DexFile* class_dex_file = UnwrapPointer<DexFile>(data_lir->operands[1]);
-    patches_.push_back(LinkerPatch::TypePatch(code_buffer_.size(),
-                                              class_dex_file, target_type_idx));
-    PushUnpatchedReference(&code_buffer_);
-    data_lir = NEXT_LIR(data_lir);
-  }
-}
-
-/* Write the switch tables to the output stream */
-void Mir2Lir::InstallSwitchTables() {
-  for (Mir2Lir::SwitchTable* tab_rec : switch_tables_) {
-    AlignBuffer(&code_buffer_, tab_rec->offset);
-    /*
-     * For Arm, our reference point is the address of the bx
-     * instruction that does the launch, so we have to subtract
-     * the auto pc-advance.  For other targets the reference point
-     * is a label, so we can use the offset as-is.
-     */
-    int bx_offset = INVALID_OFFSET;
-    switch (cu_->instruction_set) {
-      case kThumb2:
-        DCHECK(tab_rec->anchor->flags.fixup != kFixupNone);
-        bx_offset = tab_rec->anchor->offset + 4;
-        break;
-      case kX86_64:
-        // RIP relative to switch table.
-        bx_offset = tab_rec->offset;
-        break;
-      case kX86:
-      case kArm64:
-      case kMips:
-      case kMips64:
-        bx_offset = tab_rec->anchor->offset;
-        break;
-      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
-    }
-    if (cu_->verbose) {
-      LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
-    }
-    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
-      DCHECK(tab_rec->switch_mir != nullptr);
-      BasicBlock* bb = mir_graph_->GetBasicBlock(tab_rec->switch_mir->bb);
-      DCHECK(bb != nullptr);
-      int elems = 0;
-      for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
-        int key = successor_block_info->key;
-        int target = successor_block_info->block;
-        LIR* boundary_lir = InsertCaseLabel(target, key);
-        DCHECK(boundary_lir != nullptr);
-        int disp = boundary_lir->offset - bx_offset;
-        Push32(&code_buffer_, key);
-        Push32(&code_buffer_, disp);
-        if (cu_->verbose) {
-          LOG(INFO) << "  Case[" << elems << "] key: 0x"
-                    << std::hex << key << ", disp: 0x"
-                    << std::hex << disp;
-        }
-        elems++;
-      }
-      DCHECK_EQ(elems, tab_rec->table[1]);
-    } else {
-      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
-                static_cast<int>(Instruction::kPackedSwitchSignature));
-      DCHECK(tab_rec->switch_mir != nullptr);
-      BasicBlock* bb = mir_graph_->GetBasicBlock(tab_rec->switch_mir->bb);
-      DCHECK(bb != nullptr);
-      int elems = 0;
-      int low_key = s4FromSwitchData(&tab_rec->table[2]);
-      for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
-        int key = successor_block_info->key;
-        DCHECK_EQ(elems + low_key, key);
-        int target = successor_block_info->block;
-        LIR* boundary_lir = InsertCaseLabel(target, key);
-        DCHECK(boundary_lir != nullptr);
-        int disp = boundary_lir->offset - bx_offset;
-        Push32(&code_buffer_, disp);
-        if (cu_->verbose) {
-          LOG(INFO) << "  Case[" << elems << "] disp: 0x"
-                    << std::hex << disp;
-        }
-        elems++;
-      }
-      DCHECK_EQ(elems, tab_rec->table[1]);
-    }
-  }
-}
-
-/* Write the fill array data to the output stream */
-void Mir2Lir::InstallFillArrayData() {
-  for (Mir2Lir::FillArrayData* tab_rec : fill_array_data_) {
-    AlignBuffer(&code_buffer_, tab_rec->offset);
-    for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
-      code_buffer_.push_back(tab_rec->table[i] & 0xFF);
-      code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF);
-    }
-  }
-}
-
-static int AssignLiteralOffsetCommon(LIR* lir, CodeOffset offset) {
-  for (; lir != nullptr; lir = lir->next) {
-    lir->offset = offset;
-    offset += 4;
-  }
-  return offset;
-}
-
-static int AssignLiteralPointerOffsetCommon(LIR* lir, CodeOffset offset,
-                                            unsigned int element_size) {
-  // Align to natural pointer size.
-  offset = RoundUp(offset, element_size);
-  for (; lir != nullptr; lir = lir->next) {
-    lir->offset = offset;
-    offset += element_size;
-  }
-  return offset;
-}
-
-// Make sure we have a code address for every declared catch entry
-bool Mir2Lir::VerifyCatchEntries() {
-  MappingTable table(&encoded_mapping_table_[0]);
-  std::vector<uint32_t> dex_pcs;
-  dex_pcs.reserve(table.DexToPcSize());
-  for (auto it = table.DexToPcBegin(), end = table.DexToPcEnd(); it != end; ++it) {
-    dex_pcs.push_back(it.DexPc());
-  }
-  // Sort dex_pcs, so that we can quickly check them against the ordered mir_graph_->catches_.
-  std::sort(dex_pcs.begin(), dex_pcs.end());
-
-  bool success = true;
-  auto it = dex_pcs.begin(), end = dex_pcs.end();
-  for (uint32_t dex_pc : mir_graph_->catches_) {
-    while (it != end && *it < dex_pc) {
-      LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << *it;
-      ++it;
-      success = false;
-    }
-    if (it == end || *it > dex_pc) {
-      LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dex_pc;
-      success = false;
-    } else {
-      ++it;
-    }
-  }
-  if (!success) {
-    LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-    LOG(INFO) << "Entries @ decode: " << mir_graph_->catches_.size() << ", Entries in table: "
-              << table.DexToPcSize();
-  }
-  return success;
-}
-
-
-void Mir2Lir::CreateMappingTables() {
-  bool generate_src_map = cu_->compiler_driver->GetCompilerOptions().GetGenerateDebugInfo();
-
-  uint32_t pc2dex_data_size = 0u;
-  uint32_t pc2dex_entries = 0u;
-  uint32_t pc2dex_offset = 0u;
-  uint32_t pc2dex_dalvik_offset = 0u;
-  uint32_t pc2dex_src_entries = 0u;
-  uint32_t dex2pc_data_size = 0u;
-  uint32_t dex2pc_entries = 0u;
-  uint32_t dex2pc_offset = 0u;
-  uint32_t dex2pc_dalvik_offset = 0u;
-  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
-    pc2dex_src_entries++;
-    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
-      pc2dex_entries += 1;
-      DCHECK(pc2dex_offset <= tgt_lir->offset);
-      pc2dex_data_size += UnsignedLeb128Size(tgt_lir->offset - pc2dex_offset);
-      pc2dex_data_size += SignedLeb128Size(static_cast<int32_t>(tgt_lir->dalvik_offset) -
-                                           static_cast<int32_t>(pc2dex_dalvik_offset));
-      pc2dex_offset = tgt_lir->offset;
-      pc2dex_dalvik_offset = tgt_lir->dalvik_offset;
-    }
-    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
-      dex2pc_entries += 1;
-      DCHECK(dex2pc_offset <= tgt_lir->offset);
-      dex2pc_data_size += UnsignedLeb128Size(tgt_lir->offset - dex2pc_offset);
-      dex2pc_data_size += SignedLeb128Size(static_cast<int32_t>(tgt_lir->dalvik_offset) -
-                                           static_cast<int32_t>(dex2pc_dalvik_offset));
-      dex2pc_offset = tgt_lir->offset;
-      dex2pc_dalvik_offset = tgt_lir->dalvik_offset;
-    }
-  }
-
-  if (generate_src_map) {
-    src_mapping_table_.reserve(pc2dex_src_entries);
-  }
-
-  uint32_t total_entries = pc2dex_entries + dex2pc_entries;
-  uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
-  uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
-  encoded_mapping_table_.resize(data_size);
-  uint8_t* write_pos = &encoded_mapping_table_[0];
-  write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
-  write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
-  DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]), hdr_data_size);
-  uint8_t* write_pos2 = write_pos + pc2dex_data_size;
-
-  bool is_in_prologue_or_epilogue = false;
-  pc2dex_offset = 0u;
-  pc2dex_dalvik_offset = 0u;
-  dex2pc_offset = 0u;
-  dex2pc_dalvik_offset = 0u;
-  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
-    if (generate_src_map && !tgt_lir->flags.is_nop && tgt_lir->opcode >= 0) {
-      if (!is_in_prologue_or_epilogue) {
-        src_mapping_table_.push_back(SrcMapElem({tgt_lir->offset,
-                static_cast<int32_t>(tgt_lir->dalvik_offset)}));
-      }
-    }
-    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
-      DCHECK(pc2dex_offset <= tgt_lir->offset);
-      write_pos = EncodeUnsignedLeb128(write_pos, tgt_lir->offset - pc2dex_offset);
-      write_pos = EncodeSignedLeb128(write_pos, static_cast<int32_t>(tgt_lir->dalvik_offset) -
-                                     static_cast<int32_t>(pc2dex_dalvik_offset));
-      pc2dex_offset = tgt_lir->offset;
-      pc2dex_dalvik_offset = tgt_lir->dalvik_offset;
-    }
-    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
-      DCHECK(dex2pc_offset <= tgt_lir->offset);
-      write_pos2 = EncodeUnsignedLeb128(write_pos2, tgt_lir->offset - dex2pc_offset);
-      write_pos2 = EncodeSignedLeb128(write_pos2, static_cast<int32_t>(tgt_lir->dalvik_offset) -
-                                      static_cast<int32_t>(dex2pc_dalvik_offset));
-      dex2pc_offset = tgt_lir->offset;
-      dex2pc_dalvik_offset = tgt_lir->dalvik_offset;
-    }
-    if (tgt_lir->opcode == kPseudoPrologueBegin || tgt_lir->opcode == kPseudoEpilogueBegin) {
-      is_in_prologue_or_epilogue = true;
-    }
-    if (tgt_lir->opcode == kPseudoPrologueEnd || tgt_lir->opcode == kPseudoEpilogueEnd) {
-      is_in_prologue_or_epilogue = false;
-    }
-  }
-  DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]),
-            hdr_data_size + pc2dex_data_size);
-  DCHECK_EQ(static_cast<size_t>(write_pos2 - &encoded_mapping_table_[0]), data_size);
-
-  if (kIsDebugBuild) {
-    CHECK(VerifyCatchEntries());
-
-    // Verify the encoded table holds the expected data.
-    MappingTable table(&encoded_mapping_table_[0]);
-    CHECK_EQ(table.TotalSize(), total_entries);
-    CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
-    auto it = table.PcToDexBegin();
-    auto it2 = table.DexToPcBegin();
-    for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
-      if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
-        CHECK_EQ(tgt_lir->offset, it.NativePcOffset());
-        CHECK_EQ(tgt_lir->dalvik_offset, it.DexPc());
-        ++it;
-      }
-      if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
-        CHECK_EQ(tgt_lir->offset, it2.NativePcOffset());
-        CHECK_EQ(tgt_lir->dalvik_offset, it2.DexPc());
-        ++it2;
-      }
-    }
-    CHECK(it == table.PcToDexEnd());
-    CHECK(it2 == table.DexToPcEnd());
-  }
-}
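
A worked example (hypothetical numbers) of the delta encoding built here: safepoints at (native 0x10, dex 0x3) followed by (native 0x1c, dex 0x1) are written as ULEB128(0x10), SLEB128(+3), then ULEB128(0xc), SLEB128(-2). Native offsets only grow, so their deltas are unsigned; dex pcs can move backwards, hence the signed encoding, and most deltas fit in a single byte.
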
-
-void Mir2Lir::CreateNativeGcMap() {
-  if (UNLIKELY((cu_->disable_opt & (1u << kPromoteRegs)) != 0u)) {
-    // If we're not promoting to physical registers, it's safe to use the verifier's notion of
-    // references. (We disable register promotion when type inference finds a type conflict and
-    // in that case we defer to the verifier to avoid using the compiler's conflicting info.)
-    CreateNativeGcMapWithoutRegisterPromotion();
-    return;
-  }
-
-  ArenaBitVector* references = new (arena_) ArenaBitVector(arena_, mir_graph_->GetNumSSARegs(),
-                                                           false);
-
-  // Calculate max native offset and max reference vreg.
-  MIR* prev_mir = nullptr;
-  int max_ref_vreg = -1;
-  CodeOffset max_native_offset = 0u;
-  for (const auto& entry : safepoints_) {
-    uint32_t native_offset = entry.first->offset;
-    max_native_offset = std::max(max_native_offset, native_offset);
-    MIR* mir = entry.second;
-    UpdateReferenceVRegs(mir, prev_mir, references);
-    max_ref_vreg = std::max(max_ref_vreg, references->GetHighestBitSet());
-    prev_mir = mir;
-  }
-
-#if defined(BYTE_ORDER) && (BYTE_ORDER == LITTLE_ENDIAN)
-  static constexpr bool kLittleEndian = true;
-#else
-  static constexpr bool kLittleEndian = false;
-#endif
-
-  // Build the GC map.
-  uint32_t reg_width = static_cast<uint32_t>((max_ref_vreg + 8) / 8);
-  GcMapBuilder native_gc_map_builder(&native_gc_map_,
-                                     safepoints_.size(),
-                                     max_native_offset, reg_width);
-  if (kLittleEndian) {
-    for (const auto& entry : safepoints_) {
-      uint32_t native_offset = entry.first->offset;
-      MIR* mir = entry.second;
-      UpdateReferenceVRegs(mir, prev_mir, references);
-      // For little-endian, the bytes comprising the bit vector's raw storage are what we need.
-      native_gc_map_builder.AddEntry(native_offset,
-                                     reinterpret_cast<const uint8_t*>(references->GetRawStorage()));
-      prev_mir = mir;
-    }
-  } else {
-    ArenaVector<uint8_t> references_buffer(arena_->Adapter());
-    references_buffer.resize(reg_width);
-    for (const auto& entry : safepoints_) {
-      uint32_t native_offset = entry.first->offset;
-      MIR* mir = entry.second;
-      UpdateReferenceVRegs(mir, prev_mir, references);
-      // Big-endian or unknown endianness, manually translate the bit vector data.
-      const auto* raw_storage = references->GetRawStorage();
-      for (size_t i = 0; i != reg_width; ++i) {
-        references_buffer[i] = static_cast<uint8_t>(
-            raw_storage[i / sizeof(raw_storage[0])] >> (8u * (i % sizeof(raw_storage[0]))));
-      }
-      native_gc_map_builder.AddEntry(native_offset, references_buffer.data());
-      prev_mir = mir;
-    }
-  }
-}
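
The big-endian branch above re-serializes the bit vector one byte at a time. A minimal
standalone sketch of that extraction (plain C++, not ART code; the storage words are
invented for illustration):

  #include <cstddef>
  #include <cstdint>
  #include <cstdio>

  int main() {
    // Two 32-bit storage words, as GetRawStorage() might hand them back.
    const uint32_t raw_storage[] = {0x00010203u, 0x04050607u};
    uint8_t bytes[8];
    for (std::size_t i = 0; i != sizeof(bytes); ++i) {
      // Byte i of the little-endian encoding of word i / 4.
      bytes[i] = static_cast<uint8_t>(
          raw_storage[i / sizeof(raw_storage[0])] >> (8u * (i % sizeof(raw_storage[0]))));
    }
    for (uint8_t b : bytes) {
      std::printf("%02x ", b);  // Prints: 03 02 01 00 07 06 05 04
    }
    return 0;
  }
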
-
-void Mir2Lir::CreateNativeGcMapWithoutRegisterPromotion() {
-  DCHECK(!encoded_mapping_table_.empty());
-  MappingTable mapping_table(&encoded_mapping_table_[0]);
-  uint32_t max_native_offset = 0;
-  for (auto it = mapping_table.PcToDexBegin(), end = mapping_table.PcToDexEnd(); it != end; ++it) {
-    uint32_t native_offset = it.NativePcOffset();
-    if (native_offset > max_native_offset) {
-      max_native_offset = native_offset;
-    }
-  }
-  MethodReference method_ref(cu_->dex_file, cu_->method_idx);
-  const std::vector<uint8_t>& gc_map_raw =
-      mir_graph_->GetCurrentDexCompilationUnit()->GetVerifiedMethod()->GetDexGcMap();
-  verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);
-  DCHECK_EQ(gc_map_raw.size(), dex_gc_map.RawSize());
-  // Build the native-offset-to-references map.
-  GcMapBuilder native_gc_map_builder(&native_gc_map_,
-                                     mapping_table.PcToDexSize(),
-                                     max_native_offset, dex_gc_map.RegWidth());
-
-  for (auto it = mapping_table.PcToDexBegin(), end = mapping_table.PcToDexEnd(); it != end; ++it) {
-    uint32_t native_offset = it.NativePcOffset();
-    uint32_t dex_pc = it.DexPc();
-    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
-    CHECK(references != nullptr) << "Missing ref for dex pc 0x" << std::hex << dex_pc <<
-        ": " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-    native_gc_map_builder.AddEntry(native_offset, references);
-  }
-
-  // Maybe not necessary, but this could help prevent errors where we access the verified method
-  // after it has been deleted.
-  mir_graph_->GetCurrentDexCompilationUnit()->ClearVerifiedMethod();
-}
-
-/* Determine the offset of each literal field */
-int Mir2Lir::AssignLiteralOffset(CodeOffset offset) {
-  offset = AssignLiteralOffsetCommon(literal_list_, offset);
-  constexpr unsigned int ptr_size = sizeof(uint32_t);
-  static_assert(ptr_size >= sizeof(mirror::HeapReference<mirror::Object>),
-                "Pointer size cannot hold a heap reference");
-  offset = AssignLiteralPointerOffsetCommon(code_literal_list_, offset, ptr_size);
-  offset = AssignLiteralPointerOffsetCommon(method_literal_list_, offset, ptr_size);
-  offset = AssignLiteralPointerOffsetCommon(class_literal_list_, offset, ptr_size);
-  return offset;
-}
-
-int Mir2Lir::AssignSwitchTablesOffset(CodeOffset offset) {
-  for (Mir2Lir::SwitchTable* tab_rec : switch_tables_) {
-    tab_rec->offset = offset;
-    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
-      offset += tab_rec->table[1] * (sizeof(int) * 2);
-    } else {
-      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
-                static_cast<int>(Instruction::kPackedSwitchSignature));
-      offset += tab_rec->table[1] * sizeof(int);
-    }
-  }
-  return offset;
-}
-
-int Mir2Lir::AssignFillArrayDataOffset(CodeOffset offset) {
-  for (Mir2Lir::FillArrayData* tab_rec : fill_array_data_) {
-    tab_rec->offset = offset;
-    offset += tab_rec->size;
-    // word align
-    offset = RoundUp(offset, 4);
-  }
-  return offset;
-}
-
-/*
- * Insert a kPseudoCaseLabel at the beginning of the Dalvik
-   * offset vaddr if pretty-printing, otherwise use the standard block
- * label.  The selected label will be used to fix up the case
- * branch table during the assembly phase.  All resource flags
-   * are set to prevent code motion.  keyVal is just there for debugging.
- */
-LIR* Mir2Lir::InsertCaseLabel(uint32_t bbid, int keyVal) {
-  LIR* boundary_lir = &block_label_list_[bbid];
-  LIR* res = boundary_lir;
-  if (cu_->verbose) {
-    // Only pay the expense if we're pretty-printing.
-    LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
-    BasicBlock* bb = mir_graph_->GetBasicBlock(bbid);
-    DCHECK(bb != nullptr);
-    new_label->dalvik_offset = bb->start_offset;
-    new_label->opcode = kPseudoCaseLabel;
-    new_label->operands[0] = keyVal;
-    new_label->flags.fixup = kFixupLabel;
-    DCHECK(!new_label->flags.use_def_invalid);
-    new_label->u.m.def_mask = &kEncodeAll;
-    InsertLIRAfter(boundary_lir, new_label);
-  }
-  return res;
-}
-
-void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) {
-  /*
-   * Sparse switch data format:
-   *  ushort ident = 0x0200   magic value
-   *  ushort size       number of entries in the table; > 0
-   *  int keys[size]      keys, sorted low-to-high; 32-bit aligned
-   *  int targets[size]     branch targets, relative to switch opcode
-   *
-   * Total size is (2+size*4) 16-bit code units.
-   */
-  uint16_t ident = table[0];
-  int entries = table[1];
-  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
-  const int32_t* targets = &keys[entries];
-  LOG(INFO) <<  "Sparse switch table - ident:0x" << std::hex << ident
-            << ", entries: " << std::dec << entries;
-  for (int i = 0; i < entries; i++) {
-    LOG(INFO) << "  Key[" << keys[i] << "] -> 0x" << std::hex << targets[i];
-  }
-}
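
To make the layout in the comment concrete, here is a two-entry sparse payload and a
portable read of it (a sketch with invented values, not ART code; it assumes a
little-endian host, matching the dex encoding):

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    // ident, size=2, keys {10, 100}, targets {8, 16}; each 32-bit value is
    // stored as two little-endian uint16 halves. Total: 2 + 2*4 = 10 units.
    const uint16_t table[] = {0x0200, 2, 10, 0, 100, 0, 8, 0, 16, 0};
    const int entries = table[1];
    int32_t keys[2];
    int32_t targets[2];
    std::memcpy(keys, &table[2], sizeof(keys));                      // keys[size]
    std::memcpy(targets, &table[2 + 2 * entries], sizeof(targets));  // targets[size]
    for (int i = 0; i < entries; ++i) {
      std::printf("Key[%d] -> 0x%x\n", keys[i], targets[i]);  // 10->0x8, 100->0x10
    }
    return 0;
  }
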
-
-void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) {
-  /*
-   * Packed switch data format:
-   *  ushort ident = 0x0100   magic value
-   *  ushort size       number of entries in the table
-   *  int first_key       first (and lowest) switch case value
-   *  int targets[size]     branch targets, relative to switch opcode
-   *
-   * Total size is (4+size*2) 16-bit code units.
-   */
-  uint16_t ident = table[0];
-  const int32_t* targets = reinterpret_cast<const int32_t*>(&table[4]);
-  int entries = table[1];
-  int low_key = s4FromSwitchData(&table[2]);
-  LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident
-            << ", entries: " << std::dec << entries << ", low_key: " << low_key;
-  for (int i = 0; i < entries; i++) {
-    LOG(INFO) << "  Key[" << (i + low_key) << "] -> 0x" << std::hex
-              << targets[i];
-  }
-}
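
The packed variant trades the key array for a single first_key: entry i maps key
first_key + i to targets[i]. A hypothetical three-entry payload (values invented):

  // uint16_t table[] = {0x0100, 3,            // ident, size
  //                     0xFFFF, 0xFFFF,       // first_key = -1 (little-endian halves)
  //                     8, 0, 16, 0, 24, 0};  // targets {8, 16, 24}
  // Keys -1, 0, 1 branch to 0x8, 0x10, 0x18; total 4 + 3*2 = 10 code units.
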
-
-/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
-void Mir2Lir::MarkBoundary(DexOffset offset ATTRIBUTE_UNUSED, const char* inst_str) {
-  // NOTE: only used for debug listings.
-  NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str)));
-}
-
-// Convert relation of src1/src2 to src2/src1
-ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) {
-  ConditionCode res;
-  switch (before) {
-    case kCondEq: res = kCondEq; break;
-    case kCondNe: res = kCondNe; break;
-    case kCondLt: res = kCondGt; break;
-    case kCondGt: res = kCondLt; break;
-    case kCondLe: res = kCondGe; break;
-    case kCondGe: res = kCondLe; break;
-    default:
-      LOG(FATAL) << "Unexpected ccode " << before;
-      UNREACHABLE();
-  }
-  return res;
-}
-
-ConditionCode Mir2Lir::NegateComparison(ConditionCode before) {
-  ConditionCode res;
-  switch (before) {
-    case kCondEq: res = kCondNe; break;
-    case kCondNe: res = kCondEq; break;
-    case kCondLt: res = kCondGe; break;
-    case kCondGt: res = kCondLe; break;
-    case kCondLe: res = kCondGt; break;
-    case kCondGe: res = kCondLt; break;
-    default:
-      LOG(FATAL) << "Unexpected ccode " << before;
-      UNREACHABLE();
-  }
-  return res;
-}
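
The two transforms are easy to conflate: flipping rewrites "a OP b" as "b OP' a" with the
same truth value, while negation yields the complement. A self-checking sketch over plain
ints (illustration only, not ART code):

  #include <cassert>

  bool Less(int a, int b) { return a < b; }
  bool Greater(int a, int b) { return a > b; }
  bool GreaterEqual(int a, int b) { return a >= b; }

  int main() {
    for (int a = -2; a <= 2; ++a) {
      for (int b = -2; b <= 2; ++b) {
        assert(Less(a, b) == Greater(b, a));        // flip: kCondLt -> kCondGt
        assert(Less(a, b) == !GreaterEqual(a, b));  // negate: kCondLt -> kCondGe
      }
    }
    return 0;
  }
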
-
-// TODO: move to mir_to_lir.cc
-Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
-    : literal_list_(nullptr),
-      method_literal_list_(nullptr),
-      class_literal_list_(nullptr),
-      code_literal_list_(nullptr),
-      first_fixup_(nullptr),
-      arena_(arena),
-      cu_(cu),
-      mir_graph_(mir_graph),
-      switch_tables_(arena->Adapter(kArenaAllocSwitchTable)),
-      fill_array_data_(arena->Adapter(kArenaAllocFillArrayData)),
-      tempreg_info_(arena->Adapter()),
-      reginfo_map_(arena->Adapter()),
-      pointer_storage_(arena->Adapter()),
-      data_offset_(0),
-      total_size_(0),
-      block_label_list_(nullptr),
-      promotion_map_(nullptr),
-      current_dalvik_offset_(0),
-      current_mir_(nullptr),
-      estimated_native_code_size_(0),
-      reg_pool_(nullptr),
-      live_sreg_(0),
-      code_buffer_(mir_graph->GetArena()->Adapter()),
-      encoded_mapping_table_(mir_graph->GetArena()->Adapter()),
-      core_vmap_table_(mir_graph->GetArena()->Adapter()),
-      fp_vmap_table_(mir_graph->GetArena()->Adapter()),
-      native_gc_map_(mir_graph->GetArena()->Adapter()),
-      patches_(mir_graph->GetArena()->Adapter()),
-      num_core_spills_(0),
-      num_fp_spills_(0),
-      frame_size_(0),
-      core_spill_mask_(0),
-      fp_spill_mask_(0),
-      first_lir_insn_(nullptr),
-      last_lir_insn_(nullptr),
-      slow_paths_(arena->Adapter(kArenaAllocSlowPaths)),
-      mem_ref_type_(ResourceMask::kHeapRef),
-      mask_cache_(arena),
-      safepoints_(arena->Adapter()),
-      dex_cache_arrays_layout_(cu->compiler_driver->GetDexCacheArraysLayout(cu->dex_file)),
-      pc_rel_temp_(nullptr),
-      dex_cache_arrays_min_offset_(std::numeric_limits<uint32_t>::max()),
-      cfi_(&last_lir_insn_,
-           cu->compiler_driver->GetCompilerOptions().GenerateAnyDebugInfo(),
-           arena),
-      in_to_reg_storage_mapping_(arena) {
-  switch_tables_.reserve(4);
-  fill_array_data_.reserve(4);
-  tempreg_info_.reserve(20);
-  reginfo_map_.reserve(RegStorage::kMaxRegs);
-  pointer_storage_.reserve(128);
-  slow_paths_.reserve(32);
-  // Reserve pointer id 0 for null.
-  size_t null_idx = WrapPointer<void>(nullptr);
-  DCHECK_EQ(null_idx, 0U);
-}
-
-void Mir2Lir::Materialize() {
-  cu_->NewTimingSplit("RegisterAllocation");
-  CompilerInitializeRegAlloc();  // Needs to happen after SSA naming
-
-  /* Allocate Registers using simple local allocation scheme */
-  SimpleRegAlloc();
-
-  /* First try the custom light codegen for special cases. */
-  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
-  bool special_worked = cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
-      ->GenSpecial(this, cu_->method_idx);
-
-  /* Take normal path for converting MIR to LIR only if the special codegen did not succeed. */
-  if (special_worked == false) {
-    MethodMIR2LIR();
-  }
-
-  /* Method is not empty */
-  if (first_lir_insn_) {
-    /* Convert LIR into machine code. */
-    AssembleLIR();
-
-    if ((cu_->enable_debug & (1 << kDebugCodegenDump)) != 0) {
-      CodegenDump();
-    }
-  }
-}
-
-CompiledMethod* Mir2Lir::GetCompiledMethod() {
-  // Combine vmap tables - core regs, then fp regs - into vmap_table.
-  Leb128EncodingVector<> vmap_encoder;
-  if (frame_size_ > 0) {
-    // Prefix the encoded data with its size.
-    size_t size = core_vmap_table_.size() + 1 /* marker */ + fp_vmap_table_.size();
-    vmap_encoder.Reserve(size + 1u);  // All values are likely to be one byte in ULEB128 (<128).
-    vmap_encoder.PushBackUnsigned(size);
-    // Core regs may have been inserted out of order - sort first.
-    std::sort(core_vmap_table_.begin(), core_vmap_table_.end());
-    for (size_t i = 0 ; i < core_vmap_table_.size(); ++i) {
-      // Copy, stripping out the phys register sort key.
-      vmap_encoder.PushBackUnsigned(
-          ~(~0u << VREG_NUM_WIDTH) & (core_vmap_table_[i] + VmapTable::kEntryAdjustment));
-    }
-    // Push a marker to take the place of lr.
-    vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);
-    if (cu_->instruction_set == kThumb2) {
-      // fp regs already sorted.
-      for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) {
-        vmap_encoder.PushBackUnsigned(fp_vmap_table_[i] + VmapTable::kEntryAdjustment);
-      }
-    } else {
-      // For other platforms, regs may have been inserted out of order - sort first.
-      std::sort(fp_vmap_table_.begin(), fp_vmap_table_.end());
-      for (size_t i = 0 ; i < fp_vmap_table_.size(); ++i) {
-        // Copy, stripping out the phys register sort key.
-        vmap_encoder.PushBackUnsigned(
-            ~(~0u << VREG_NUM_WIDTH) & (fp_vmap_table_[i] + VmapTable::kEntryAdjustment));
-      }
-    }
-  } else {
-    DCHECK_EQ(POPCOUNT(core_spill_mask_), 0);
-    DCHECK_EQ(POPCOUNT(fp_spill_mask_), 0);
-    DCHECK_EQ(core_vmap_table_.size(), 0u);
-    DCHECK_EQ(fp_vmap_table_.size(), 0u);
-    vmap_encoder.PushBackUnsigned(0u);  // Size is 0.
-  }
-
-  // Sort patches by literal offset. Required for .oat_patches encoding.
-  std::sort(patches_.begin(), patches_.end(), [](const LinkerPatch& lhs, const LinkerPatch& rhs) {
-    return lhs.LiteralOffset() < rhs.LiteralOffset();
-  });
-
-  return CompiledMethod::SwapAllocCompiledMethod(
-      cu_->compiler_driver, cu_->instruction_set,
-      ArrayRef<const uint8_t>(code_buffer_),
-      frame_size_, core_spill_mask_, fp_spill_mask_,
-      ArrayRef<const SrcMapElem>(src_mapping_table_),
-      ArrayRef<const uint8_t>(encoded_mapping_table_),
-      ArrayRef<const uint8_t>(vmap_encoder.GetData()),
-      ArrayRef<const uint8_t>(native_gc_map_),
-      ArrayRef<const uint8_t>(*cfi_.Patch(code_buffer_.size())),
-      ArrayRef<const LinkerPatch>(patches_));
-}
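
The masking in the loops above strips a physical-register sort key kept in the high bits
of each vmap entry. A sketch of just that bit manipulation (the 16-bit width is an
assumption for illustration, not necessarily ART's VREG_NUM_WIDTH):

  #include <cassert>
  #include <cstdint>

  int main() {
    constexpr uint32_t kVregNumWidth = 16;               // assumed width
    constexpr uint32_t kMask = ~(~0u << kVregNumWidth);  // 0x0000FFFF
    const uint32_t entry = (7u << kVregNumWidth) | 42u;  // sort key 7, vreg 42
    assert((kMask & entry) == 42u);  // only the vreg number survives
    return 0;
  }
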
-
-size_t Mir2Lir::GetMaxPossibleCompilerTemps() const {
-  // Choose a reasonably small value in order to contain stack growth.
-  // Backends that are smarter about the spill region can return larger values.
-  const size_t max_compiler_temps = 10;
-  return max_compiler_temps;
-}
-
-size_t Mir2Lir::GetNumBytesForCompilerTempSpillRegion() {
-  // By default, assume that the Mir2Lir will need one slot for each temporary.
-  // If the backend can better determine temps that have non-overlapping ranges and
-  // temps that do not need to be spilled, it can provide a smaller region.
-  mir_graph_->CommitCompilerTemps();
-  return mir_graph_->GetNumBytesForSpecialTemps() + mir_graph_->GetMaximumBytesForNonSpecialTemps();
-}
-
-int Mir2Lir::ComputeFrameSize() {
-  /* Figure out the frame size */
-  uint32_t size = num_core_spills_ * GetBytesPerGprSpillLocation(cu_->instruction_set)
-                  + num_fp_spills_ * GetBytesPerFprSpillLocation(cu_->instruction_set)
-                  + sizeof(uint32_t)  // Filler.
-                  + mir_graph_->GetNumOfLocalCodeVRs()  * sizeof(uint32_t)
-                  + mir_graph_->GetNumOfOutVRs() * sizeof(uint32_t)
-                  + GetNumBytesForCompilerTempSpillRegion();
-  /* Align and set */
-  return RoundUp(size, kStackAlignment);
-}
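
A worked instance of the sum (all inputs hypothetical, and kStackAlignment assumed to be
16 bytes):

  #include <cstdio>

  constexpr unsigned RoundUp(unsigned x, unsigned n) { return ((x + n - 1) / n) * n; }

  int main() {
    // 5 core spills * 4B + 0 FP spills + 4B filler + 8 local vregs * 4B
    // + 2 out vregs * 4B + a 40B compiler-temp region = 104 bytes.
    const unsigned size = 5 * 4 + 0 + 4 + 8 * 4 + 2 * 4 + 40;
    std::printf("%u\n", RoundUp(size, 16));  // Prints: 112
    return 0;
  }
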
-
-/*
- * Append an LIR instruction to the LIR list maintained by a compilation
- * unit
- */
-void Mir2Lir::AppendLIR(LIR* lir) {
-  if (first_lir_insn_ == nullptr) {
-    DCHECK(last_lir_insn_ == nullptr);
-    last_lir_insn_ = first_lir_insn_ = lir;
-    lir->prev = lir->next = nullptr;
-  } else {
-    last_lir_insn_->next = lir;
-    lir->prev = last_lir_insn_;
-    lir->next = nullptr;
-    last_lir_insn_ = lir;
-  }
-}
-
-/*
- * Insert an LIR instruction before the current instruction, which cannot be the
- * first instruction.
- *
- * prev_lir <-> new_lir <-> current_lir
- */
-void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) {
-  DCHECK(current_lir->prev != nullptr);
-  LIR *prev_lir = current_lir->prev;
-
-  prev_lir->next = new_lir;
-  new_lir->prev = prev_lir;
-  new_lir->next = current_lir;
-  current_lir->prev = new_lir;
-}
-
-/*
- * Insert an LIR instruction after the current instruction, which cannot be the
- * last instruction.
- *
- * current_lir -> new_lir -> old_next
- */
-void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir) {
-  new_lir->prev = current_lir;
-  new_lir->next = current_lir->next;
-  current_lir->next = new_lir;
-  new_lir->next->prev = new_lir;
-}
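
The same splice over a toy node type shows why the "cannot be the last instruction"
precondition matters: the final back-link write dereferences current_lir->next (a sketch,
not ART code):

  #include <cassert>

  struct Node { Node* prev = nullptr; Node* next = nullptr; };

  void InsertAfter(Node* cur, Node* n) {
    n->prev = cur;
    n->next = cur->next;
    cur->next = n;
    n->next->prev = n;  // Would dereference null if cur were the last node.
  }

  int main() {
    Node a, b, c;
    a.next = &c;
    c.prev = &a;          // a <-> c
    InsertAfter(&a, &b);  // a <-> b <-> c
    assert(a.next == &b && b.prev == &a && b.next == &c && c.prev == &b);
    return 0;
  }
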
-
-bool Mir2Lir::PartiallyIntersects(RegLocation rl_src, RegLocation rl_dest) {
-  DCHECK(rl_src.wide);
-  DCHECK(rl_dest.wide);
-  return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) - mir_graph_->SRegToVReg(rl_dest.s_reg_low)) == 1);
-}
-
-bool Mir2Lir::Intersects(RegLocation rl_src, RegLocation rl_dest) {
-  DCHECK(rl_src.wide);
-  DCHECK(rl_dest.wide);
-  return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) - mir_graph_->SRegToVReg(rl_dest.s_reg_low)) <= 1);
-}
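
In both predicates a wide value occupies the vreg pair (v, v+1), so the distance between
the two base vregs decides the overlap (register numbers below are invented):

  // |4 - 5| == 1: (v4,v5) vs (v5,v6) share one half -> PartiallyIntersects()
  //               and Intersects() both hold.
  // |4 - 4| == 0: identical pairs overlap fully     -> only Intersects() holds.
  // |4 - 6| == 2: (v4,v5) vs (v6,v7) are disjoint   -> neither holds.
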
-
-LIR *Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
-                                int offset, int check_value, LIR* target, LIR** compare) {
-  // Handle this for architectures that can't compare to memory.
-  LIR* inst = Load32Disp(base_reg, offset, temp_reg);
-  if (compare != nullptr) {
-    *compare = inst;
-  }
-  LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
-  return branch;
-}
-
-void Mir2Lir::AddSlowPath(LIRSlowPath* slowpath) {
-  slow_paths_.push_back(slowpath);
-  ResetDefTracking();
-}
-
-void Mir2Lir::LoadCodeAddress(const MethodReference& target_method, InvokeType type,
-                              SpecialTargetRegister symbolic_reg) {
-  LIR* data_target = ScanLiteralPoolMethod(code_literal_list_, target_method);
-  if (data_target == nullptr) {
-    data_target = AddWordData(&code_literal_list_, target_method.dex_method_index);
-    data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
-    // NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
-    // the same method invoked with kVirtual, kSuper and kInterface but the class linker will
-    // resolve these invokes to the same method, so we don't care which one we record here.
-    data_target->operands[2] = type;
-  }
-  // Loads a code pointer. Code from the oat file can be mapped anywhere.
-  OpPcRelLoad(TargetPtrReg(symbolic_reg), data_target);
-  DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
-  DCHECK_NE(cu_->instruction_set, kMips64) << reinterpret_cast<void*>(data_target);
-}
-
-void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
-                                SpecialTargetRegister symbolic_reg) {
-  LIR* data_target = ScanLiteralPoolMethod(method_literal_list_, target_method);
-  if (data_target == nullptr) {
-    data_target = AddWordData(&method_literal_list_, target_method.dex_method_index);
-    data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
-    // NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
-    // the same method invoked with kVirtual, kSuper and kInterface but the class linker will
-    // resolve these invokes to the same method, so we don't care which one we record here.
-    data_target->operands[2] = type;
-  }
-  // Loads an ArtMethod pointer, which is not a reference.
-  OpPcRelLoad(TargetPtrReg(symbolic_reg), data_target);
-  DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
-  DCHECK_NE(cu_->instruction_set, kMips64) << reinterpret_cast<void*>(data_target);
-}
-
-void Mir2Lir::LoadClassType(const DexFile& dex_file, uint32_t type_idx,
-                            SpecialTargetRegister symbolic_reg) {
-  // Use the literal pool and a PC-relative load from a data word.
-  LIR* data_target = ScanLiteralPoolClass(class_literal_list_, dex_file, type_idx);
-  if (data_target == nullptr) {
-    data_target = AddWordData(&class_literal_list_, type_idx);
-    data_target->operands[1] = WrapPointer(const_cast<DexFile*>(&dex_file));
-  }
-  // Loads a Class pointer, which is a reference as it lives in the heap.
-  OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target);
-}
-
-bool Mir2Lir::CanUseOpPcRelDexCacheArrayLoad() const {
-  return false;
-}
-
-void Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file ATTRIBUTE_UNUSED,
-                                       int offset ATTRIBUTE_UNUSED,
-                                       RegStorage r_dest ATTRIBUTE_UNUSED,
-                                       bool wide ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "No generic implementation.";
-  UNREACHABLE();
-}
-
-RegLocation Mir2Lir::NarrowRegLoc(RegLocation loc) {
-  if (loc.location == kLocPhysReg) {
-    DCHECK(!loc.reg.Is32Bit());
-    if (loc.reg.IsPair()) {
-      RegisterInfo* info_lo = GetRegInfo(loc.reg.GetLow());
-      RegisterInfo* info_hi = GetRegInfo(loc.reg.GetHigh());
-      info_lo->SetIsWide(false);
-      info_hi->SetIsWide(false);
-      loc.reg = info_lo->GetReg();
-    } else {
-      RegisterInfo* info = GetRegInfo(loc.reg);
-      RegisterInfo* info_new = info->FindMatchingView(RegisterInfo::k32SoloStorageMask);
-      DCHECK(info_new != nullptr);
-      if (info->IsLive() && (info->SReg() == loc.s_reg_low)) {
-        info->MarkDead();
-        info_new->MarkLive(loc.s_reg_low);
-      }
-      loc.reg = info_new->GetReg();
-    }
-    DCHECK(loc.reg.Valid());
-  }
-  loc.wide = false;
-  return loc;
-}
-
-void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb ATTRIBUTE_UNUSED,
-                                                  MIR* mir ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unknown MIR opcode not supported on this architecture";
-  UNREACHABLE();
-}
-
-void Mir2Lir::InitReferenceVRegs(BasicBlock* bb, BitVector* references) {
-  // Mark the references coming from the first predecessor.
-  DCHECK(bb != nullptr);
-  DCHECK(bb->block_type == kEntryBlock || !bb->predecessors.empty());
-  BasicBlock* first_bb =
-      (bb->block_type == kEntryBlock) ? bb : mir_graph_->GetBasicBlock(bb->predecessors[0]);
-  DCHECK(first_bb != nullptr);
-  DCHECK(first_bb->data_flow_info != nullptr);
-  DCHECK(first_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
-  const int32_t* first_vreg_to_ssa_map = first_bb->data_flow_info->vreg_to_ssa_map_exit;
-  references->ClearAllBits();
-  for (uint32_t vreg = 0,
-       num_vregs = mir_graph_->GetNumOfCodeVRs() + mir_graph_->GetNumUsedCompilerTemps();
-       vreg != num_vregs; ++vreg) {
-    int32_t sreg = first_vreg_to_ssa_map[vreg];
-    if (sreg != INVALID_SREG && mir_graph_->reg_location_[sreg].ref &&
-        !mir_graph_->IsConstantNullRef(mir_graph_->reg_location_[sreg])) {
-      references->SetBit(vreg);
-    }
-  }
-  // Unmark the references that are merging with a different value.
-  for (size_t i = 1u, num_pred = bb->predecessors.size(); i < num_pred; ++i) {
-    BasicBlock* pred_bb = mir_graph_->GetBasicBlock(bb->predecessors[i]);
-    DCHECK(pred_bb != nullptr);
-    DCHECK(pred_bb->data_flow_info != nullptr);
-    DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
-    const int32_t* pred_vreg_to_ssa_map = pred_bb->data_flow_info->vreg_to_ssa_map_exit;
-    for (uint32_t vreg : references->Indexes()) {
-      if (first_vreg_to_ssa_map[vreg] != pred_vreg_to_ssa_map[vreg]) {
-        // NOTE: The BitVectorSet::IndexIterator will not check the pointed-to bit again,
-        // so clearing the bit has no effect on the iterator.
-        references->ClearBit(vreg);
-      }
-    }
-  }
-}
-
-bool Mir2Lir::UpdateReferenceVRegsLocal(MIR* mir, MIR* prev_mir, BitVector* references) {
-  DCHECK(mir == nullptr || mir->bb == prev_mir->bb);
-  DCHECK(prev_mir != nullptr);
-  while (prev_mir != nullptr) {
-    if (prev_mir == mir) {
-      return true;
-    }
-    const size_t num_defs = prev_mir->ssa_rep->num_defs;
-    const int32_t* defs = prev_mir->ssa_rep->defs;
-    if (num_defs == 1u && mir_graph_->reg_location_[defs[0]].ref &&
-        !mir_graph_->IsConstantNullRef(mir_graph_->reg_location_[defs[0]])) {
-      references->SetBit(mir_graph_->SRegToVReg(defs[0]));
-    } else {
-      for (size_t i = 0u; i != num_defs; ++i) {
-        references->ClearBit(mir_graph_->SRegToVReg(defs[i]));
-      }
-    }
-    prev_mir = prev_mir->next;
-  }
-  return false;
-}
-
-void Mir2Lir::UpdateReferenceVRegs(MIR* mir, MIR* prev_mir, BitVector* references) {
-  if (mir == nullptr) {
-    // Safepoint in entry sequence.
-    InitReferenceVRegs(mir_graph_->GetEntryBlock(), references);
-    return;
-  }
-  if (IsInstructionReturn(mir->dalvikInsn.opcode) ||
-      mir->dalvikInsn.opcode == Instruction::RETURN_VOID_NO_BARRIER) {
-    references->ClearAllBits();
-    if (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT) {
-      references->SetBit(mir_graph_->SRegToVReg(mir->ssa_rep->uses[0]));
-    }
-    return;
-  }
-  if (prev_mir != nullptr && mir->bb == prev_mir->bb &&
-      UpdateReferenceVRegsLocal(mir, prev_mir, references)) {
-    return;
-  }
-  BasicBlock* bb = mir_graph_->GetBasicBlock(mir->bb);
-  DCHECK(bb != nullptr);
-  InitReferenceVRegs(bb, references);
-  bool success = UpdateReferenceVRegsLocal(mir, bb->first_mir_insn, references);
-  DCHECK(success) << "MIR @0x" << std::hex << mir->offset << " not in BB#" << std::dec << mir->bb;
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 48c4356..4a98342 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -21,11 +21,8 @@
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/mutex-inl.h"
-#include "dex/compiler_ir.h"
 #include "driver/compiler_driver.h"
 #include "thread-inl.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir.h"
 #include "dex_instruction-inl.h"
 #include "driver/dex_compilation_unit.h"
 #include "verifier/method_verifier-inl.h"
@@ -198,31 +195,6 @@
 static_assert(kIntrinsicIsStatic[kIntrinsicSystemArrayCopy],
               "SystemArrayCopy must be static");
 
-MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke) {
-  MIR* insn = mir_graph->NewMIR();
-  insn->offset = invoke->offset;
-  insn->optimization_flags = MIR_CALLEE;
-  return insn;
-}
-
-uint32_t GetInvokeReg(MIR* invoke, uint32_t arg) {
-  DCHECK_LT(arg, invoke->dalvikInsn.vA);
-  DCHECK(!MIR::DecodedInstruction::IsPseudoMirOp(invoke->dalvikInsn.opcode));
-  if (IsInvokeInstructionRange(invoke->dalvikInsn.opcode)) {
-    return invoke->dalvikInsn.vC + arg;  // Range invoke.
-  } else {
-    DCHECK_EQ(Instruction::FormatOf(invoke->dalvikInsn.opcode), Instruction::k35c);
-    return invoke->dalvikInsn.arg[arg];  // Non-range invoke.
-  }
-}
-
-bool WideArgIsInConsecutiveDalvikRegs(MIR* invoke, uint32_t arg) {
-  DCHECK_LT(arg + 1, invoke->dalvikInsn.vA);
-  DCHECK(!MIR::DecodedInstruction::IsPseudoMirOp(invoke->dalvikInsn.opcode));
-  return IsInvokeInstructionRange(invoke->dalvikInsn.opcode) ||
-      invoke->dalvikInsn.arg[arg + 1u] == invoke->dalvikInsn.arg[arg] + 1u;
-}
-
 }  // anonymous namespace
 
 const uint32_t DexFileMethodInliner::kIndexUnresolved;
@@ -736,241 +708,12 @@
   return res;
 }
 
-bool DexFileMethodInliner::GenIntrinsic(Mir2Lir* backend, CallInfo* info) {
-  InlineMethod intrinsic;
-  {
-    ReaderMutexLock mu(Thread::Current(), lock_);
-    auto it = inline_methods_.find(info->method_ref.dex_method_index);
-    if (it == inline_methods_.end() || (it->second.flags & kInlineIntrinsic) == 0) {
-      return false;
-    }
-    intrinsic = it->second;
-  }
-  if (kIntrinsicIsStatic[intrinsic.opcode] != (info->type == kStatic)) {
-    // Invoke type mismatch.
-    return false;
-  }
-  switch (intrinsic.opcode) {
-    case kIntrinsicDoubleCvt:
-      return backend->GenInlinedDoubleCvt(info);
-    case kIntrinsicFloatCvt:
-      return backend->GenInlinedFloatCvt(info);
-    case kIntrinsicReverseBytes:
-      return backend->GenInlinedReverseBytes(info, static_cast<OpSize>(intrinsic.d.data));
-    case kIntrinsicReverseBits:
-      return backend->GenInlinedReverseBits(info, static_cast<OpSize>(intrinsic.d.data));
-    case kIntrinsicAbsInt:
-      return backend->GenInlinedAbsInt(info);
-    case kIntrinsicAbsLong:
-      return backend->GenInlinedAbsLong(info);
-    case kIntrinsicAbsFloat:
-      return backend->GenInlinedAbsFloat(info);
-    case kIntrinsicAbsDouble:
-      return backend->GenInlinedAbsDouble(info);
-    case kIntrinsicMinMaxInt:
-      return backend->GenInlinedMinMax(info, intrinsic.d.data & kIntrinsicFlagMin, false /* is_long */);
-    case kIntrinsicMinMaxLong:
-      return backend->GenInlinedMinMax(info, intrinsic.d.data & kIntrinsicFlagMin, true /* is_long */);
-    case kIntrinsicMinMaxFloat:
-      return backend->GenInlinedMinMaxFP(info, intrinsic.d.data & kIntrinsicFlagMin, false /* is_double */);
-    case kIntrinsicMinMaxDouble:
-      return backend->GenInlinedMinMaxFP(info, intrinsic.d.data & kIntrinsicFlagMin, true /* is_double */);
-    case kIntrinsicCos:
-    case kIntrinsicSin:
-    case kIntrinsicAcos:
-    case kIntrinsicAsin:
-    case kIntrinsicAtan:
-    case kIntrinsicAtan2:
-    case kIntrinsicCbrt:
-    case kIntrinsicCosh:
-    case kIntrinsicExp:
-    case kIntrinsicExpm1:
-    case kIntrinsicHypot:
-    case kIntrinsicLog:
-    case kIntrinsicLog10:
-    case kIntrinsicNextAfter:
-    case kIntrinsicSinh:
-    case kIntrinsicTan:
-    case kIntrinsicTanh:
-      // Not implemented in Quick.
-      return false;
-    case kIntrinsicSqrt:
-      return backend->GenInlinedSqrt(info);
-    case kIntrinsicCeil:
-      return backend->GenInlinedCeil(info);
-    case kIntrinsicFloor:
-      return backend->GenInlinedFloor(info);
-    case kIntrinsicRint:
-      return backend->GenInlinedRint(info);
-    case kIntrinsicRoundFloat:
-      return backend->GenInlinedRound(info, false /* is_double */);
-    case kIntrinsicRoundDouble:
-      return backend->GenInlinedRound(info, true /* is_double */);
-    case kIntrinsicReferenceGetReferent:
-      return backend->GenInlinedReferenceGetReferent(info);
-    case kIntrinsicCharAt:
-      return backend->GenInlinedCharAt(info);
-    case kIntrinsicCompareTo:
-      return backend->GenInlinedStringCompareTo(info);
-    case kIntrinsicEquals:
-      // Quick does not implement this intrinsic.
-      return false;
-    case kIntrinsicGetCharsNoCheck:
-      return backend->GenInlinedStringGetCharsNoCheck(info);
-    case kIntrinsicIsEmptyOrLength:
-      return backend->GenInlinedStringIsEmptyOrLength(
-          info, intrinsic.d.data & kIntrinsicFlagIsEmpty);
-    case kIntrinsicIndexOf:
-      return backend->GenInlinedIndexOf(info, intrinsic.d.data & kIntrinsicFlagBase0);
-    case kIntrinsicNewStringFromBytes:
-      return backend->GenInlinedStringFactoryNewStringFromBytes(info);
-    case kIntrinsicNewStringFromChars:
-      return backend->GenInlinedStringFactoryNewStringFromChars(info);
-    case kIntrinsicNewStringFromString:
-      return backend->GenInlinedStringFactoryNewStringFromString(info);
-    case kIntrinsicCurrentThread:
-      return backend->GenInlinedCurrentThread(info);
-    case kIntrinsicPeek:
-      return backend->GenInlinedPeek(info, static_cast<OpSize>(intrinsic.d.data));
-    case kIntrinsicPoke:
-      return backend->GenInlinedPoke(info, static_cast<OpSize>(intrinsic.d.data));
-    case kIntrinsicCas:
-      return backend->GenInlinedCas(info, intrinsic.d.data & kIntrinsicFlagIsLong,
-                                    intrinsic.d.data & kIntrinsicFlagIsObject);
-    case kIntrinsicUnsafeGet:
-      return backend->GenInlinedUnsafeGet(info, intrinsic.d.data & kIntrinsicFlagIsLong,
-                                          intrinsic.d.data & kIntrinsicFlagIsObject,
-                                          intrinsic.d.data & kIntrinsicFlagIsVolatile);
-    case kIntrinsicUnsafePut:
-      return backend->GenInlinedUnsafePut(info, intrinsic.d.data & kIntrinsicFlagIsLong,
-                                          intrinsic.d.data & kIntrinsicFlagIsObject,
-                                          intrinsic.d.data & kIntrinsicFlagIsVolatile,
-                                          intrinsic.d.data & kIntrinsicFlagIsOrdered);
-    case kIntrinsicSystemArrayCopyCharArray:
-      return backend->GenInlinedArrayCopyCharArray(info);
-    case kIntrinsicFloat2Int:
-    case kIntrinsicDouble2Long:
-    case kIntrinsicFloatIsInfinite:
-    case kIntrinsicDoubleIsInfinite:
-    case kIntrinsicFloatIsNaN:
-    case kIntrinsicDoubleIsNaN:
-    case kIntrinsicBitCount:
-    case kIntrinsicCompare:
-    case kIntrinsicHighestOneBit:
-    case kIntrinsicLowestOneBit:
-    case kIntrinsicNumberOfLeadingZeros:
-    case kIntrinsicNumberOfTrailingZeros:
-    case kIntrinsicRotateRight:
-    case kIntrinsicRotateLeft:
-    case kIntrinsicSignum:
-    case kIntrinsicUnsafeGetAndAddInt:
-    case kIntrinsicUnsafeGetAndAddLong:
-    case kIntrinsicUnsafeGetAndSetInt:
-    case kIntrinsicUnsafeGetAndSetLong:
-    case kIntrinsicUnsafeGetAndSetObject:
-    case kIntrinsicUnsafeLoadFence:
-    case kIntrinsicUnsafeStoreFence:
-    case kIntrinsicUnsafeFullFence:
-    case kIntrinsicSystemArrayCopy:
-      return false;   // Not implemented in Quick.
-    default:
-      LOG(FATAL) << "Unexpected intrinsic opcode: " << intrinsic.opcode;
-      return false;  // avoid warning "control reaches end of non-void function"
-  }
-}
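
The d.data payload in the dispatch above doubles as a small flag word, decoded by the
repeated "& kIntrinsicFlag..." tests. One hypothetical decoding (flag names are from the
cases above; the concrete entry is invented):

  // A Math.min(int, int) entry:
  //   intrinsic.opcode == kIntrinsicMinMaxInt
  //   (intrinsic.d.data & kIntrinsicFlagMin) != 0  -> emit min rather than max
  //   is_long passed as the literal false          -> 32-bit variant
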
-
 bool DexFileMethodInliner::IsSpecial(uint32_t method_index) {
   ReaderMutexLock mu(Thread::Current(), lock_);
   auto it = inline_methods_.find(method_index);
   return it != inline_methods_.end() && (it->second.flags & kInlineSpecial) != 0;
 }
 
-bool DexFileMethodInliner::GenSpecial(Mir2Lir* backend, uint32_t method_idx) {
-  InlineMethod special;
-  {
-    ReaderMutexLock mu(Thread::Current(), lock_);
-    auto it = inline_methods_.find(method_idx);
-    if (it == inline_methods_.end() || (it->second.flags & kInlineSpecial) == 0) {
-      return false;
-    }
-    special = it->second;
-  }
-  return backend->SpecialMIR2LIR(special);
-}
-
-bool DexFileMethodInliner::GenInline(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
-                                     uint32_t method_idx) {
-  // Check that we're allowed to inline.
-  {
-    CompilationUnit* cu = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit();
-    if (!cu->compiler_driver->MayInline(dex_file_, cu->dex_file)) {
-      VLOG(compiler) << "Won't inline " << method_idx << " in "
-                     << cu->dex_file->GetLocation() << " from "
-                     << dex_file_->GetLocation();
-      return false;
-    }
-  }
-
-  InlineMethod method;
-  {
-    ReaderMutexLock mu(Thread::Current(), lock_);
-    auto it = inline_methods_.find(method_idx);
-    if (it == inline_methods_.end() || (it->second.flags & kInlineSpecial) == 0) {
-      return false;
-    }
-    method = it->second;
-  }
-
-  MIR* move_result = nullptr;
-  bool result = true;
-  switch (method.opcode) {
-    case kInlineOpNop:
-      break;
-    case kInlineOpNonWideConst:
-      move_result = mir_graph->FindMoveResult(bb, invoke);
-      result = GenInlineConst(mir_graph, bb, invoke, move_result, method);
-      break;
-    case kInlineOpReturnArg:
-      move_result = mir_graph->FindMoveResult(bb, invoke);
-      result = GenInlineReturnArg(mir_graph, bb, invoke, move_result, method);
-      break;
-    case kInlineOpIGet:
-      move_result = mir_graph->FindMoveResult(bb, invoke);
-      result = GenInlineIGet(mir_graph, bb, invoke, move_result, method);
-      break;
-    case kInlineOpIPut:
-      move_result = mir_graph->FindMoveResult(bb, invoke);
-      result = GenInlineIPut(mir_graph, bb, invoke, move_result, method);
-      break;
-    case kInlineOpConstructor:
-    case kInlineStringInit:
-      return false;
-    default:
-      LOG(FATAL) << "Unexpected inline op: " << method.opcode;
-      break;
-  }
-  if (result) {
-    // If the invoke has not been eliminated yet, check now whether we should do it.
-    // This is done so that dataflow analysis does not get tripped up seeing a nop invoke.
-    if (static_cast<int>(invoke->dalvikInsn.opcode) != kMirOpNop) {
-      bool is_static = IsInstructionInvokeStatic(invoke->dalvikInsn.opcode);
-      if (is_static || (invoke->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) {
-        // No null object register involved here so we can eliminate the invoke.
-        invoke->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-      } else {
-        // Invoke was kept around because a null check needed to be done.
-        invoke->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNullCheck);
-        // For invokes, the object register is in vC. For the null check MIR, it is in vA.
-        invoke->dalvikInsn.vA = invoke->dalvikInsn.vC;
-      }
-    }
-    if (move_result != nullptr) {
-      move_result->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-    }
-  }
-  return result;
-}
-
 uint32_t DexFileMethodInliner::FindClassIndex(const DexFile* dex_file, IndexCache* cache,
                                               ClassCacheIndex index) {
   uint32_t* class_index = &cache->class_indexes[index];
@@ -1100,191 +843,6 @@
   }
 }
 
-bool DexFileMethodInliner::GenInlineConst(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
-                                          MIR* move_result, const InlineMethod& method) {
-  if (move_result == nullptr) {
-    // Result is unused.
-    return true;
-  }
-
-  // Check the opcode, and for MOVE_RESULT_OBJECT also check that the constant is null.
-  DCHECK(move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT ||
-         (move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT &&
-             method.d.data == 0u));
-
-  // Insert the CONST instruction.
-  MIR* insn = AllocReplacementMIR(mir_graph, invoke);
-  insn->dalvikInsn.opcode = Instruction::CONST;
-  insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
-  insn->dalvikInsn.vB = method.d.data;
-  insn->meta.method_lowering_info = invoke->meta.method_lowering_info;  // Preserve type info.
-  bb->InsertMIRAfter(move_result, insn);
-  return true;
-}
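
A hypothetical before/after for the constant case (method and register numbers invented):

  // Before:  invoke-static {}, LFoo;.zero:()I
  //          move-result v2
  // After:   const v2, #+0 is inserted right after the move-result; GenInline()
  //          then rewrites the invoke and the move-result themselves to NOPs.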
-
-bool DexFileMethodInliner::GenInlineReturnArg(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
-                                              MIR* move_result, const InlineMethod& method) {
-  if (move_result == nullptr) {
-    // Result is unused.
-    return true;
-  }
-
-  // Select opcode and argument.
-  const InlineReturnArgData& data = method.d.return_data;
-  Instruction::Code opcode = Instruction::MOVE_FROM16;
-  uint32_t arg = GetInvokeReg(invoke, data.arg);
-  if (move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
-    DCHECK_EQ(data.is_object, 1u);
-    DCHECK_EQ(data.is_wide, 0u);
-    opcode = Instruction::MOVE_OBJECT_FROM16;
-  } else if (move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE) {
-    DCHECK_EQ(data.is_wide, 1u);
-    DCHECK_EQ(data.is_object, 0u);
-    opcode = Instruction::MOVE_WIDE_FROM16;
-    if (!WideArgIsInConsecutiveDalvikRegs(invoke, data.arg)) {
-      // The two halves of the source value are not in consecutive Dalvik registers in INVOKE.
-      return false;
-    }
-  } else {
-    DCHECK(move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT);
-    DCHECK_EQ(data.is_wide, 0u);
-    DCHECK_EQ(data.is_object, 0u);
-  }
-
-  // Insert the move instruction.
-  MIR* insn = AllocReplacementMIR(mir_graph, invoke);
-  insn->dalvikInsn.opcode = opcode;
-  insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
-  insn->dalvikInsn.vB = arg;
-  insn->meta.method_lowering_info = invoke->meta.method_lowering_info;  // Preserve type info.
-  bb->InsertMIRAfter(move_result, insn);
-  return true;
-}
-
-bool DexFileMethodInliner::GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
-                                         MIR* move_result, const InlineMethod& method) {
-  CompilationUnit* cu = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit();
-  if (cu->enable_debug & (1 << kDebugSlowFieldPath)) {
-    return false;
-  }
-
-  const InlineIGetIPutData& data = method.d.ifield_data;
-  Instruction::Code opcode = static_cast<Instruction::Code>(Instruction::IGET + data.op_variant);
-  DCHECK_EQ(InlineMethodAnalyser::IGetVariant(opcode), data.op_variant);
-  uint32_t object_reg = GetInvokeReg(invoke, data.object_arg);
-
-  if (move_result == nullptr) {
-    // Result is unused. If volatile, we still need to emit the IGET but we have no destination.
-    return !data.is_volatile;
-  }
-
-  DCHECK_EQ(data.method_is_static != 0u, IsInstructionInvokeStatic(invoke->dalvikInsn.opcode));
-  bool object_is_this = (data.method_is_static == 0u && data.object_arg == 0u);
-  if (!object_is_this) {
-    // TODO: Implement inlining of IGET on non-"this" registers (needs correct stack trace for NPE).
-    // Allow synthetic accessors. We don't care about losing their stack frame in NPE.
-    if (!InlineMethodAnalyser::IsSyntheticAccessor(
-        mir_graph->GetMethodLoweringInfo(invoke).GetTargetMethod())) {
-      return false;
-    }
-  }
-
-  if (object_is_this) {
-    // Mark the invoke as a NOP; the null check is done on the IGET. No aborts after this.
-    invoke->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-  }
-
-  MIR* insn = AllocReplacementMIR(mir_graph, invoke);
-  insn->offset = invoke->offset;
-  insn->dalvikInsn.opcode = opcode;
-  insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
-  insn->dalvikInsn.vB = object_reg;
-  mir_graph->ComputeInlineIFieldLoweringInfo(data.field_idx, invoke, insn);
-
-  DCHECK(mir_graph->GetIFieldLoweringInfo(insn).IsResolved());
-  DCHECK(mir_graph->GetIFieldLoweringInfo(insn).FastGet());
-  DCHECK_EQ(data.field_offset, mir_graph->GetIFieldLoweringInfo(insn).FieldOffset().Uint32Value());
-  DCHECK_EQ(data.is_volatile, mir_graph->GetIFieldLoweringInfo(insn).IsVolatile() ? 1u : 0u);
-
-  bb->InsertMIRAfter(move_result, insn);
-  return true;
-}
-
-bool DexFileMethodInliner::GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
-                                         MIR* move_result, const InlineMethod& method) {
-  CompilationUnit* cu = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit();
-  if (cu->enable_debug & (1 << kDebugSlowFieldPath)) {
-    return false;
-  }
-
-  const InlineIGetIPutData& data = method.d.ifield_data;
-  Instruction::Code opcode = static_cast<Instruction::Code>(Instruction::IPUT + data.op_variant);
-  DCHECK_EQ(InlineMethodAnalyser::IPutVariant(opcode), data.op_variant);
-  uint32_t object_reg = GetInvokeReg(invoke, data.object_arg);
-  uint32_t src_reg = GetInvokeReg(invoke, data.src_arg);
-  uint32_t return_reg =
-      data.return_arg_plus1 != 0u ? GetInvokeReg(invoke, data.return_arg_plus1 - 1u) : 0u;
-
-  if (opcode == Instruction::IPUT_WIDE && !WideArgIsInConsecutiveDalvikRegs(invoke, data.src_arg)) {
-    // The two halves of the source value are not in consecutive Dalvik registers in INVOKE.
-    return false;
-  }
-
-  DCHECK(move_result == nullptr || data.return_arg_plus1 != 0u);
-  if (move_result != nullptr && move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE &&
-      !WideArgIsInConsecutiveDalvikRegs(invoke, data.return_arg_plus1 - 1u)) {
-    // The two halves of the return value are not in consecutive Dalvik registers in INVOKE.
-    return false;
-  }
-
-  DCHECK_EQ(data.method_is_static != 0u, IsInstructionInvokeStatic(invoke->dalvikInsn.opcode));
-  bool object_is_this = (data.method_is_static == 0u && data.object_arg == 0u);
-  if (!object_is_this) {
-    // TODO: Implement inlining of IPUT on non-"this" registers (needs correct stack trace for NPE).
-    // Allow synthetic accessors. We don't care about losing their stack frame in NPE.
-    if (!InlineMethodAnalyser::IsSyntheticAccessor(
-        mir_graph->GetMethodLoweringInfo(invoke).GetTargetMethod())) {
-      return false;
-    }
-  }
-
-  if (object_is_this) {
-    // Mark the invoke as a NOP; the null check is done on the IPUT. No aborts after this.
-    invoke->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
-  }
-
-  MIR* insn = AllocReplacementMIR(mir_graph, invoke);
-  insn->dalvikInsn.opcode = opcode;
-  insn->dalvikInsn.vA = src_reg;
-  insn->dalvikInsn.vB = object_reg;
-  mir_graph->ComputeInlineIFieldLoweringInfo(data.field_idx, invoke, insn);
-
-  DCHECK(mir_graph->GetIFieldLoweringInfo(insn).IsResolved());
-  DCHECK(mir_graph->GetIFieldLoweringInfo(insn).FastPut());
-  DCHECK_EQ(data.field_offset, mir_graph->GetIFieldLoweringInfo(insn).FieldOffset().Uint32Value());
-  DCHECK_EQ(data.is_volatile, mir_graph->GetIFieldLoweringInfo(insn).IsVolatile() ? 1u : 0u);
-
-  bb->InsertMIRAfter(invoke, insn);
-
-  if (move_result != nullptr) {
-    MIR* move = AllocReplacementMIR(mir_graph, invoke);
-    move->offset = move_result->offset;
-    if (move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT) {
-      move->dalvikInsn.opcode = Instruction::MOVE_FROM16;
-    } else if (move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
-      move->dalvikInsn.opcode = Instruction::MOVE_OBJECT_FROM16;
-    } else {
-      DCHECK_EQ(move_result->dalvikInsn.opcode, Instruction::MOVE_RESULT_WIDE);
-      move->dalvikInsn.opcode = Instruction::MOVE_WIDE_FROM16;
-    }
-    move->dalvikInsn.vA = move_result->dalvikInsn.vA;
-    move->dalvikInsn.vB = return_reg;
-    move->meta.method_lowering_info = invoke->meta.method_lowering_info;  // Preserve type info.
-    bb->InsertMIRAfter(insn, move);
-  }
-  return true;
-}
-
 uint32_t DexFileMethodInliner::GetOffsetForStringInit(uint32_t method_index, size_t pointer_size) {
   ReaderMutexLock mu(Thread::Current(), lock_);
   auto it = inline_methods_.find(method_index);
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 34b56cd..fbe403f 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -31,12 +31,6 @@
 class MethodVerifier;
 }  // namespace verifier
 
-class BasicBlock;
-struct CallInfo;
-class MIR;
-class MIRGraph;
-class Mir2Lir;
-
 /**
  * Handles inlining of methods from a particular DexFile.
  *
@@ -75,27 +69,11 @@
     bool IsIntrinsic(uint32_t method_index, InlineMethod* intrinsic) REQUIRES(!lock_);
 
     /**
-     * Generate code for an intrinsic function invocation.
-     */
-    bool GenIntrinsic(Mir2Lir* backend, CallInfo* info) REQUIRES(!lock_);
-
-    /**
      * Check whether a particular method index corresponds to a special function.
      */
     bool IsSpecial(uint32_t method_index) REQUIRES(!lock_);
 
     /**
-     * Generate code for a special function.
-     */
-    bool GenSpecial(Mir2Lir* backend, uint32_t method_idx) REQUIRES(!lock_);
-
-    /**
-     * Try to inline an invoke.
-     */
-    bool GenInline(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke, uint32_t method_idx)
-        REQUIRES(!lock_);
-
-    /**
      * Gets the thread pointer entrypoint offset for a string init method index and pointer size.
      */
     uint32_t GetOffsetForStringInit(uint32_t method_index, size_t pointer_size)
@@ -405,15 +383,6 @@
 
     bool AddInlineMethod(int32_t method_idx, const InlineMethod& method) REQUIRES(!lock_);
 
-    static bool GenInlineConst(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
-                               MIR* move_result, const InlineMethod& method);
-    static bool GenInlineReturnArg(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
-                                   MIR* move_result, const InlineMethod& method);
-    static bool GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
-                              MIR* move_result, const InlineMethod& method);
-    static bool GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
-                              MIR* move_result, const InlineMethod& method);
-
     ReaderWriterMutex lock_;
     /*
      * Maps method indexes (for the particular DexFile) to Intrinsic definitions.
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
deleted file mode 100644
index 5da7214..0000000
--- a/compiler/dex/quick/gen_common.cc
+++ /dev/null
@@ -1,2253 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "mir_to_lir-inl.h"
-
-#include <functional>
-
-#include "arch/arm/instruction_set_features_arm.h"
-#include "base/bit_utils.h"
-#include "base/macros.h"
-#include "dex/compiler_ir.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/arm/arm_lir.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "mirror/array.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object-inl.h"
-#include "mirror/object_reference.h"
-#include "utils/dex_cache_arrays_layout-inl.h"
-#include "verifier/method_verifier.h"
-
-namespace art {
-
-// Shortcuts to repeatedly used long types.
-typedef mirror::ObjectArray<mirror::Object> ObjArray;
-typedef mirror::ObjectArray<mirror::Class> ClassArray;
-
-/*
- * This source file contains "gen" codegen routines that should
- * be applicable to most targets.  Only mid-level support utilities
- * and "op" calls may be used here.
- */
-
-ALWAYS_INLINE static inline bool ForceSlowFieldPath(CompilationUnit* cu) {
-  return (cu->enable_debug & (1 << kDebugSlowFieldPath)) != 0;
-}
-
-ALWAYS_INLINE static inline bool ForceSlowStringPath(CompilationUnit* cu) {
-  return (cu->enable_debug & (1 << kDebugSlowStringPath)) != 0;
-}
-
-ALWAYS_INLINE static inline bool ForceSlowTypePath(CompilationUnit* cu) {
-  return (cu->enable_debug & (1 << kDebugSlowTypePath)) != 0;
-}
-
-void Mir2Lir::GenIfNullUseHelperImm(RegStorage r_result, QuickEntrypointEnum trampoline, int imm) {
-  class CallHelperImmMethodSlowPath : public LIRSlowPath {
-   public:
-    CallHelperImmMethodSlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont,
-                                QuickEntrypointEnum trampoline_in, int imm_in,
-                                RegStorage r_result_in)
-        : LIRSlowPath(m2l, fromfast, cont), trampoline_(trampoline_in),
-          imm_(imm_in), r_result_(r_result_in) {
-    }
-
-    void Compile() {
-      GenerateTargetLabel();
-      m2l_->CallRuntimeHelperImm(trampoline_, imm_, true);
-      m2l_->OpRegCopy(r_result_,  m2l_->TargetReg(kRet0, kRef));
-      m2l_->OpUnconditionalBranch(cont_);
-    }
-
-   private:
-    QuickEntrypointEnum trampoline_;
-    const int imm_;
-    const RegStorage r_result_;
-  };
-
-  LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, nullptr);
-  LIR* cont = NewLIR0(kPseudoTargetLabel);
-
-  AddSlowPath(new (arena_) CallHelperImmMethodSlowPath(this, branch, cont, trampoline, imm,
-                                                       r_result));
-}
-
-void Mir2Lir::LoadTypeFromCache(uint32_t type_index, RegStorage class_reg) {
-  if (CanUseOpPcRelDexCacheArrayLoad()) {
-    uint32_t offset = dex_cache_arrays_layout_.TypeOffset(type_index);
-    OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg, false);
-  } else {
-    RegStorage r_method = LoadCurrMethodWithHint(class_reg);
-    MemberOffset resolved_types_offset = ArtMethod::DexCacheResolvedTypesOffset(
-        GetInstructionSetPointerSize(cu_->instruction_set));
-    LoadBaseDisp(r_method, resolved_types_offset.Int32Value(), class_reg,
-                 cu_->target64 ? k64 : k32, kNotVolatile);
-    int32_t offset_of_type = GetCacheOffset(type_index);
-    LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
-  }
-}
-
-RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& field_info,
-                                               int opt_flags) {
-  DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
-  // May do runtime call so everything to home locations.
-  FlushAllRegs();
-  // Using fixed register to sync with possible call to runtime support.
-  RegStorage r_base = TargetReg(kArg0, kRef);
-  LockTemp(r_base);
-  LoadTypeFromCache(field_info.StorageIndex(), r_base);
-  // r_base now points at static storage (Class*) or null if the type is not yet resolved.
-  LIR* unresolved_branch = nullptr;
-  if (!field_info.IsClassInDexCache() && (opt_flags & MIR_CLASS_IS_IN_DEX_CACHE) == 0) {
-    // Check if r_base is null.
-    unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, nullptr);
-  }
-  LIR* uninit_branch = nullptr;
-  if (!field_info.IsClassInitialized() && (opt_flags & MIR_CLASS_IS_INITIALIZED) == 0) {
-    // Check if r_base is not yet initialized class.
-    RegStorage r_tmp = TargetReg(kArg2, kNotWide);
-    LockTemp(r_tmp);
-    uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
-                                      mirror::Class::StatusOffset().Int32Value(),
-                                      mirror::Class::kStatusInitialized, nullptr, nullptr);
-    FreeTemp(r_tmp);
-  }
-  if (unresolved_branch != nullptr || uninit_branch != nullptr) {
-    //
-    // Slow path to ensure a class is initialized for sget/sput.
-    //
-    class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
-     public:
-      // There are up to two branches to the static field slow path: the "unresolved" branch,
-      // taken when the type entry in the dex cache is null, and the "uninit" branch, taken
-      // when the class is not yet initialized. At least one will be non-null here, otherwise
-      // we wouldn't generate the slow path.
-      StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
-                          RegStorage r_base_in)
-          : LIRSlowPath(m2l, unresolved != nullptr ? unresolved : uninit, cont),
-            second_branch_(unresolved != nullptr ? uninit : nullptr),
-            storage_index_(storage_index), r_base_(r_base_in) {
-      }
-
-      void Compile() {
-        LIR* target = GenerateTargetLabel();
-        if (second_branch_ != nullptr) {
-          second_branch_->target = target;
-        }
-        m2l_->CallRuntimeHelperImm(kQuickInitializeStaticStorage, storage_index_, true);
-        // Copy helper's result into r_base, a no-op on all but MIPS.
-        m2l_->OpRegCopy(r_base_,  m2l_->TargetReg(kRet0, kRef));
-
-        m2l_->OpUnconditionalBranch(cont_);
-      }
-
-     private:
-      // Second branch to the slow path, or null if there's only one branch.
-      LIR* const second_branch_;
-
-      const int storage_index_;
-      const RegStorage r_base_;
-    };
-
-    // The slow path is invoked if r_base is null or if the class it points
-    // to is not yet initialized.
-    LIR* cont = NewLIR0(kPseudoTargetLabel);
-    AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
-                                                 field_info.StorageIndex(), r_base));
-  }
-  return r_base;
-}
-
-/*
- * Generate a kPseudoBarrier marker to indicate the boundary of special
- * blocks.
- */
-void Mir2Lir::GenBarrier() {
-  LIR* barrier = NewLIR0(kPseudoBarrier);
-  /* Mark all resources as being clobbered */
-  DCHECK(!barrier->flags.use_def_invalid);
-  barrier->u.m.def_mask = &kEncodeAll;
-}
-
-void Mir2Lir::GenDivZeroException() {
-  LIR* branch = OpUnconditionalBranch(nullptr);
-  AddDivZeroCheckSlowPath(branch);
-}
-
-void Mir2Lir::GenDivZeroCheck(ConditionCode c_code) {
-  LIR* branch = OpCondBranch(c_code, nullptr);
-  AddDivZeroCheckSlowPath(branch);
-}
-
-void Mir2Lir::GenDivZeroCheck(RegStorage reg) {
-  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
-  AddDivZeroCheckSlowPath(branch);
-}
-
-void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
-  class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath {
-   public:
-    DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch_in)
-        : LIRSlowPath(m2l, branch_in) {
-    }
-
-    void Compile() OVERRIDE {
-      m2l_->ResetRegPool();
-      m2l_->ResetDefTracking();
-      GenerateTargetLabel(kPseudoThrowTarget);
-      m2l_->CallRuntimeHelper(kQuickThrowDivZero, true);
-    }
-  };
-
-  AddSlowPath(new (arena_) DivZeroCheckSlowPath(this, branch));
-}
-
-void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) {
-  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
-   public:
-    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, RegStorage index_in,
-                             RegStorage length_in)
-        : LIRSlowPath(m2l, branch_in),
-          index_(index_in), length_(length_in) {
-    }
-
-    void Compile() OVERRIDE {
-      m2l_->ResetRegPool();
-      m2l_->ResetDefTracking();
-      GenerateTargetLabel(kPseudoThrowTarget);
-      m2l_->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, index_, length_, true);
-    }
-
-   private:
-    const RegStorage index_;
-    const RegStorage length_;
-  };
-
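-  // kCondUge is an unsigned comparison: the branch is taken when index >= length,
-  // which also traps negative indices, since they appear as large unsigned values.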
-  LIR* branch = OpCmpBranch(kCondUge, index, length, nullptr);
-  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
-}
-
-void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) {
-  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
-   public:
-    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, int index_in, RegStorage length_in)
-        : LIRSlowPath(m2l, branch_in),
-          index_(index_in), length_(length_in) {
-    }
-
-    void Compile() OVERRIDE {
-      m2l_->ResetRegPool();
-      m2l_->ResetDefTracking();
-      GenerateTargetLabel(kPseudoThrowTarget);
-
-      RegStorage arg1_32 = m2l_->TargetReg(kArg1, kNotWide);
-      RegStorage arg0_32 = m2l_->TargetReg(kArg0, kNotWide);
-
-      m2l_->OpRegCopy(arg1_32, length_);
-      m2l_->LoadConstant(arg0_32, index_);
-      m2l_->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, arg0_32, arg1_32, true);
-    }
-
-   private:
-    const int32_t index_;
-    const RegStorage length_;
-  };
-
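-  // kCondLs (unsigned lower-or-same) branches when length <= index, i.e. when the
-  // constant index is out of bounds; the operands are swapped relative to the
-  // register-index case because the immediate must come second.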
-  LIR* branch = OpCmpImmBranch(kCondLs, length, index, nullptr);
-  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
-}
-
-LIR* Mir2Lir::GenNullCheck(RegStorage reg) {
-  class NullCheckSlowPath : public Mir2Lir::LIRSlowPath {
-   public:
-    NullCheckSlowPath(Mir2Lir* m2l, LIR* branch)
-        : LIRSlowPath(m2l, branch) {
-    }
-
-    void Compile() OVERRIDE {
-      m2l_->ResetRegPool();
-      m2l_->ResetDefTracking();
-      GenerateTargetLabel(kPseudoThrowTarget);
-      m2l_->CallRuntimeHelper(kQuickThrowNullPointer, true);
-    }
-  };
-
-  LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
-  AddSlowPath(new (arena_) NullCheckSlowPath(this, branch));
-  return branch;
-}
-
-/* Perform null-check on a register.  */
-LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) {
-  if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-    return GenExplicitNullCheck(m_reg, opt_flags);
-  }
-  // If null check has not been eliminated, reset redundant store tracking.
-  if ((opt_flags & MIR_IGNORE_NULL_CHECK) == 0) {
-    ResetDefTracking();
-  }
-  return nullptr;
-}
-
-/* Perform an explicit null-check on a register.  */
-LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
-  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
-    return nullptr;
-  }
-  return GenNullCheck(m_reg);
-}
-
-void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
-  if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
-      return;
-    }
-    // Insert after last instruction.
-    MarkSafepointPC(last_lir_insn_);
-  }
-}
-
-void Mir2Lir::MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after) {
-  if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
-      return;
-    }
-    MarkSafepointPCAfter(after);
-  }
-}
-
-void Mir2Lir::MarkPossibleStackOverflowException() {
-  if (cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
-    MarkSafepointPC(last_lir_insn_);
-  }
-}
-
-void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
-  if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
-      return;
-    }
-    // Force an implicit null check by performing a memory operation (load) from the given
-    // register with offset 0.  This will cause a signal if the register contains 0 (null).
-    RegStorage tmp = AllocTemp();
-    // TODO: for Mips, would be best to use rZERO as the bogus register target.
-    LIR* load = Load32Disp(reg, 0, tmp);
-    FreeTemp(tmp);
-    MarkSafepointPC(load);
-  }
-}
-
-void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
-                                  RegLocation rl_src2, LIR* taken) {
-  ConditionCode cond;
-  RegisterClass reg_class = (rl_src1.ref || rl_src2.ref) ? kRefReg : kCoreReg;
-  switch (opcode) {
-    case Instruction::IF_EQ:
-      cond = kCondEq;
-      break;
-    case Instruction::IF_NE:
-      cond = kCondNe;
-      break;
-    case Instruction::IF_LT:
-      cond = kCondLt;
-      break;
-    case Instruction::IF_GE:
-      cond = kCondGe;
-      break;
-    case Instruction::IF_GT:
-      cond = kCondGt;
-      break;
-    case Instruction::IF_LE:
-      cond = kCondLe;
-      break;
-    default:
-      cond = static_cast<ConditionCode>(0);
-      LOG(FATAL) << "Unexpected opcode " << opcode;
-  }
-
-  // Normalize such that if either operand is constant, src2 will be constant
-  if (rl_src1.is_const) {
-    RegLocation rl_temp = rl_src1;
-    rl_src1 = rl_src2;
-    rl_src2 = rl_temp;
-    cond = FlipComparisonOrder(cond);
-  }
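-  // E.g. "5 < v0" becomes "v0 > 5": kCondLt flips to kCondGt, and the constant can
-  // then be handled by the immediate-comparison path below.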
-
-  rl_src1 = LoadValue(rl_src1, reg_class);
-  // Is this really an immediate comparison?
-  if (rl_src2.is_const) {
-    // If it's already live in a register or not easily materialized, just keep going
-    RegLocation rl_temp = UpdateLoc(rl_src2);
-    int32_t constant_value = mir_graph_->ConstantValue(rl_src2);
-    if ((rl_temp.location == kLocDalvikFrame) &&
-        InexpensiveConstantInt(constant_value, opcode)) {
-      // OK - convert this to a compare immediate and branch
-      OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
-      return;
-    }
-
-    // It's also commonly more efficient to have a test against zero with Eq/Ne. This is not worse
-    // for x86, and allows a cbz/cbnz for Arm and Mips. At the same time, it works around a register
-    // mismatch for 64b systems, where a reference is compared against null, as dex bytecode uses
-    // the 32b literal 0 for null.
-    if (constant_value == 0 && (cond == kCondEq || cond == kCondNe)) {
-      // Use the OpCmpImmBranch and ignore the value in the register.
-      OpCmpImmBranch(cond, rl_src1.reg, 0, taken);
-      return;
-    }
-  }
-
-  rl_src2 = LoadValue(rl_src2, reg_class);
-  OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
-}
-
-void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken) {
-  ConditionCode cond;
-  RegisterClass reg_class = rl_src.ref ? kRefReg : kCoreReg;
-  rl_src = LoadValue(rl_src, reg_class);
-  switch (opcode) {
-    case Instruction::IF_EQZ:
-      cond = kCondEq;
-      break;
-    case Instruction::IF_NEZ:
-      cond = kCondNe;
-      break;
-    case Instruction::IF_LTZ:
-      cond = kCondLt;
-      break;
-    case Instruction::IF_GEZ:
-      cond = kCondGe;
-      break;
-    case Instruction::IF_GTZ:
-      cond = kCondGt;
-      break;
-    case Instruction::IF_LEZ:
-      cond = kCondLe;
-      break;
-    default:
-      cond = static_cast<ConditionCode>(0);
-      LOG(FATAL) << "Unexpected opcode " << opcode;
-  }
-  OpCmpImmBranch(cond, rl_src.reg, 0, taken);
-}
-
-void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  if (rl_src.location == kLocPhysReg) {
-    OpRegCopy(rl_result.reg, rl_src.reg);
-  } else {
-    LoadValueDirect(rl_src, rl_result.reg.GetLow());
-  }
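-  // Sign-extend: an arithmetic shift right by 31 fills the high word with copies
-  // of the low word's sign bit (e.g. 0x80000000 widens to 0xFFFFFFFF80000000).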
-  OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31);
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void Mir2Lir::GenLongToInt(RegLocation rl_dest, RegLocation rl_src) {
-  rl_src = UpdateLocWide(rl_src);
-  rl_src = NarrowRegLoc(rl_src);
-  StoreValue(rl_dest, rl_src);
-}
-
-void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
-                              RegLocation rl_src) {
-  rl_src = LoadValue(rl_src, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  OpKind op = kOpInvalid;
-  switch (opcode) {
-    case Instruction::INT_TO_BYTE:
-      op = kOp2Byte;
-      break;
-    case Instruction::INT_TO_SHORT:
-      op = kOp2Short;
-      break;
-    case Instruction::INT_TO_CHAR:
-      op = kOp2Char;
-      break;
-    default:
-      LOG(ERROR) << "Bad int conversion type";
-  }
-  OpRegReg(op, rl_result.reg, rl_src.reg);
-  StoreValue(rl_dest, rl_result);
-}
-
-/*
- * Let helper function take care of everything.  Will call
- * Array::AllocFromCode(type_idx, method, count);
- * Note: AllocFromCode will handle checks for errNegativeArraySize.
- */
-void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
-                          RegLocation rl_src) {
-  FlushAllRegs();  /* Everything to home location */
-  const DexFile* dex_file = cu_->dex_file;
-  CompilerDriver* driver = cu_->compiler_driver;
-  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *dex_file, type_idx)) {
-    bool is_type_initialized;  // Ignored as an array does not have an initializer.
-    bool use_direct_type_ptr;
-    uintptr_t direct_type_ptr;
-    bool is_finalizable;
-    if (kEmbedClassInCode &&
-        driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr,
-                                   &direct_type_ptr, &is_finalizable)) {
-      // The fast path.
-      if (!use_direct_type_ptr) {
-        LoadClassType(*dex_file, type_idx, kArg0);
-        CallRuntimeHelperRegRegLocationMethod(kQuickAllocArrayResolved, TargetReg(kArg0, kNotWide),
-                                              rl_src, true);
-      } else {
-        // Use the direct pointer.
-        CallRuntimeHelperImmRegLocationMethod(kQuickAllocArrayResolved, direct_type_ptr, rl_src,
-                                              true);
-      }
-    } else {
-      // The slow path.
-      CallRuntimeHelperImmRegLocationMethod(kQuickAllocArray, type_idx, rl_src, true);
-    }
-  } else {
-    CallRuntimeHelperImmRegLocationMethod(kQuickAllocArrayWithAccessCheck, type_idx, rl_src, true);
-  }
-  StoreValue(rl_dest, GetReturn(kRefReg));
-}
-
-/*
- * Similar to GenNewArray, but with post-allocation initialization.
- * The verifier guarantees we're dealing with an array class.  Current code
- * throws the runtime exception "bad Filled array req" for 'D' and 'J', and an
- * internal "unimplemented" error for element types other than 'L', '[' or 'I'.
- */
-void Mir2Lir::GenFilledNewArray(CallInfo* info) {
-  size_t elems = info->num_arg_words;
-  int type_idx = info->index;
-  FlushAllRegs();  /* Everything to home location */
-  QuickEntrypointEnum target;
-  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
-                                                       type_idx)) {
-    target = kQuickCheckAndAllocArray;
-  } else {
-    target = kQuickCheckAndAllocArrayWithAccessCheck;
-  }
-  CallRuntimeHelperImmImmMethod(target, type_idx, elems, true);
-  FreeTemp(TargetReg(kArg2, kNotWide));
-  FreeTemp(TargetReg(kArg1, kNotWide));
-  /*
-   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
-   * return region.  Because AllocFromCode placed the new array
-   * in kRet0, we'll just lock it into place.  When debugger support is
-   * added, it may be necessary to additionally copy all return
-   * values to a home location in thread-local storage
-   */
-  RegStorage ref_reg = TargetReg(kRet0, kRef);
-  LockTemp(ref_reg);
-
-  // TODO: use the correct component size, currently all supported types
-  // share array alignment with ints (see comment at head of function)
-  size_t component_size = sizeof(int32_t);
-
-  if (elems > 5) {
-    DCHECK(info->is_range);  // Non-range insn can't encode more than 5 elems.
-    /*
-     * Bit of ugliness here.  We're going to generate a mem copy loop
-     * on the register range, but it is possible that some regs
-     * in the range have been promoted.  This is unlikely, but
-     * before generating the copy, we'll just force a flush
-     * of any regs in the source range that have been promoted to
-     * home location.
-     */
-    for (size_t i = 0; i < elems; i++) {
-      RegLocation loc = UpdateLoc(info->args[i]);
-      if (loc.location == kLocPhysReg) {
-        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-        if (loc.ref) {
-          StoreRefDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
-        } else {
-          Store32Disp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
-        }
-      }
-    }
-    /*
-     * TUNING note: generated code here could be much improved, but
-     * this is an uncommon operation and isn't especially performance
-     * critical.
-     */
-    // This is addressing the stack, which may be out of the 4G area.
-    RegStorage r_src = AllocTempRef();
-    RegStorage r_dst = AllocTempRef();
-    RegStorage r_idx = AllocTempRef();  // Not really a reference, but match src/dst.
-    RegStorage r_val;
-    switch (cu_->instruction_set) {
-      case kThumb2:
-      case kArm64:
-        r_val = TargetReg(kLr, kNotWide);
-        break;
-      case kX86:
-      case kX86_64:
-        FreeTemp(ref_reg);
-        r_val = AllocTemp();
-        break;
-      case kMips:
-      case kMips64:
-        r_val = AllocTemp();
-        break;
-      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
-    }
-    // Set up source pointer
-    RegLocation rl_first = info->args[0];
-    OpRegRegImm(kOpAdd, r_src, TargetPtrReg(kSp), SRegOffset(rl_first.s_reg_low));
-    // Set up the target pointer
-    OpRegRegImm(kOpAdd, r_dst, ref_reg,
-                mirror::Array::DataOffset(component_size).Int32Value());
-    // Set up the loop counter (known to be > 0)
-    LoadConstant(r_idx, static_cast<int>(elems - 1));
-    // Generate the copy loop.  Going backwards for convenience
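-    // Each iteration moves one 32-bit element; the index register is scaled by
-    // 4 (shift amount 2) in the indexed load and store below.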
-    LIR* loop_head_target = NewLIR0(kPseudoTargetLabel);
-    // Copy next element
-    {
-      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-      LoadBaseIndexed(r_src, r_idx, r_val, 2, k32);
-      // NOTE: No dalvik register annotation, local optimizations will be stopped
-      // by the loop boundaries.
-    }
-    StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32);
-    FreeTemp(r_val);
-    OpDecAndBranch(kCondGe, r_idx, loop_head_target);
-    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
-      // Restore the target pointer
-      OpRegRegImm(kOpAdd, ref_reg, r_dst,
-                  -mirror::Array::DataOffset(component_size).Int32Value());
-    }
-    FreeTemp(r_idx);
-    FreeTemp(r_dst);
-    FreeTemp(r_src);
-  } else {
-    DCHECK_LE(elems, 5u);  // Usually but not necessarily non-range.
-    // TUNING: interleave
-    for (size_t i = 0; i < elems; i++) {
-      RegLocation rl_arg;
-      if (info->args[i].ref) {
-        rl_arg = LoadValue(info->args[i], kRefReg);
-        StoreRefDisp(ref_reg,
-                    mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg,
-                    kNotVolatile);
-      } else {
-        rl_arg = LoadValue(info->args[i], kCoreReg);
-        Store32Disp(ref_reg,
-                    mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
-      }
-      // If the LoadValue caused a temp to be allocated, free it
-      if (IsTemp(rl_arg.reg)) {
-        FreeTemp(rl_arg.reg);
-      }
-    }
-  }
-  if (elems != 0 && info->args[0].ref) {
-    // If there is at least one potentially non-null value, unconditionally mark the GC card.
-    for (size_t i = 0; i < elems; i++) {
-      if (!mir_graph_->IsConstantNullRef(info->args[i])) {
-        UnconditionallyMarkGCCard(ref_reg);
-        break;
-      }
-    }
-  }
-  if (info->result.location != kLocInvalid) {
-    StoreValue(info->result, GetReturn(kRefReg));
-  }
-}
-
-/*
- * Array data table format:
- *  ushort ident = 0x0300   magic value
- *  ushort width            width of each element in the table
- *  uint   size             number of elements in the table
- *  ubyte  data[size*width] table of data values (may contain a single-byte
- *                          padding at the end)
- *
- * Total size is 4+(width * size + 1)/2 16-bit code units.
- */
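-// For example, a payload with width = 2 and size = 3 occupies
-// 4 + (2 * 3 + 1) / 2 = 7 code units: 4 for the header plus 3 holding the six
-// packed data bytes.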
-void Mir2Lir::GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  if (kIsDebugBuild) {
-    const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-    const Instruction::ArrayDataPayload* payload =
-        reinterpret_cast<const Instruction::ArrayDataPayload*>(table);
-    CHECK_EQ(payload->ident, static_cast<uint16_t>(Instruction::kArrayDataSignature));
-  }
-  uint32_t table_offset_from_start = mir->offset + static_cast<int32_t>(table_offset);
-  CallRuntimeHelperImmRegLocation(kQuickHandleFillArrayData, table_offset_from_start, rl_src, true);
-}
-
-void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, OpSize size) {
-  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
-  DCHECK_EQ(SPutMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
-  cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
-  if (!ForceSlowFieldPath(cu_) && field_info.FastPut()) {
-    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
-    RegStorage r_base;
-    if (field_info.IsReferrersClass()) {
-      // Fast path, static storage base is this method's class
-      r_base = AllocTempRef();
-      RegStorage r_method = LoadCurrMethodWithHint(r_base);
-      LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
-                  kNotVolatile);
-    } else {
-      // Medium path: static storage base is in a different class, which requires a
-      // check that the other class is initialized.
-      r_base = GenGetOtherTypeForSgetSput(field_info, mir->optimization_flags);
-      if (!field_info.IsClassInitialized() &&
-          (mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0) {
-        // Ensure load of status and store of value don't re-order.
-        // TODO: Presumably the actual value store is control-dependent on the status load,
-        // and will thus not be reordered in any case, since stores are never speculated.
-        // Does later code "know" that the class is now initialized?  If so, we still
-        // need the barrier to guard later static loads.
-        GenMemBarrier(kLoadAny);
-      }
-    }
-    // rBase now holds static storage base
-    RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
-    if (IsWide(size)) {
-      rl_src = LoadValueWide(rl_src, reg_class);
-    } else {
-      rl_src = LoadValue(rl_src, reg_class);
-    }
-    if (IsRef(size)) {
-      StoreRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg,
-                   field_info.IsVolatile() ? kVolatile : kNotVolatile);
-    } else {
-      StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, size,
-                    field_info.IsVolatile() ? kVolatile : kNotVolatile);
-    }
-    if (IsRef(size) && !mir_graph_->IsConstantNullRef(rl_src)) {
-      MarkGCCard(mir->optimization_flags, rl_src.reg, r_base);
-    }
-    FreeTemp(r_base);
-  } else {
-    FlushAllRegs();  // Everything to home locations
-    QuickEntrypointEnum target;
-    switch (size) {
-      case kReference:
-        target = kQuickSetObjStatic;
-        break;
-      case k64:
-      case kDouble:
-        target = kQuickSet64Static;
-        break;
-      case k32:
-      case kSingle:
-        target = kQuickSet32Static;
-        break;
-      case kSignedHalf:
-      case kUnsignedHalf:
-        target = kQuickSet16Static;
-        break;
-      case kSignedByte:
-      case kUnsignedByte:
-        target = kQuickSet8Static;
-        break;
-      case kWord:  // Intentional fallthrough.
-      default:
-        LOG(FATAL) << "Can't determine entrypoint for: " << size;
-        target = kQuickSet32Static;
-    }
-    CallRuntimeHelperImmRegLocation(target, field_info.FieldIndex(), rl_src, true);
-  }
-}
-
-void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, OpSize size, Primitive::Type type) {
-  const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
-  DCHECK_EQ(SGetMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
-  cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
-
-  if (!ForceSlowFieldPath(cu_) && field_info.FastGet()) {
-    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
-    RegStorage r_base;
-    if (field_info.IsReferrersClass()) {
-      // Fast path, static storage base is this method's class
-      r_base = AllocTempRef();
-      RegStorage r_method = LoadCurrMethodWithHint(r_base);
-      LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), r_base,
-                  kNotVolatile);
-    } else {
-      // Medium path: static storage base is in a different class, which requires a
-      // check that the other class is initialized.
-      r_base = GenGetOtherTypeForSgetSput(field_info, mir->optimization_flags);
-      if (!field_info.IsClassInitialized() &&
-          (mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0) {
-        // Ensure load of status and load of value don't re-order.
-        GenMemBarrier(kLoadAny);
-      }
-    }
-    // r_base now holds static storage base
-    RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
-    RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
-
-    int field_offset = field_info.FieldOffset().Int32Value();
-    if (IsRef(size)) {
-      // TODO: DCHECK?
-      LoadRefDisp(r_base, field_offset, rl_result.reg, field_info.IsVolatile() ? kVolatile :
-          kNotVolatile);
-    } else {
-      LoadBaseDisp(r_base, field_offset, rl_result.reg, size, field_info.IsVolatile() ?
-          kVolatile : kNotVolatile);
-    }
-    FreeTemp(r_base);
-
-    if (IsWide(size)) {
-      StoreValueWide(rl_dest, rl_result);
-    } else {
-      StoreValue(rl_dest, rl_result);
-    }
-  } else {
-    DCHECK(SizeMatchesTypeForEntrypoint(size, type));
-    FlushAllRegs();  // Everything to home locations
-    QuickEntrypointEnum target;
-    switch (type) {
-      case Primitive::kPrimNot:
-        target = kQuickGetObjStatic;
-        break;
-      case Primitive::kPrimLong:
-      case Primitive::kPrimDouble:
-        target = kQuickGet64Static;
-        break;
-      case Primitive::kPrimInt:
-      case Primitive::kPrimFloat:
-        target = kQuickGet32Static;
-        break;
-      case Primitive::kPrimShort:
-        target = kQuickGetShortStatic;
-        break;
-      case Primitive::kPrimChar:
-        target = kQuickGetCharStatic;
-        break;
-      case Primitive::kPrimByte:
-        target = kQuickGetByteStatic;
-        break;
-      case Primitive::kPrimBoolean:
-        target = kQuickGetBooleanStatic;
-        break;
-      case Primitive::kPrimVoid:  // Intentional fallthrough.
-      default:
-        LOG(FATAL) << "Can't determine entrypoint for: " << type;
-        target = kQuickGet32Static;
-    }
-    CallRuntimeHelperImm(target, field_info.FieldIndex(), true);
-
-    // FIXME: pGetXXStatic always returns an int or int64 regardless of rl_dest.fp.
-    if (IsWide(size)) {
-      RegLocation rl_result = GetReturnWide(kCoreReg);
-      StoreValueWide(rl_dest, rl_result);
-    } else {
-      RegLocation rl_result = GetReturn(rl_dest.ref ? kRefReg : kCoreReg);
-      StoreValue(rl_dest, rl_result);
-    }
-  }
-}
-
-// Generate code for all slow paths.
-void Mir2Lir::HandleSlowPaths() {
-  // We must re-check slow_paths_.size() on every iteration because a slow path
-  // may add new slow paths during Compile(); a range-based loop's iterators
-  // would be invalidated by that growth.
-  for (size_t i = 0; i != slow_paths_.size(); ++i) {
-    slow_paths_[i]->Compile();
-  }
-  slow_paths_.clear();
-}
-
-void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type,
-                      RegLocation rl_dest, RegLocation rl_obj) {
-  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
-  if (kIsDebugBuild) {
-    auto mem_access_type = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode) ?
-        IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode) :
-        IGetMemAccessType(mir->dalvikInsn.opcode);
-    DCHECK_EQ(mem_access_type, field_info.MemAccessType()) << mir->dalvikInsn.opcode;
-  }
-  cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
-  if (!ForceSlowFieldPath(cu_) && field_info.FastGet()) {
-    RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
-    // A load of the class will lead to an iget with offset 0.
-    DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
-    rl_obj = LoadValue(rl_obj, kRefReg);
-    GenNullCheck(rl_obj.reg, opt_flags);
-    RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
-    int field_offset = field_info.FieldOffset().Int32Value();
-    LIR* load_lir;
-    if (IsRef(size)) {
-      load_lir = LoadRefDisp(rl_obj.reg, field_offset, rl_result.reg, field_info.IsVolatile() ?
-          kVolatile : kNotVolatile);
-    } else {
-      load_lir = LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, size,
-                              field_info.IsVolatile() ? kVolatile : kNotVolatile);
-    }
-    MarkPossibleNullPointerExceptionAfter(opt_flags, load_lir);
-    if (IsWide(size)) {
-      StoreValueWide(rl_dest, rl_result);
-    } else {
-      StoreValue(rl_dest, rl_result);
-    }
-  } else {
-    DCHECK(SizeMatchesTypeForEntrypoint(size, type));
-    QuickEntrypointEnum target;
-    switch (type) {
-      case Primitive::kPrimNot:
-        target = kQuickGetObjInstance;
-        break;
-      case Primitive::kPrimLong:
-      case Primitive::kPrimDouble:
-        target = kQuickGet64Instance;
-        break;
-      case Primitive::kPrimFloat:
-      case Primitive::kPrimInt:
-        target = kQuickGet32Instance;
-        break;
-      case Primitive::kPrimShort:
-        target = kQuickGetShortInstance;
-        break;
-      case Primitive::kPrimChar:
-        target = kQuickGetCharInstance;
-        break;
-      case Primitive::kPrimByte:
-        target = kQuickGetByteInstance;
-        break;
-      case Primitive::kPrimBoolean:
-        target = kQuickGetBooleanInstance;
-        break;
-      case Primitive::kPrimVoid:  // Intentional fallthrough.
-      default:
-        LOG(FATAL) << "Can't determine entrypoint for: " << type;
-        target = kQuickGet32Instance;
-    }
-    // Second argument of pGetXXInstance is always a reference.
-    DCHECK_EQ(static_cast<unsigned int>(rl_obj.wide), 0U);
-    CallRuntimeHelperImmRegLocation(target, field_info.FieldIndex(), rl_obj, true);
-
-    // FIXME: pGetXXInstance always returns an int or int64 regardless of rl_dest.fp.
-    if (IsWide(size)) {
-      RegLocation rl_result = GetReturnWide(kCoreReg);
-      StoreValueWide(rl_dest, rl_result);
-    } else {
-      RegLocation rl_result = GetReturn(rl_dest.ref ? kRefReg : kCoreReg);
-      StoreValue(rl_dest, rl_result);
-    }
-  }
-}
-
-void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
-                      RegLocation rl_src, RegLocation rl_obj) {
-  const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
-  if (kIsDebugBuild) {
-    auto mem_access_type = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode) ?
-        IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode) :
-        IPutMemAccessType(mir->dalvikInsn.opcode);
-    DCHECK_EQ(mem_access_type, field_info.MemAccessType());
-  }
-  cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
-  if (!ForceSlowFieldPath(cu_) && field_info.FastPut()) {
-    RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
-    // Dex code never writes to the class field.
-    DCHECK_GE(static_cast<uint32_t>(field_info.FieldOffset().Int32Value()),
-              sizeof(mirror::HeapReference<mirror::Class>));
-    rl_obj = LoadValue(rl_obj, kRefReg);
-    if (IsWide(size)) {
-      rl_src = LoadValueWide(rl_src, reg_class);
-    } else {
-      rl_src = LoadValue(rl_src, reg_class);
-    }
-    GenNullCheck(rl_obj.reg, opt_flags);
-    int field_offset = field_info.FieldOffset().Int32Value();
-    LIR* null_ck_insn;
-    if (IsRef(size)) {
-      null_ck_insn = StoreRefDisp(rl_obj.reg, field_offset, rl_src.reg, field_info.IsVolatile() ?
-          kVolatile : kNotVolatile);
-    } else {
-      null_ck_insn = StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, size,
-                                   field_info.IsVolatile() ? kVolatile : kNotVolatile);
-    }
-    MarkPossibleNullPointerExceptionAfter(opt_flags, null_ck_insn);
-    if (IsRef(size) && !mir_graph_->IsConstantNullRef(rl_src)) {
-      MarkGCCard(opt_flags, rl_src.reg, rl_obj.reg);
-    }
-  } else {
-    QuickEntrypointEnum target;
-    switch (size) {
-      case kReference:
-        target = kQuickSetObjInstance;
-        break;
-      case k64:
-      case kDouble:
-        target = kQuickSet64Instance;
-        break;
-      case k32:
-      case kSingle:
-        target = kQuickSet32Instance;
-        break;
-      case kSignedHalf:
-      case kUnsignedHalf:
-        target = kQuickSet16Instance;
-        break;
-      case kSignedByte:
-      case kUnsignedByte:
-        target = kQuickSet8Instance;
-        break;
-      case kWord:  // Intentional fallthrough.
-      default:
-        LOG(FATAL) << "Can't determine entrypoint for: " << size;
-        target = kQuickSet32Instance;
-    }
-    CallRuntimeHelperImmRegLocationRegLocation(target, field_info.FieldIndex(), rl_obj, rl_src,
-                                               true);
-  }
-}
-
-void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
-                             RegLocation rl_src) {
-  bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
-  bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
-      (opt_flags & MIR_IGNORE_NULL_CHECK));
-  QuickEntrypointEnum target = needs_range_check
-        ? (needs_null_check ? kQuickAputObjectWithNullAndBoundCheck
-                            : kQuickAputObjectWithBoundCheck)
-        : kQuickAputObject;
-  CallRuntimeHelperRegLocationRegLocationRegLocation(target, rl_array, rl_index, rl_src, true);
-}
-
-void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
-  RegLocation rl_result;
-  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
-                                                        *cu_->dex_file,
-                                                        type_idx)) {
-    // Call out to helper which resolves type and verifies access.
-    // Resolved type returned in kRet0.
-    CallRuntimeHelperImm(kQuickInitializeTypeAndVerifyAccess, type_idx, true);
-    rl_result = GetReturn(kRefReg);
-  } else {
-    rl_result = EvalLoc(rl_dest, kRefReg, true);
-    // We don't need access checks; load the type from the dex cache.
-    LoadTypeFromCache(type_idx, rl_result.reg);
-    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
-        type_idx) || ForceSlowTypePath(cu_)) {
-      // Slow path, at runtime test if type is null and if so initialize
-      FlushAllRegs();
-      GenIfNullUseHelperImm(rl_result.reg, kQuickInitializeType, type_idx);
-    }
-  }
-  StoreValue(rl_dest, rl_result);
-}
-
-void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
-  /* NOTE: Most strings should be available at compile time */
-  int32_t offset_of_string = GetCacheOffset(string_idx);
-  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
-      *cu_->dex_file, string_idx) || ForceSlowStringPath(cu_)) {
-    // Slow path: resolve the string if it is not in the dex cache.
-    FlushAllRegs();
-    LockCallTemps();  // Using explicit registers
-
-    // Might call out to helper, which will return resolved string in kRet0
-    RegStorage ret0 = TargetReg(kRet0, kRef);
-    if (CanUseOpPcRelDexCacheArrayLoad()) {
-      size_t offset = dex_cache_arrays_layout_.StringOffset(string_idx);
-      OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, ret0, false);
-    } else {
-      // Method to declaring class.
-      RegStorage arg0 = TargetReg(kArg0, kRef);
-      RegStorage r_method = LoadCurrMethodWithHint(arg0);
-      LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), arg0, kNotVolatile);
-      // Declaring class to dex cache strings.
-      LoadBaseDisp(arg0, mirror::Class::DexCacheStringsOffset().Int32Value(), arg0,
-                   cu_->target64 ? k64 : k32, kNotVolatile);
-
-      LoadRefDisp(arg0, offset_of_string, ret0, kNotVolatile);
-    }
-    GenIfNullUseHelperImm(ret0, kQuickResolveString, string_idx);
-
-    GenBarrier();
-    StoreValue(rl_dest, GetReturn(kRefReg));
-  } else {
-    RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
-    if (CanUseOpPcRelDexCacheArrayLoad()) {
-      size_t offset = dex_cache_arrays_layout_.StringOffset(string_idx);
-      OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, rl_result.reg, false);
-    } else {
-      RegLocation rl_method = LoadCurrMethod();
-      RegStorage res_reg = AllocTempRef();
-      LoadRefDisp(rl_method.reg, ArtMethod::DeclaringClassOffset().Int32Value(), res_reg,
-                  kNotVolatile);
-      LoadBaseDisp(res_reg, mirror::Class::DexCacheStringsOffset().Int32Value(), res_reg,
-                   cu_->target64 ? k64 : k32, kNotVolatile);
-      LoadRefDisp(res_reg, offset_of_string, rl_result.reg, kNotVolatile);
-      FreeTemp(res_reg);
-    }
-    StoreValue(rl_dest, rl_result);
-  }
-}
-
-/*
- * Let helper function take care of everything.  Will
- * call Class::NewInstanceFromCode(type_idx, method);
- */
-void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
-  FlushAllRegs();  /* Everything to home location */
-  // Alloc will always check for resolution; do we also need to verify
-  // access because the verifier was unable to?
-  const DexFile* dex_file = cu_->dex_file;
-  CompilerDriver* driver = cu_->compiler_driver;
-  bool finalizable;
-  if (driver->CanAccessInstantiableTypeWithoutChecks(cu_->method_idx,
-                                                     *dex_file,
-                                                     type_idx,
-                                                     &finalizable)) {
-    bool is_type_initialized;
-    bool use_direct_type_ptr;
-    uintptr_t direct_type_ptr;
-    bool is_finalizable;
-    if (kEmbedClassInCode &&
-        driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr,
-                                   &direct_type_ptr, &is_finalizable) &&
-                                   !is_finalizable) {
-      // The fast path.
-      if (!use_direct_type_ptr) {
-        LoadClassType(*dex_file, type_idx, kArg0);
-        if (!is_type_initialized) {
-          CallRuntimeHelperRegMethod(kQuickAllocObjectResolved, TargetReg(kArg0, kRef), true);
-        } else {
-          CallRuntimeHelperRegMethod(kQuickAllocObjectInitialized, TargetReg(kArg0, kRef), true);
-        }
-      } else {
-        // Use the direct pointer.
-        if (!is_type_initialized) {
-          CallRuntimeHelperImmMethod(kQuickAllocObjectResolved, direct_type_ptr, true);
-        } else {
-          CallRuntimeHelperImmMethod(kQuickAllocObjectInitialized, direct_type_ptr, true);
-        }
-      }
-    } else {
-      // The slow path.
-      CallRuntimeHelperImmMethod(kQuickAllocObject, type_idx, true);
-    }
-  } else {
-    CallRuntimeHelperImmMethod(kQuickAllocObjectWithAccessCheck, type_idx, true);
-  }
-  StoreValue(rl_dest, GetReturn(kRefReg));
-}
-
-void Mir2Lir::GenThrow(RegLocation rl_src) {
-  FlushAllRegs();
-  CallRuntimeHelperRegLocation(kQuickDeliverException, rl_src, true);
-}
-
-// For final classes there are no sub-classes to check and so we can answer the instance-of
-// question with simple comparisons.
-void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
-                                 RegLocation rl_src) {
-  // X86 has its own implementation.
-  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
-
-  RegLocation object = LoadValue(rl_src, kRefReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  RegStorage result_reg = rl_result.reg;
-  if (IsSameReg(result_reg, object.reg)) {
-    result_reg = AllocTypedTemp(false, kCoreReg);
-    DCHECK(!IsSameReg(result_reg, object.reg));
-  }
-  LoadConstant(result_reg, 0);     // assume false
-  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);
-
-  RegStorage check_class = AllocTypedTemp(false, kRefReg);
-  RegStorage object_class = AllocTypedTemp(false, kRefReg);
-
-  if (use_declaring_class) {
-    RegStorage r_method = LoadCurrMethodWithHint(check_class);
-    LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(), check_class,
-                kNotVolatile);
-    LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
-                kNotVolatile);
-  } else {
-    LoadTypeFromCache(type_idx, check_class);
-    LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class,
-                kNotVolatile);
-  }
-
-  // FIXME: what should we be comparing here? compressed or decompressed references?
-  if (cu_->instruction_set == kThumb2) {
-    OpRegReg(kOpCmp, check_class, object_class);  // Same?
-    LIR* it = OpIT(kCondEq, "");   // if-convert the test
-    LoadConstant(result_reg, 1);     // .eq case - load true
-    OpEndIT(it);
-  } else {
-    GenSelectConst32(check_class, object_class, kCondEq, 1, 0, result_reg, kCoreReg);
-  }
-  LIR* target = NewLIR0(kPseudoTargetLabel);
-  null_branchover->target = target;
-  FreeTemp(object_class);
-  FreeTemp(check_class);
-  if (IsTemp(result_reg)) {
-    OpRegCopy(rl_result.reg, result_reg);
-    FreeTemp(result_reg);
-  }
-  StoreValue(rl_dest, rl_result);
-}
-
-void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
-                                         bool type_known_abstract, bool use_declaring_class,
-                                         bool can_assume_type_is_in_dex_cache,
-                                         uint32_t type_idx, RegLocation rl_dest,
-                                         RegLocation rl_src) {
-  FlushAllRegs();
-  // May generate a call - use explicit registers
-  LockCallTemps();
-  RegStorage class_reg = TargetReg(kArg2, kRef);  // kArg2 will hold the Class*
-  RegStorage ref_reg = TargetReg(kArg0, kRef);  // kArg0 will hold the ref.
-  RegStorage ret_reg = GetReturn(kRefReg).reg;
-  if (needs_access_check) {
-    // Check we have access to type_idx and if not throw IllegalAccessError,
-    // returns Class* in kArg0
-    CallRuntimeHelperImmMethod(kQuickInitializeTypeAndVerifyAccess, type_idx, true);
-    OpRegCopy(class_reg, ret_reg);  // Align usage with fast path
-    LoadValueDirectFixed(rl_src, ref_reg);  // kArg0 <= ref
-  } else if (use_declaring_class) {
-    RegStorage r_method = LoadCurrMethodWithHint(TargetReg(kArg1, kRef));
-    LoadValueDirectFixed(rl_src, ref_reg);  // kArg0 <= ref
-    LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(),
-                class_reg, kNotVolatile);
-  } else {
-    if (can_assume_type_is_in_dex_cache) {
-      // Conditionally, as in the other case we will also load it.
-      LoadValueDirectFixed(rl_src, ref_reg);  // kArg0 <= ref
-    }
-
-    // Load dex cache entry into class_reg (kArg2)
-    LoadTypeFromCache(type_idx, class_reg);
-    if (!can_assume_type_is_in_dex_cache) {
-      GenIfNullUseHelperImm(class_reg, kQuickInitializeType, type_idx);
-
-      // Should load value here.
-      LoadValueDirectFixed(rl_src, ref_reg);  // kArg0 <= ref
-    }
-  }
-  /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
-  RegLocation rl_result = GetReturn(kCoreReg);
-  if (!IsSameReg(rl_result.reg, ref_reg)) {
-    // On MIPS and x86_64 rArg0 != rl_result, place false in result if branch is taken.
-    LoadConstant(rl_result.reg, 0);
-  }
-  LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, nullptr);
-
-  /* load object->klass_ */
-  RegStorage ref_class_reg = TargetReg(kArg1, kRef);  // kArg1 will hold the Class* of ref.
-  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
-  LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(),
-              ref_class_reg, kNotVolatile);
-  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
-  LIR* branchover = nullptr;
-  if (type_known_final) {
-    // rl_result == ref == class.
-    GenSelectConst32(ref_class_reg, class_reg, kCondEq, 1, 0, rl_result.reg,
-                     kCoreReg);
-  } else {
-    if (cu_->instruction_set == kThumb2) {
-      RegStorage r_tgt = LoadHelper(kQuickInstanceofNonTrivial);
-      LIR* it = nullptr;
-      if (!type_known_abstract) {
-        /* Uses conditional nullification */
-        OpRegReg(kOpCmp, ref_class_reg, class_reg);  // Same?
-        it = OpIT(kCondEq, "EE");   // if-convert the test
-        LoadConstant(rl_result.reg, 1);     // .eq case - load true
-      }
-      OpRegCopy(ref_reg, class_reg);    // .ne case - arg0 <= class
-      OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
-      if (it != nullptr) {
-        OpEndIT(it);
-      }
-      FreeTemp(r_tgt);
-    } else {
-      if (!type_known_abstract) {
-        /* Uses branchovers */
-        LoadConstant(rl_result.reg, 1);     // assume true
-        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), nullptr);
-      }
-
-      OpRegCopy(TargetReg(kArg0, kRef), class_reg);    // .ne case - arg0 <= class
-      CallRuntimeHelper(kQuickInstanceofNonTrivial, false);
-    }
-  }
-  // TODO: only clobber when type isn't final?
-  ClobberCallerSave();
-  /* branch targets here */
-  LIR* target = NewLIR0(kPseudoTargetLabel);
-  StoreValue(rl_dest, rl_result);
-  branch1->target = target;
-  if (branchover != nullptr) {
-    branchover->target = target;
-  }
-}
-
-void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
-  bool type_known_final, type_known_abstract, use_declaring_class;
-  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
-                                                                              *cu_->dex_file,
-                                                                              type_idx,
-                                                                              &type_known_final,
-                                                                              &type_known_abstract,
-                                                                              &use_declaring_class);
-  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
-      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);
-
-  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
-    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
-  } else {
-    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
-                               use_declaring_class, can_assume_type_is_in_dex_cache,
-                               type_idx, rl_dest, rl_src);
-  }
-}
-
-void Mir2Lir::GenCheckCast(int opt_flags, uint32_t insn_idx, uint32_t type_idx,
-                           RegLocation rl_src) {
-  if ((opt_flags & MIR_IGNORE_CHECK_CAST) != 0) {
-    // Compiler analysis proved that this check-cast would never cause an exception.
-    return;
-  }
-  bool type_known_final, type_known_abstract, use_declaring_class;
-  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
-                                                                              *cu_->dex_file,
-                                                                              type_idx,
-                                                                              &type_known_final,
-                                                                              &type_known_abstract,
-                                                                              &use_declaring_class);
-  // Note: currently type_known_final is unused, as optimizing will only improve the performance
-  // of the exception throw path.
-  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
-  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) {
-    // Verifier type analysis proved this check cast would never cause an exception.
-    return;
-  }
-  FlushAllRegs();
-  // May generate a call - use explicit registers
-  LockCallTemps();
-  RegStorage class_reg = TargetReg(kArg2, kRef);  // kArg2 will hold the Class*
-  if (needs_access_check) {
-    // Check we have access to type_idx and if not throw IllegalAccessError,
-    // returns Class* in kRet0
-    // InitializeTypeAndVerifyAccess(idx, method)
-    CallRuntimeHelperImmMethod(kQuickInitializeTypeAndVerifyAccess, type_idx, true);
-    OpRegCopy(class_reg, TargetReg(kRet0, kRef));  // Align usage with fast path
-  } else if (use_declaring_class) {
-    RegStorage method_reg = LoadCurrMethodWithHint(TargetReg(kArg1, kRef));
-    LoadRefDisp(method_reg, ArtMethod::DeclaringClassOffset().Int32Value(),
-                class_reg, kNotVolatile);
-  } else {
-    // Load dex cache entry into class_reg (kArg2)
-    LoadTypeFromCache(type_idx, class_reg);
-    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
-      // Need to test presence of type in dex cache at runtime
-      GenIfNullUseHelperImm(class_reg, kQuickInitializeType, type_idx);
-    }
-  }
-  // At this point, class_reg (kArg2) has class
-  LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef));  // kArg0 <= ref
-
-  // Slow path for the case where the classes are not equal.  In this case we need
-  // to call a helper function to do the check.
-  class SlowPath : public LIRSlowPath {
-   public:
-    SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load)
-        : LIRSlowPath(m2l, fromfast, cont), load_(load) {
-    }
-
-    void Compile() {
-      GenerateTargetLabel();
-
-      if (load_) {
-        m2l_->LoadRefDisp(m2l_->TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(),
-                          m2l_->TargetReg(kArg1, kRef), kNotVolatile);
-      }
-      m2l_->CallRuntimeHelperRegReg(kQuickCheckCast, m2l_->TargetReg(kArg2, kRef),
-                                    m2l_->TargetReg(kArg1, kRef), true);
-      m2l_->OpUnconditionalBranch(cont_);
-    }
-
-   private:
-    const bool load_;
-  };
-
-  if (type_known_abstract) {
-    // Easier case, run slow path if target is non-null (slow path will load from target)
-    LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0, kRef), 0, nullptr);
-    LIR* cont = NewLIR0(kPseudoTargetLabel);
-    AddSlowPath(new (arena_) SlowPath(this, branch, cont, true));
-  } else {
-    // Harder, more common case.  We need to generate a forward branch over the load
-    // if the target is null.  If it's non-null we perform the load and branch to the
-    // slow path if the classes are not equal.
-
-    /* Null is OK - continue */
-    LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0, kRef), 0, nullptr);
-    /* load object->klass_ */
-    DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
-    LoadRefDisp(TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(),
-                TargetReg(kArg1, kRef), kNotVolatile);
-
-    LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1, kRef), class_reg, nullptr);
-    LIR* cont = NewLIR0(kPseudoTargetLabel);
-
-    // Add the slow path that will not perform load since this is already done.
-    AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false));
-
-    // Set the null check to branch to the continuation.
-    branch1->target = cont;
-  }
-}
-
-void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
-                           RegLocation rl_src1, RegLocation rl_src2) {
-  RegLocation rl_result;
-  if (cu_->instruction_set == kThumb2) {
-    /*
-     * NOTE:  This is the one place in the code in which we might have
-     * as many as six live temporary registers.  There are 5 in the normal
-     * set for Arm.  Until we have spill capabilities, temporarily add
-     * lr to the temp set.  It is safe to do this locally, but note that
-     * lr is used explicitly elsewhere in the code generator and cannot
-     * normally be used as a general temp register.
-     */
-    MarkTemp(TargetReg(kLr, kNotWide));   // Add lr to the temp pool
-    FreeTemp(TargetReg(kLr, kNotWide));   // and make it available
-  }
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  // The longs may overlap - use intermediate temp if so
-  if ((rl_result.reg.GetLowReg() == rl_src1.reg.GetHighReg()) ||
-      (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg())) {
-    RegStorage t_reg = AllocTemp();
-    OpRegRegReg(first_op, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
-    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
-    OpRegCopy(rl_result.reg.GetLow(), t_reg);
-    FreeTemp(t_reg);
-  } else {
-    OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
-    OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
-  }
-  /*
-   * NOTE: If rl_dest refers to a frame variable in a large frame, the
-   * following StoreValueWide might need to allocate a temp register.
-   * To further work around the lack of a spill capability, explicitly
-   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
-   * Remove when spill is functional.
-   */
-  FreeRegLocTemps(rl_result, rl_src1);
-  FreeRegLocTemps(rl_result, rl_src2);
-  StoreValueWide(rl_dest, rl_result);
-  if (cu_->instruction_set == kThumb2) {
-    Clobber(TargetReg(kLr, kNotWide));
-    UnmarkTemp(TargetReg(kLr, kNotWide));  // Remove lr from the temp pool
-  }
-}
-
-void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                             RegLocation rl_src1, RegLocation rl_shift) {
-  QuickEntrypointEnum target;
-  switch (opcode) {
-    case Instruction::SHL_LONG:
-    case Instruction::SHL_LONG_2ADDR:
-      target = kQuickShlLong;
-      break;
-    case Instruction::SHR_LONG:
-    case Instruction::SHR_LONG_2ADDR:
-      target = kQuickShrLong;
-      break;
-    case Instruction::USHR_LONG:
-    case Instruction::USHR_LONG_2ADDR:
-      target = kQuickUshrLong;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected case";
-      target = kQuickShlLong;
-  }
-  FlushAllRegs();   /* Send everything to home location */
-  CallRuntimeHelperRegLocationRegLocation(target, rl_src1, rl_shift, false);
-  RegLocation rl_result = GetReturnWide(kCoreReg);
-  StoreValueWide(rl_dest, rl_result);
-}
-
-
-void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
-                            RegLocation rl_src1, RegLocation rl_src2, int flags) {
-  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
-  OpKind op = kOpBkpt;
-  bool is_div_rem = false;
-  bool check_zero = false;
-  bool unary = false;
-  RegLocation rl_result;
-  bool shift_op = false;
-  switch (opcode) {
-    case Instruction::NEG_INT:
-      op = kOpNeg;
-      unary = true;
-      break;
-    case Instruction::NOT_INT:
-      op = kOpMvn;
-      unary = true;
-      break;
-    case Instruction::ADD_INT:
-    case Instruction::ADD_INT_2ADDR:
-      op = kOpAdd;
-      break;
-    case Instruction::SUB_INT:
-    case Instruction::SUB_INT_2ADDR:
-      op = kOpSub;
-      break;
-    case Instruction::MUL_INT:
-    case Instruction::MUL_INT_2ADDR:
-      op = kOpMul;
-      break;
-    case Instruction::DIV_INT:
-    case Instruction::DIV_INT_2ADDR:
-      check_zero = true;
-      op = kOpDiv;
-      is_div_rem = true;
-      break;
-    /* NOTE: returns in kArg1 */
-    case Instruction::REM_INT:
-    case Instruction::REM_INT_2ADDR:
-      check_zero = true;
-      op = kOpRem;
-      is_div_rem = true;
-      break;
-    case Instruction::AND_INT:
-    case Instruction::AND_INT_2ADDR:
-      op = kOpAnd;
-      break;
-    case Instruction::OR_INT:
-    case Instruction::OR_INT_2ADDR:
-      op = kOpOr;
-      break;
-    case Instruction::XOR_INT:
-    case Instruction::XOR_INT_2ADDR:
-      op = kOpXor;
-      break;
-    case Instruction::SHL_INT:
-    case Instruction::SHL_INT_2ADDR:
-      shift_op = true;
-      op = kOpLsl;
-      break;
-    case Instruction::SHR_INT:
-    case Instruction::SHR_INT_2ADDR:
-      shift_op = true;
-      op = kOpAsr;
-      break;
-    case Instruction::USHR_INT:
-    case Instruction::USHR_INT_2ADDR:
-      shift_op = true;
-      op = kOpLsr;
-      break;
-    default:
-      LOG(FATAL) << "Invalid word arith op: " << opcode;
-  }
-  if (!is_div_rem) {
-    if (unary) {
-      rl_src1 = LoadValue(rl_src1, kCoreReg);
-      rl_result = EvalLoc(rl_dest, kCoreReg, true);
-      OpRegReg(op, rl_result.reg, rl_src1.reg);
-    } else {
-      if ((shift_op) && (cu_->instruction_set != kArm64)) {
-        rl_src2 = LoadValue(rl_src2, kCoreReg);
-        RegStorage t_reg = AllocTemp();
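-        // Dex shift semantics use only the low 5 bits of the shift count, so mask
-        // explicitly here; Arm64 is excluded above since its variable-shift
-        // instructions already take the count modulo the register width.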
-        OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31);
-        rl_src1 = LoadValue(rl_src1, kCoreReg);
-        rl_result = EvalLoc(rl_dest, kCoreReg, true);
-        OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
-        FreeTemp(t_reg);
-      } else {
-        rl_src1 = LoadValue(rl_src1, kCoreReg);
-        rl_src2 = LoadValue(rl_src2, kCoreReg);
-        rl_result = EvalLoc(rl_dest, kCoreReg, true);
-        OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
-      }
-    }
-    StoreValue(rl_dest, rl_result);
-  } else {
-    bool done = false;      // Set to true if we happen to find a way to use a real instruction.
-    if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64 ||
-        cu_->instruction_set == kArm64) {
-      rl_src1 = LoadValue(rl_src1, kCoreReg);
-      rl_src2 = LoadValue(rl_src2, kCoreReg);
-      if (check_zero && (flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
-        GenDivZeroCheck(rl_src2.reg);
-      }
-      rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
-      done = true;
-    } else if (cu_->instruction_set == kThumb2) {
-      if (cu_->compiler_driver->GetInstructionSetFeatures()->AsArmInstructionSetFeatures()->
-              HasDivideInstruction()) {
-        // Use ARM SDIV instruction for division.  For remainder we also need to
-        // calculate using a MUL and subtract.
-        rl_src1 = LoadValue(rl_src1, kCoreReg);
-        rl_src2 = LoadValue(rl_src2, kCoreReg);
-        if (check_zero && (flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
-          GenDivZeroCheck(rl_src2.reg);
-        }
-        rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
-        done = true;
-      }
-    }
-
-    // If we haven't already generated the code use the callout function.
-    if (!done) {
-      FlushAllRegs();   /* Send everything to home location */
-      LoadValueDirectFixed(rl_src2, TargetReg(kArg1, kNotWide));
-      RegStorage r_tgt = CallHelperSetup(kQuickIdivmod);
-      LoadValueDirectFixed(rl_src1, TargetReg(kArg0, kNotWide));
-      if (check_zero && (flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
-        GenDivZeroCheck(TargetReg(kArg1, kNotWide));
-      }
-      // NOTE: callout here is not a safepoint.
-      CallHelper(r_tgt, kQuickIdivmod, false /* not a safepoint */);
-      if (op == kOpDiv) {
-        rl_result = GetReturn(kCoreReg);
-      } else {
-        rl_result = GetReturnAlt();
-      }
-    }
-    StoreValue(rl_dest, rl_result);
-  }
-}
-
-/*
- * The following are the first-level codegen routines that analyze the format
- * of each bytecode and then either dispatch to special-purpose codegen
- * routines or produce the corresponding Thumb instructions directly.
- */
-
-// Returns true if no more than two bits are set in 'x'.
-static bool IsPopCountLE2(unsigned int x) {
-  x &= x - 1;
-  return (x & (x - 1)) == 0;
-}
-
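Worth spelling out why this works: x &= x - 1 clears the lowest set bit, so after one clearing step the classic at-most-one-bit test succeeds exactly when the original value had at most two bits set. A minimal standalone sketch of the same predicate (hypothetical test harness, not code from this tree):

#include <cassert>

// Clearing the lowest set bit once, then applying the standard
// at-most-one-bit test, accepts exactly the values with popcount <= 2.
static bool IsPopCountLE2(unsigned int x) {
  x &= x - 1;                  // Drop the lowest set bit.
  return (x & (x - 1)) == 0;   // True iff at most one bit remains.
}

int main() {
  assert(IsPopCountLE2(0u));       // Zero bits set.
  assert(IsPopCountLE2(0x80u));    // One bit set.
  assert(IsPopCountLE2(0x88u));    // Two bits set.
  assert(!IsPopCountLE2(0x8Au));   // Three bits set.
  return 0;
}
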
-// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
-// and store the result in 'rl_dest'.
-bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED, bool is_div,
-                               RegLocation rl_src, RegLocation rl_dest, int lit) {
-  if ((lit < 2) || (!IsPowerOfTwo(lit))) {
-    return false;
-  }
-  int k = CTZ(lit);
-  if (k >= 30) {
-    // Avoid special cases.
-    return false;
-  }
-  rl_src = LoadValue(rl_src, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  if (is_div) {
-    RegStorage t_reg = AllocTemp();
-    if (lit == 2) {
-      // Division by 2 is by far the most common division by constant.
-      OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k);
-      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
-      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
-    } else {
-      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
-      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
-      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg);
-      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
-    }
-  } else {
-    RegStorage t_reg1 = AllocTemp();
-    RegStorage t_reg2 = AllocTemp();
-    if (lit == 2) {
-      OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k);
-      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
-      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
-      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
-    } else {
-      OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31);
-      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
-      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg);
-      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
-      OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1);
-    }
-  }
-  StoreValue(rl_dest, rl_result);
-  return true;
-}
-
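The shift sequences above implement round-toward-zero signed division by 2^k: a bare arithmetic shift rounds toward negative infinity, so the generated code first adds a bias of 2^k - 1 for negative dividends (built from the ASR-by-31 / LSR-by-(32-k) pair, or a single LSR when lit == 2). A C++ sketch of the same arithmetic, assuming >> on int32_t behaves like the emitted ASR:

#include <cassert>
#include <cstdint>

// Mirrors the emitted sequence: sign-fill with an arithmetic shift, extract
// the 2^k - 1 bias with a logical shift, add it, then shift right by k.
static int32_t DivPow2(int32_t src, int k) {
  uint32_t sign_fill = static_cast<uint32_t>(src >> 31);  // 0 or all ones.
  int32_t biased = src + static_cast<int32_t>(sign_fill >> (32 - k));
  return biased >> k;  // The arithmetic shift now rounds toward zero.
}

int main() {
  assert(DivPow2(-7, 2) == -1);  // A bare -7 >> 2 would yield -2.
  assert(DivPow2(7, 2) == 1);
  assert(DivPow2(-8, 3) == -1);
  return 0;
}
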
-// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
-// and store the result in 'rl_dest'.
-bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
-  if (lit < 0) {
-    return false;
-  }
-  if (lit == 0) {
-    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    LoadConstant(rl_result.reg, 0);
-    StoreValue(rl_dest, rl_result);
-    return true;
-  }
-  if (lit == 1) {
-    rl_src = LoadValue(rl_src, kCoreReg);
-    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    OpRegCopy(rl_result.reg, rl_src.reg);
-    StoreValue(rl_dest, rl_result);
-    return true;
-  }
-  // There is RegRegRegShift on ARM, so check for more special cases.
-  if (cu_->instruction_set == kThumb2) {
-    return EasyMultiply(rl_src, rl_dest, lit);
-  }
-  // Can we simplify this multiplication?
-  bool power_of_two = false;
-  bool pop_count_le2 = false;
-  bool power_of_two_minus_one = false;
-  if (IsPowerOfTwo(lit)) {
-    power_of_two = true;
-  } else if (IsPopCountLE2(lit)) {
-    pop_count_le2 = true;
-  } else if (IsPowerOfTwo(lit + 1)) {
-    power_of_two_minus_one = true;
-  } else {
-    return false;
-  }
-  rl_src = LoadValue(rl_src, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  if (power_of_two) {
-    // Shift.
-    OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, CTZ(lit));
-  } else if (pop_count_le2) {
-    // Shift and add and shift.
-    int first_bit = CTZ(lit);
-    int second_bit = CTZ(lit ^ (1 << first_bit));
-    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
-  } else {
-    // Reverse subtract: (src << (shift + 1)) - src.
-    DCHECK(power_of_two_minus_one);
-    // TUNING: rsb dst, src, src lsl#CTZ(lit + 1)
-    RegStorage t_reg = AllocTemp();
-    OpRegRegImm(kOpLsl, t_reg, rl_src.reg, CTZ(lit + 1));
-    OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg);
-  }
-  StoreValue(rl_dest, rl_result);
-  return true;
-}
-
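These three shapes strength-reduce the multiply: a power of two becomes one shift, a constant with two set bits becomes two shifts and an add, and 2^k - 1 becomes a shift followed by a subtract. A hypothetical C++ rendering of the arithmetic for lits 16, 10, and 7:

#include <cassert>
#include <cstdint>

static int32_t MulBy16(int32_t x) { return x << 4; }               // Power of two: one shift.
static int32_t MulBy10(int32_t x) { return (x << 3) + (x << 1); }  // 10 = 8 + 2: two bits set.
static int32_t MulBy7(int32_t x)  { return (x << 3) - x; }         // 7 = 8 - 1: shift then subtract.

int main() {
  assert(MulBy16(5) == 80);
  assert(MulBy10(12) == 120);
  assert(MulBy7(3) == 21);
  return 0;
}
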
-// Returns true if it generates instructions.
-bool Mir2Lir::HandleEasyFloatingPointDiv(RegLocation rl_dest, RegLocation rl_src1,
-                                         RegLocation rl_src2) {
-  if (!rl_src2.is_const ||
-      ((cu_->instruction_set != kThumb2) && (cu_->instruction_set != kArm64))) {
-    return false;
-  }
-
-  if (!rl_src2.wide) {
-    int32_t divisor = mir_graph_->ConstantValue(rl_src2);
-    if (CanDivideByReciprocalMultiplyFloat(divisor)) {
-      // Generate multiply by reciprocal instead of div.
-      float recip = 1.0f / bit_cast<float, int32_t>(divisor);
-      GenMultiplyByConstantFloat(rl_dest, rl_src1, bit_cast<int32_t, float>(recip));
-      return true;
-    }
-  } else {
-    int64_t divisor = mir_graph_->ConstantValueWide(rl_src2);
-    if (CanDivideByReciprocalMultiplyDouble(divisor)) {
-      // Generate multiply by reciprocal instead of div.
-      double recip = 1.0 / bit_cast<double, int64_t>(divisor);
-      GenMultiplyByConstantDouble(rl_dest, rl_src1, bit_cast<int64_t, double>(recip));
-      return true;
-    }
-  }
-  return false;
-}
-
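The rewrite is only sound when x / d and x * (1/d) round identically, which holds when 1/d is exactly representable; the CanDivideByReciprocalMultiply* predicates presumably restrict the transform to such divisors (e.g. powers of two). A quick standalone check of that property:

#include <cassert>
#include <initializer_list>

int main() {
  const float d = 8.0f;          // Power of two: 1.0f / d is exactly 0.125f.
  const float recip = 1.0f / d;
  for (float x : {1.0f, 3.5f, -100.25f}) {
    assert(x / d == x * recip);  // Bit-identical to the division.
  }
  return 0;
}
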
-void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
-                               int lit) {
-  RegLocation rl_result;
-  OpKind op = static_cast<OpKind>(0);    /* Make gcc happy */
-  bool shift_op = false;
-  bool is_div = false;
-
-  switch (opcode) {
-    case Instruction::RSUB_INT_LIT8:
-    case Instruction::RSUB_INT: {
-      rl_src = LoadValue(rl_src, kCoreReg);
-      rl_result = EvalLoc(rl_dest, kCoreReg, true);
-      if (cu_->instruction_set == kThumb2) {
-        OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit);
-      } else {
-        OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
-        OpRegImm(kOpAdd, rl_result.reg, lit);
-      }
-      StoreValue(rl_dest, rl_result);
-      return;
-    }
-
-    case Instruction::SUB_INT:
-    case Instruction::SUB_INT_2ADDR:
-      lit = -lit;
-      FALLTHROUGH_INTENDED;
-    case Instruction::ADD_INT:
-    case Instruction::ADD_INT_2ADDR:
-    case Instruction::ADD_INT_LIT8:
-    case Instruction::ADD_INT_LIT16:
-      op = kOpAdd;
-      break;
-    case Instruction::MUL_INT:
-    case Instruction::MUL_INT_2ADDR:
-    case Instruction::MUL_INT_LIT8:
-    case Instruction::MUL_INT_LIT16: {
-      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
-        return;
-      }
-      op = kOpMul;
-      break;
-    }
-    case Instruction::AND_INT:
-    case Instruction::AND_INT_2ADDR:
-    case Instruction::AND_INT_LIT8:
-    case Instruction::AND_INT_LIT16:
-      op = kOpAnd;
-      break;
-    case Instruction::OR_INT:
-    case Instruction::OR_INT_2ADDR:
-    case Instruction::OR_INT_LIT8:
-    case Instruction::OR_INT_LIT16:
-      op = kOpOr;
-      break;
-    case Instruction::XOR_INT:
-    case Instruction::XOR_INT_2ADDR:
-    case Instruction::XOR_INT_LIT8:
-    case Instruction::XOR_INT_LIT16:
-      op = kOpXor;
-      break;
-    case Instruction::SHL_INT_LIT8:
-    case Instruction::SHL_INT:
-    case Instruction::SHL_INT_2ADDR:
-      lit &= 31;
-      shift_op = true;
-      op = kOpLsl;
-      break;
-    case Instruction::SHR_INT_LIT8:
-    case Instruction::SHR_INT:
-    case Instruction::SHR_INT_2ADDR:
-      lit &= 31;
-      shift_op = true;
-      op = kOpAsr;
-      break;
-    case Instruction::USHR_INT_LIT8:
-    case Instruction::USHR_INT:
-    case Instruction::USHR_INT_2ADDR:
-      lit &= 31;
-      shift_op = true;
-      op = kOpLsr;
-      break;
-
-    case Instruction::DIV_INT:
-    case Instruction::DIV_INT_2ADDR:
-    case Instruction::DIV_INT_LIT8:
-    case Instruction::DIV_INT_LIT16:
-    case Instruction::REM_INT:
-    case Instruction::REM_INT_2ADDR:
-    case Instruction::REM_INT_LIT8:
-    case Instruction::REM_INT_LIT16: {
-      if (lit == 0) {
-        GenDivZeroException();
-        return;
-      }
-      is_div = (opcode == Instruction::DIV_INT) ||
-               (opcode == Instruction::DIV_INT_2ADDR) ||
-               (opcode == Instruction::DIV_INT_LIT8) ||
-               (opcode == Instruction::DIV_INT_LIT16);
-      if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) {
-        return;
-      }
-
-      bool done = false;
-      if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64 ||
-          cu_->instruction_set == kArm64) {
-        rl_src = LoadValue(rl_src, kCoreReg);
-        rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
-        done = true;
-      } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
-        rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
-        done = true;
-      } else if (cu_->instruction_set == kThumb2) {
-        if (cu_->compiler_driver->GetInstructionSetFeatures()->AsArmInstructionSetFeatures()->
-                HasDivideInstruction()) {
-          // Use the ARM SDIV instruction for division.  For remainder we also
-          // need a MUL and a subtract.
-          rl_src = LoadValue(rl_src, kCoreReg);
-          rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
-          done = true;
-        }
-      }
-
-      if (!done) {
-        FlushAllRegs();   /* Everything to home location. */
-        LoadValueDirectFixed(rl_src, TargetReg(kArg0, kNotWide));
-        Clobber(TargetReg(kArg0, kNotWide));
-        CallRuntimeHelperRegImm(kQuickIdivmod, TargetReg(kArg0, kNotWide), lit, false);
-        if (is_div) {
-          rl_result = GetReturn(kCoreReg);
-        } else {
-          rl_result = GetReturnAlt();
-        }
-      }
-      StoreValue(rl_dest, rl_result);
-      return;
-    }
-    default:
-      LOG(FATAL) << "Unexpected opcode " << opcode;
-  }
-  rl_src = LoadValue(rl_src, kCoreReg);
-  rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  // Avoid shifts by literal 0 (no support in Thumb); change to a copy.
-  if (shift_op && (lit == 0)) {
-    OpRegCopy(rl_result.reg, rl_src.reg);
-  } else {
-    OpRegRegImm(op, rl_result.reg, rl_src.reg, lit);
-  }
-  StoreValue(rl_dest, rl_result);
-}
-
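Two details of the literal path deserve a note: Dalvik int shifts use only the low five bits of the amount (hence the lit &= 31 masking), and a shift that masks down to zero is turned into a plain copy since Thumb has no shift-by-zero encoding. A small sketch of the resulting semantics (hypothetical helper):

#include <cassert>

// Mirrors the SHL_INT literal handling above.
static int ShlIntLit(int x, int lit) {
  lit &= 31;                         // Dalvik shifts use the low five bits only.
  return (lit == 0) ? x : x << lit;  // A zero shift degrades to a copy.
}

int main() {
  assert(ShlIntLit(3, 33) == 6);  // 33 & 31 == 1.
  assert(ShlIntLit(7, 32) == 7);  // 32 & 31 == 0: plain copy.
  return 0;
}
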
-void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                             RegLocation rl_src1, RegLocation rl_src2, int flags) {
-  RegLocation rl_result;
-  OpKind first_op = kOpBkpt;
-  OpKind second_op = kOpBkpt;
-  bool call_out = false;
-  bool check_zero = false;
-  int ret_reg = TargetReg(kRet0, kNotWide).GetReg();
-  QuickEntrypointEnum target;
-
-  switch (opcode) {
-    case Instruction::NOT_LONG:
-      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-      rl_result = EvalLoc(rl_dest, kCoreReg, true);
-      // Check for destructive overlap
-      if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) {
-        RegStorage t_reg = AllocTemp();
-        OpRegCopy(t_reg, rl_src2.reg.GetHigh());
-        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
-        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg);
-        FreeTemp(t_reg);
-      } else {
-        OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
-        OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
-      }
-      StoreValueWide(rl_dest, rl_result);
-      return;
-    case Instruction::ADD_LONG:
-    case Instruction::ADD_LONG_2ADDR:
-      first_op = kOpAdd;
-      second_op = kOpAdc;
-      break;
-    case Instruction::SUB_LONG:
-    case Instruction::SUB_LONG_2ADDR:
-      first_op = kOpSub;
-      second_op = kOpSbc;
-      break;
-    case Instruction::MUL_LONG:
-    case Instruction::MUL_LONG_2ADDR:
-      call_out = true;
-      ret_reg = TargetReg(kRet0, kNotWide).GetReg();
-      target = kQuickLmul;
-      break;
-    case Instruction::DIV_LONG:
-    case Instruction::DIV_LONG_2ADDR:
-      call_out = true;
-      check_zero = true;
-      ret_reg = TargetReg(kRet0, kNotWide).GetReg();
-      target = kQuickLdiv;
-      break;
-    case Instruction::REM_LONG:
-    case Instruction::REM_LONG_2ADDR:
-      call_out = true;
-      check_zero = true;
-      target = kQuickLmod;
-      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
-      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2, kNotWide).GetReg() :
-          TargetReg(kRet0, kNotWide).GetReg();
-      break;
-    case Instruction::AND_LONG_2ADDR:
-    case Instruction::AND_LONG:
-      first_op = kOpAnd;
-      second_op = kOpAnd;
-      break;
-    case Instruction::OR_LONG:
-    case Instruction::OR_LONG_2ADDR:
-      first_op = kOpOr;
-      second_op = kOpOr;
-      break;
-    case Instruction::XOR_LONG:
-    case Instruction::XOR_LONG_2ADDR:
-      first_op = kOpXor;
-      second_op = kOpXor;
-      break;
-    default:
-      LOG(FATAL) << "Invalid long arith op";
-  }
-  if (!call_out) {
-    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
-  } else {
-    FlushAllRegs();   /* Send everything to home location */
-    if (check_zero) {
-      RegStorage r_tmp1 = TargetReg(kArg0, kWide);
-      RegStorage r_tmp2 = TargetReg(kArg2, kWide);
-      LoadValueDirectWideFixed(rl_src2, r_tmp2);
-      RegStorage r_tgt = CallHelperSetup(target);
-      if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
-        GenDivZeroCheckWide(r_tmp2);
-      }
-      LoadValueDirectWideFixed(rl_src1, r_tmp1);
-      // NOTE: callout here is not a safepoint
-      CallHelper(r_tgt, target, false /* not safepoint */);
-    } else {
-      CallRuntimeHelperRegLocationRegLocation(target, rl_src1, rl_src2, false);
-    }
-    // Adjust return regs to handle the case of rem returning kArg2/kArg3.
-    if (ret_reg == TargetReg(kRet0, kNotWide).GetReg()) {
-      rl_result = GetReturnWide(kCoreReg);
-    } else {
-      rl_result = GetReturnWideAlt();
-    }
-    StoreValueWide(rl_dest, rl_result);
-  }
-}
-
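The first_op/second_op pairs decompose 64-bit arithmetic over a 32-bit register pair: low halves combine with kOpAdd or kOpSub, high halves with kOpAdc or kOpSbc so the carry or borrow propagates. An illustrative C++ model of the add case:

#include <cassert>
#include <cstdint>

// kOpAdd on the low halves, then kOpAdc (add-with-carry) on the high halves.
static int64_t AddLong(int64_t a, int64_t b) {
  uint32_t lo = static_cast<uint32_t>(a) + static_cast<uint32_t>(b);
  uint32_t carry = lo < static_cast<uint32_t>(a);  // Carry out of the low add.
  uint32_t hi = static_cast<uint32_t>(a >> 32) + static_cast<uint32_t>(b >> 32) + carry;
  return static_cast<int64_t>((static_cast<uint64_t>(hi) << 32) | lo);
}

int main() {
  assert(AddLong(0xFFFFFFFFLL, 1) == 0x100000000LL);  // Carry crosses the halves.
  assert(AddLong(-5, 3) == -2);
  return 0;
}
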
-void Mir2Lir::GenConst(RegLocation rl_dest, int value) {
-  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
-  LoadConstantNoClobber(rl_result.reg, value);
-  StoreValue(rl_dest, rl_result);
-}
-
-void Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
-                                RegLocation rl_src, RegisterClass return_reg_class) {
-  /*
-   * Don't optimize the register usage since it calls out to support
-   * functions
-   */
-
-  FlushAllRegs();   /* Send everything to home location */
-  CallRuntimeHelperRegLocation(trampoline, rl_src, false);
-  if (rl_dest.wide) {
-    RegLocation rl_result = GetReturnWide(return_reg_class);
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    RegLocation rl_result = GetReturn(return_reg_class);
-    StoreValue(rl_dest, rl_result);
-  }
-}
-
-class Mir2Lir::SuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
- public:
-  SuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont)
-      : LIRSlowPath(m2l, branch, cont) {
-  }
-
-  void Compile() OVERRIDE {
-    m2l_->ResetRegPool();
-    m2l_->ResetDefTracking();
-    GenerateTargetLabel(kPseudoSuspendTarget);
-    m2l_->CallRuntimeHelper(kQuickTestSuspend, true);
-    if (cont_ != nullptr) {
-      m2l_->OpUnconditionalBranch(cont_);
-    }
-  }
-};
-
-/* Check if we need to check for pending suspend request */
-void Mir2Lir::GenSuspendTest(int opt_flags) {
-  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK) != 0) {
-    return;
-  }
-  if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
-    FlushAllRegs();
-    LIR* branch = OpTestSuspend(nullptr);
-    LIR* cont = NewLIR0(kPseudoTargetLabel);
-    AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont));
-  } else {
-    FlushAllRegs();     // TODO: needed?
-    LIR* inst = CheckSuspendUsingLoad();
-    MarkSafepointPC(inst);
-  }
-}
-
-/* Check if we need to check for pending suspend request */
-void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
-  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK) != 0) {
-    OpUnconditionalBranch(target);
-    return;
-  }
-  if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
-    OpTestSuspend(target);
-    FlushAllRegs();
-    LIR* branch = OpUnconditionalBranch(nullptr);
-    AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, target));
-  } else {
-    // For the implicit suspend check, just perform the trigger
-    // load and branch to the target.
-    FlushAllRegs();
-    LIR* inst = CheckSuspendUsingLoad();
-    MarkSafepointPC(inst);
-    OpUnconditionalBranch(target);
-  }
-}
-
-/* Call out to helper assembly routine that will null check obj and then lock it. */
-void Mir2Lir::GenMonitorEnter(int opt_flags ATTRIBUTE_UNUSED, RegLocation rl_src) {
-  // TODO: avoid null check with specialized non-null helper.
-  FlushAllRegs();
-  CallRuntimeHelperRegLocation(kQuickLockObject, rl_src, true);
-}
-
-/* Call out to helper assembly routine that will null check obj and then unlock it. */
-void Mir2Lir::GenMonitorExit(int opt_flags ATTRIBUTE_UNUSED, RegLocation rl_src) {
-  // TODO: avoid null check with specialized non-null helper.
-  FlushAllRegs();
-  CallRuntimeHelperRegLocation(kQuickUnlockObject, rl_src, true);
-}
-
-/* Generic code for generating a wide constant into a VR. */
-void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
-  RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
-  LoadConstantWide(rl_result.reg, value);
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void Mir2Lir::GenSmallPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  BasicBlock* bb = mir_graph_->GetBasicBlock(mir->bb);
-  DCHECK(bb != nullptr);
-  ArenaVector<SuccessorBlockInfo*>::const_iterator succ_bb_iter = bb->successor_blocks.cbegin();
-  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  const uint16_t entries = table[1];
-  // Chained cmp-and-branch.
-  const int32_t* as_int32 = reinterpret_cast<const int32_t*>(&table[2]);
-  int32_t starting_key = as_int32[0];
-  rl_src = LoadValue(rl_src, kCoreReg);
-  int i = 0;
-  for (; i < entries; ++i, ++succ_bb_iter) {
-    if (!InexpensiveConstantInt(starting_key + i, Instruction::Code::IF_EQ)) {
-      // Switch to using a temp and add.
-      break;
-    }
-    SuccessorBlockInfo* successor_block_info = *succ_bb_iter;
-    DCHECK(successor_block_info != nullptr);
-    int case_block_id = successor_block_info->block;
-    DCHECK_EQ(starting_key + i, successor_block_info->key);
-    OpCmpImmBranch(kCondEq, rl_src.reg, starting_key + i, &block_label_list_[case_block_id]);
-  }
-  if (i < entries) {
-    // The rest do not seem to be inexpensive. Try to allocate a temp and use add.
-    RegStorage key_temp = AllocTypedTemp(false, kCoreReg, false);
-    if (key_temp.Valid()) {
-      LoadConstantNoClobber(key_temp, starting_key + i);
-      for (; i < entries - 1; ++i, ++succ_bb_iter) {
-        SuccessorBlockInfo* successor_block_info = *succ_bb_iter;
-        DCHECK(successor_block_info != nullptr);
-        int case_block_id = successor_block_info->block;
-        DCHECK_EQ(starting_key + i, successor_block_info->key);
-        OpCmpBranch(kCondEq, rl_src.reg, key_temp, &block_label_list_[case_block_id]);
-        OpRegImm(kOpAdd, key_temp, 1);  // Increment key.
-      }
-      SuccessorBlockInfo* successor_block_info = *succ_bb_iter;
-      DCHECK(successor_block_info != nullptr);
-      int case_block_id = successor_block_info->block;
-      DCHECK_EQ(starting_key + i, successor_block_info->key);
-      OpCmpBranch(kCondEq, rl_src.reg, key_temp, &block_label_list_[case_block_id]);
-    } else {
-      // No free temp, just finish the old loop.
-      for (; i < entries; ++i, ++succ_bb_iter) {
-        SuccessorBlockInfo* successor_block_info = *succ_bb_iter;
-        DCHECK(successor_block_info != nullptr);
-        int case_block_id = successor_block_info->block;
-        DCHECK_EQ(starting_key + i, successor_block_info->key);
-        OpCmpImmBranch(kCondEq, rl_src.reg, starting_key + i, &block_label_list_[case_block_id]);
-      }
-    }
-  }
-}
-
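For a switch at or under kSmallSwitchThreshold entries, the chained lowering above is a ladder of equality tests against starting_key + i that falls through to the default successor (with a temp carrying the running key once immediates get expensive). A C-level sketch of the emitted shape for keys 100..102, with hypothetical label names:

#include <cstdio>

// Equivalent control flow for a three-entry packed switch starting at key 100.
static const char* SmallPackedSwitch(int v) {
  if (v == 100) return "case_0";
  if (v == 101) return "case_1";
  if (v == 102) return "case_2";
  return "default";  // Fall through to the default successor.
}

int main() {
  std::printf("%s\n", SmallPackedSwitch(101));  // Prints "case_1".
  return 0;
}
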
-void Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  if (cu_->verbose) {
-    DumpPackedSwitchTable(table);
-  }
-
-  const uint16_t entries = table[1];
-  if (entries <= kSmallSwitchThreshold) {
-    GenSmallPackedSwitch(mir, table_offset, rl_src);
-  } else {
-    // Use the backend-specific implementation.
-    GenLargePackedSwitch(mir, table_offset, rl_src);
-  }
-}
-
-void Mir2Lir::GenSmallSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  BasicBlock* bb = mir_graph_->GetBasicBlock(mir->bb);
-  DCHECK(bb != nullptr);
-  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  const uint16_t entries = table[1];
-  // Chained cmp-and-branch.
-  rl_src = LoadValue(rl_src, kCoreReg);
-  int i = 0;
-  for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
-    int case_block_id = successor_block_info->block;
-    int key = successor_block_info->key;
-    OpCmpImmBranch(kCondEq, rl_src.reg, key, &block_label_list_[case_block_id]);
-    i++;
-  }
-  DCHECK_EQ(i, entries);
-}
-
-void Mir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  if (cu_->verbose) {
-    DumpSparseSwitchTable(table);
-  }
-
-  const uint16_t entries = table[1];
-  if (entries <= kSmallSwitchThreshold) {
-    GenSmallSparseSwitch(mir, table_offset, rl_src);
-  } else {
-    // Use the backend-specific implementation.
-    GenLargeSparseSwitch(mir, table_offset, rl_src);
-  }
-}
-
-bool Mir2Lir::SizeMatchesTypeForEntrypoint(OpSize size, Primitive::Type type) {
-  switch (size) {
-    case kReference:
-      return type == Primitive::kPrimNot;
-    case k64:
-    case kDouble:
-      return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
-    case k32:
-    case kSingle:
-      return type == Primitive::kPrimInt || type == Primitive::kPrimFloat;
-    case kSignedHalf:
-      return type == Primitive::kPrimShort;
-    case kUnsignedHalf:
-      return type == Primitive::kPrimChar;
-    case kSignedByte:
-      return type == Primitive::kPrimByte;
-    case kUnsignedByte:
-      return type == Primitive::kPrimBoolean;
-    case kWord:  // Intentional fallthrough.
-    default:
-      return false;  // There are no sane types with this op size.
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
deleted file mode 100755
index 11d0c9a..0000000
--- a/compiler/dex/quick/gen_invoke.cc
+++ /dev/null
@@ -1,1630 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "mir_to_lir-inl.h"
-
-#include "arm/codegen_arm.h"
-#include "dex/compiler_ir.h"
-#include "dex/dex_flags.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/dex_file_method_inliner.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
-#include "dex_file-inl.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "invoke_type.h"
-#include "mirror/array.h"
-#include "mirror/class-inl.h"
-#include "mirror/dex_cache.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/string.h"
-#include "scoped_thread_state_change.h"
-
-namespace art {
-
-// Shortcuts to repeatedly used long types.
-typedef mirror::ObjectArray<mirror::Object> ObjArray;
-
-/*
- * This source file contains "gen" codegen routines that should
- * be applicable to most targets.  Only mid-level support utilities
- * and "op" calls may be used here.
- */
-
-void Mir2Lir::AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume) {
-  class IntrinsicSlowPathPath : public Mir2Lir::LIRSlowPath {
-   public:
-    IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info_in, LIR* branch_in, LIR* resume_in)
-        : LIRSlowPath(m2l, branch_in, resume_in), info_(info_in) {
-      DCHECK_EQ(info_in->offset, current_dex_pc_);
-    }
-
-    void Compile() OVERRIDE {
-      m2l_->ResetRegPool();
-      m2l_->ResetDefTracking();
-      GenerateTargetLabel(kPseudoIntrinsicRetry);
-      // NOTE: GenInvokeNoInline() handles MarkSafepointPC.
-      m2l_->GenInvokeNoInline(info_);
-      if (cont_ != nullptr) {
-        m2l_->OpUnconditionalBranch(cont_);
-      }
-    }
-
-   private:
-    CallInfo* const info_;
-  };
-
-  AddSlowPath(new (arena_) IntrinsicSlowPathPath(this, info, branch, resume));
-}
-
-/*
- * To save scheduling time, helper calls are broken into two parts: generation of
- * the helper target address, and the actual call to the helper.  Because x86
- * has a memory call operation, part 1 is a NOP for x86.  For other targets,
- * load arguments between the two parts.
- */
-RegStorage Mir2Lir::CallHelperSetup(QuickEntrypointEnum trampoline) {
-  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
-    return RegStorage::InvalidReg();
-  } else {
-    return LoadHelper(trampoline);
-  }
-}
-
-LIR* Mir2Lir::CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc,
-                         bool use_link) {
-  LIR* call_inst = InvokeTrampoline(use_link ? kOpBlx : kOpBx, r_tgt, trampoline);
-
-  if (r_tgt.Valid()) {
-    FreeTemp(r_tgt);
-  }
-
-  if (safepoint_pc) {
-    MarkSafepointPC(call_inst);
-  }
-  return call_inst;
-}
-
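The intended usage pattern, visible in every CallRuntimeHelper* wrapper below, is: CallHelperSetup first (a no-op returning an invalid register on x86, where the call goes through memory), argument loads in between, then CallHelper. A standalone analogy of that two-part protocol (hypothetical types, not the real Mir2Lir API):

#include <cstdio>

using Helper = void (*)(int);
static void HelperImpl(int arg) { std::printf("helper(%d)\n", arg); }

// Part 1: materialize the target address early, so argument setup can be
// scheduled between the address load and the branch.
static Helper CallHelperSetupSketch() { return &HelperImpl; }

int main() {
  Helper tgt = CallHelperSetupSketch();  // Load the target...
  int arg0 = 42;                         // ...fill argument registers in between...
  tgt(arg0);                             // ...then the actual call (part 2).
  return 0;
}
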
-void Mir2Lir::CallRuntimeHelper(QuickEntrypointEnum trampoline, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperImm(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperReg(QuickEntrypointEnum trampoline, RegStorage arg0,
-                                   bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
-                                           bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  if (arg0.wide == 0) {
-    LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, arg0));
-  } else {
-    LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
-  }
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperImmImm(QuickEntrypointEnum trampoline, int arg0, int arg1,
-                                      bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
-  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, int arg0,
-                                              RegLocation arg1, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  if (arg1.wide == 0) {
-    LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
-  } else {
-    RegStorage r_tmp = TargetReg(kArg2, kWide);
-    LoadValueDirectWideFixed(arg1, r_tmp);
-  }
-  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperRegLocationImm(QuickEntrypointEnum trampoline, RegLocation arg0,
-                                              int arg1, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  DCHECK(!arg0.wide);
-  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
-  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperImmReg(QuickEntrypointEnum trampoline, int arg0, RegStorage arg1,
-                                      bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  OpRegCopy(TargetReg(kArg1, arg1.GetWideKind()), arg1);
-  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperRegImm(QuickEntrypointEnum trampoline, RegStorage arg0, int arg1,
-                                      bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
-  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperImmMethod(QuickEntrypointEnum trampoline, int arg0,
-                                         bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
-  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperRegMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
-                                         bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
-  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
-  if (r_tmp.NotExactlyEquals(arg0)) {
-    OpRegCopy(r_tmp, arg0);
-  }
-  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperRegRegLocationMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
-                                                    RegLocation arg1, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  DCHECK(!IsSameReg(TargetReg(kArg2, arg0.GetWideKind()), arg0));
-  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
-  if (r_tmp.NotExactlyEquals(arg0)) {
-    OpRegCopy(r_tmp, arg0);
-  }
-  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
-  LoadCurrMethodDirect(TargetReg(kArg2, kRef));
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampoline,
-                                                      RegLocation arg0, RegLocation arg1,
-                                                      bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  if (cu_->instruction_set == kArm64 || cu_->instruction_set == kMips64 ||
-      cu_->instruction_set == kX86_64) {
-    RegStorage arg0_reg = TargetReg((arg0.fp) ? kFArg0 : kArg0, arg0);
-
-    RegStorage arg1_reg;
-    if (arg1.fp == arg0.fp) {
-      arg1_reg = TargetReg((arg1.fp) ? kFArg1 : kArg1, arg1);
-    } else {
-      arg1_reg = TargetReg((arg1.fp) ? kFArg0 : kArg0, arg1);
-    }
-
-    if (arg0.wide == 0) {
-      LoadValueDirectFixed(arg0, arg0_reg);
-    } else {
-      LoadValueDirectWideFixed(arg0, arg0_reg);
-    }
-
-    if (arg1.wide == 0) {
-      LoadValueDirectFixed(arg1, arg1_reg);
-    } else {
-      LoadValueDirectWideFixed(arg1, arg1_reg);
-    }
-  } else {
-    DCHECK(!cu_->target64);
-    if (arg0.wide == 0) {
-      LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kNotWide));
-      if (arg1.wide == 0) {
-        // For Mips, when the 1st arg is integral, the remaining args are passed in core regs.
-        if (cu_->instruction_set == kMips) {
-          LoadValueDirectFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg1, kNotWide));
-        } else {
-          LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kNotWide));
-        }
-      } else {
-        // For Mips, when the 1st arg is integral, the remaining args are passed in core regs.
-        if (cu_->instruction_set == kMips) {
-          LoadValueDirectWideFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kWide));
-        } else {
-          LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kWide));
-        }
-      }
-    } else {
-      LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
-      if (arg1.wide == 0) {
-        // For Mips, when the 1st arg is integral, the remaining args are passed in core regs.
-        if (cu_->instruction_set == kMips) {
-          LoadValueDirectFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kNotWide));
-        } else {
-          LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kNotWide));
-        }
-      } else {
-        // For Mips, when the 1st arg is integral, the remaining args are passed in core regs.
-        if (cu_->instruction_set == kMips) {
-          LoadValueDirectWideFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kWide));
-        } else {
-          LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
-        }
-      }
-    }
-  }
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
-  WideKind arg0_kind = arg0.GetWideKind();
-  WideKind arg1_kind = arg1.GetWideKind();
-  if (IsSameReg(arg1, TargetReg(kArg0, arg1_kind))) {
-    if (IsSameReg(arg0, TargetReg(kArg1, arg0_kind))) {
-      // Swap kArg0 and kArg1 with kArg2 as temp.
-      OpRegCopy(TargetReg(kArg2, arg1_kind), arg1);
-      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
-      OpRegCopy(TargetReg(kArg1, arg1_kind), TargetReg(kArg2, arg1_kind));
-    } else {
-      OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
-      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
-    }
-  } else {
-    OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
-    OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
-  }
-}
-
-void Mir2Lir::CallRuntimeHelperRegReg(QuickEntrypointEnum trampoline, RegStorage arg0,
-                                      RegStorage arg1, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  CopyToArgumentRegs(arg0, arg1);
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperRegRegImm(QuickEntrypointEnum trampoline, RegStorage arg0,
-                                         RegStorage arg1, int arg2, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  CopyToArgumentRegs(arg0, arg1);
-  LoadConstant(TargetReg(kArg2, kNotWide), arg2);
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperImmRegLocationMethod(QuickEntrypointEnum trampoline, int arg0,
-                                                    RegLocation arg1, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
-  LoadCurrMethodDirect(TargetReg(kArg2, kRef));
-  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperImmImmMethod(QuickEntrypointEnum trampoline, int arg0, int arg1,
-                                            bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  LoadCurrMethodDirect(TargetReg(kArg2, kRef));
-  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
-  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(QuickEntrypointEnum trampoline, int arg0,
-                                                         RegLocation arg1,
-                                                         RegLocation arg2, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  DCHECK_EQ(static_cast<unsigned int>(arg1.wide), 0U);  // The static_cast works around an
-                                                        // instantiation bug in GCC.
-  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
-  if (arg2.wide == 0) {
-    LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
-  } else {
-    LoadValueDirectWideFixed(arg2, TargetReg(kArg2, kWide));
-  }
-  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(
-    QuickEntrypointEnum trampoline,
-    RegLocation arg0,
-    RegLocation arg1,
-    RegLocation arg2,
-    bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
-  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
-  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocationRegLocation(
-    QuickEntrypointEnum trampoline, RegLocation arg0, RegLocation arg1, RegLocation arg2,
-    RegLocation arg3, bool safepoint_pc) {
-  RegStorage r_tgt = CallHelperSetup(trampoline);
-  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
-  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
-  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
-  LoadValueDirectFixed(arg3, TargetReg(kArg3, arg3));
-  ClobberCallerSave();
-  CallHelper(r_tgt, trampoline, safepoint_pc);
-}
-
-/*
- * If there are any ins passed in registers that have not been promoted
- * to a callee-save register, flush them to the frame.  Perform initial
- * assignment of promoted arguments.
- *
- * ArgLocs is an array of location records describing the incoming arguments
- * with one location record per word of argument.
- */
-// TODO: Support 64-bit argument registers.
-void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
-  /*
-   * Dummy up a RegLocation for the incoming ArtMethod*.
-   * It will attempt to keep kArg0 live (or copy it to home location
-   * if promoted).
-   */
-  RegLocation rl_src = rl_method;
-  rl_src.location = kLocPhysReg;
-  rl_src.reg = TargetReg(kArg0, kRef);
-  rl_src.home = false;
-  MarkLive(rl_src);
-  if (cu_->target64) {
-    DCHECK(rl_method.wide);
-    StoreValueWide(rl_method, rl_src);
-  } else {
-    StoreValue(rl_method, rl_src);
-  }
-  // If Method* has been promoted, explicitly flush
-  if (rl_method.location == kLocPhysReg) {
-    StoreBaseDisp(TargetPtrReg(kSp), 0, rl_src.reg, kWord, kNotVolatile);
-  }
-
-  if (mir_graph_->GetNumOfInVRs() == 0) {
-    return;
-  }
-
-  int start_vreg = mir_graph_->GetFirstInVR();
-  /*
-   * Copy incoming arguments to their proper home locations.
-   * NOTE: an older version of dx had an issue in which
-   * it would reuse static method argument registers.
-   * This could result in the same Dalvik virtual register
-   * being promoted to both core and fp regs. To account for this,
-   * we only copy to the corresponding promoted physical register
-   * if it matches the type of the SSA name for the incoming
-   * argument.  It is also possible that long and double arguments
-   * end up half-promoted.  In those cases, we must flush the promoted
-   * half to memory as well.
-   */
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-  RegLocation* t_loc = nullptr;
-  EnsureInitializedArgMappingToPhysicalReg();
-  for (uint32_t i = 0; i < mir_graph_->GetNumOfInVRs(); i += t_loc->wide ? 2 : 1) {
-    // get reg corresponding to input
-    RegStorage reg = in_to_reg_storage_mapping_.GetReg(i);
-    t_loc = &ArgLocs[i];
-
-    // If the wide input appeared as a single (32-bit) register, handle it
-    // as if it comes from memory.
-    if (t_loc->wide && reg.Valid() && !reg.Is64Bit()) {
-      // The memory already holds the half. Don't do anything.
-      reg = RegStorage::InvalidReg();
-    }
-
-    if (reg.Valid()) {
-      // If arriving in register.
-
-      // We have already updated the arg location with the promotion info,
-      // so we can rely on it.
-      if (t_loc->location == kLocPhysReg) {
-        // Just copy it.
-        if (t_loc->wide) {
-          OpRegCopyWide(t_loc->reg, reg);
-        } else {
-          OpRegCopy(t_loc->reg, reg);
-        }
-      } else {
-        // Needs flush.
-        int offset = SRegOffset(start_vreg + i);
-        if (t_loc->ref) {
-          StoreRefDisp(TargetPtrReg(kSp), offset, reg, kNotVolatile);
-        } else {
-          StoreBaseDisp(TargetPtrReg(kSp), offset, reg, t_loc->wide ? k64 : k32, kNotVolatile);
-        }
-      }
-    } else {
-      // If arriving in frame & promoted.
-      if (t_loc->location == kLocPhysReg) {
-        int offset = SRegOffset(start_vreg + i);
-        if (t_loc->ref) {
-          LoadRefDisp(TargetPtrReg(kSp), offset, t_loc->reg, kNotVolatile);
-        } else {
-          LoadBaseDisp(TargetPtrReg(kSp), offset, t_loc->reg, t_loc->wide ? k64 : k32,
-                       kNotVolatile);
-        }
-      }
-    }
-  }
-}
-
-static void CommonCallCodeLoadThisIntoArg1(const CallInfo* info, Mir2Lir* cg) {
-  RegLocation rl_arg = info->args[0];
-  cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1, kRef));
-}
-
-static void CommonCallCodeLoadClassIntoArg0(const CallInfo* info, Mir2Lir* cg) {
-  cg->GenNullCheck(cg->TargetReg(kArg1, kRef), info->opt_flags);
-  // get this->klass_ [use kArg1, set kArg0]
-  cg->LoadRefDisp(cg->TargetReg(kArg1, kRef), mirror::Object::ClassOffset().Int32Value(),
-                  cg->TargetReg(kArg0, kRef),
-                  kNotVolatile);
-  cg->MarkPossibleNullPointerException(info->opt_flags);
-}
-
-static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_from,
-                                                       const CompilationUnit* cu, Mir2Lir* cg) {
-  if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
-    int32_t offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-        InstructionSetPointerSize(cu->instruction_set)).Int32Value();
-    // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
-    cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from, offset,
-                     cg->TargetPtrReg(kInvokeTgt));
-    return true;
-  }
-  return false;
-}
-
-/*
- * Bit of a hack here - in the absence of a real scheduling pass,
- * emit the next instruction in a virtual invoke sequence.
- * We can use kLr as a temp prior to target address loading.
- * Note also that we'll load the first argument ("this") into
- * kArg1 here rather than the standard GenDalvikArgs.
- */
-static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
-                         int state, const MethodReference& target_method ATTRIBUTE_UNUSED,
-                         uint32_t method_idx, uintptr_t, uintptr_t,
-                         InvokeType) {
-  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
-  /*
-   * This is the fast path in which the target virtual method is
-   * fully resolved at compile time.
-   */
-  switch (state) {
-    case 0:
-      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
-      break;
-    case 1:
-      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
-                                                  // Includes a null-check.
-      break;
-    case 2: {
-      // Get this->klass_.embedded_vtable[method_idx] [use kArg0, set kArg0]
-      const size_t pointer_size = InstructionSetPointerSize(
-          cu->compiler_driver->GetInstructionSet());
-      int32_t offset = mirror::Class::EmbeddedVTableEntryOffset(
-          method_idx, pointer_size).Uint32Value();
-      // Load target method from embedded vtable to kArg0 [use kArg0, set kArg0]
-      cg->LoadWordDisp(cg->TargetPtrReg(kArg0), offset, cg->TargetPtrReg(kArg0));
-      break;
-    }
-    case 3:
-      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
-        break;                                    // kInvokeTgt := kArg0->entrypoint
-      }
-      DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
-      FALLTHROUGH_INTENDED;
-    default:
-      return -1;
-  }
-  return state + 1;
-}
-
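NextVCallInsn is one instance of the NextCallInsn protocol that GenDalvikArgs drives: each call emits one step of the invoke sequence and returns the next state, or -1 once the sequence is exhausted, so the driver can interleave its own argument moves between steps. A toy model of that protocol (step strings are descriptive placeholders, not real output):

#include <cstdio>

// Each call emits one step of the virtual-invoke sequence and returns the
// next state; -1 signals that the sequence is exhausted.
static int NextVCallStep(int state) {
  switch (state) {
    case 0: std::puts("kArg1 := this"); break;
    case 1: std::puts("kArg0 := kArg1->class (null-checked)"); break;
    case 2: std::puts("kArg0 := kArg0->vtable[method_idx]"); break;
    case 3: std::puts("kInvokeTgt := kArg0->entrypoint"); break;
    default: return -1;
  }
  return state + 1;
}

int main() {
  // The driver advances the machine, free to interleave its own argument
  // moves between the steps.
  for (int state = 0; state != -1; state = NextVCallStep(state)) {}
  return 0;
}
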
-/*
- * Emit the next instruction in an invoke interface sequence. This will do a lookup in the
- * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
- * more than one interface method maps to the same index. Note also that we'll load the first
- * argument ("this") into kArg1 here rather than the standard GenDalvikArgs.
- */
-static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
-                                 const MethodReference& target_method,
-                                 uint32_t method_idx, uintptr_t, uintptr_t, InvokeType) {
-  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
-
-  switch (state) {
-    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
-      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
-      cg->LoadConstant(cg->TargetReg(kHiddenArg, kNotWide), target_method.dex_method_index);
-      if (cu->instruction_set == kX86) {
-        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg, kNotWide), cg->TargetReg(kHiddenArg, kNotWide));
-      }
-      break;
-    case 1:
-      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
-      break;
-    case 2:
-      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
-                                                  // Includes a null-check.
-      break;
-    case 3: {  // Get target method [use kInvokeTgt, set kArg0]
-      const size_t pointer_size = InstructionSetPointerSize(
-          cu->compiler_driver->GetInstructionSet());
-      int32_t offset = mirror::Class::EmbeddedImTableEntryOffset(
-          method_idx % mirror::Class::kImtSize, pointer_size).Uint32Value();
-      // Load target method from embedded imtable to kArg0 [use kArg0, set kArg0]
-      cg->LoadWordDisp(cg->TargetPtrReg(kArg0), offset, cg->TargetPtrReg(kArg0));
-      break;
-    }
-    case 4:
-      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
-        break;                                    // kInvokeTgt := kArg0->entrypoint
-      }
-      DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
-      FALLTHROUGH_INTENDED;
-    default:
-      return -1;
-  }
-  return state + 1;
-}
-
-static int NextInvokeInsnSP(CompilationUnit* cu,
-                            CallInfo* info ATTRIBUTE_UNUSED,
-                            QuickEntrypointEnum trampoline,
-                            int state,
-                            const MethodReference& target_method,
-                            uint32_t method_idx ATTRIBUTE_UNUSED) {
-  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
-
-  /*
-   * This handles the case in which the base method is not fully
-   * resolved at compile time; we bail to a runtime helper.
-   */
-  if (state == 0) {
-    if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
-      // Load trampoline target
-      int32_t disp;
-      if (cu->target64) {
-        disp = GetThreadOffset<8>(trampoline).Int32Value();
-      } else {
-        disp = GetThreadOffset<4>(trampoline).Int32Value();
-      }
-      cg->LoadWordDisp(cg->TargetPtrReg(kSelf), disp, cg->TargetPtrReg(kInvokeTgt));
-    }
-    // Load kArg0 with method index
-    CHECK_EQ(cu->dex_file, target_method.dex_file);
-    cg->LoadConstant(cg->TargetReg(kArg0, kNotWide), target_method.dex_method_index);
-    return 1;
-  }
-  return -1;
-}
-
-static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
-                                int state,
-                                const MethodReference& target_method,
-                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
-  return NextInvokeInsnSP(cu, info, kQuickInvokeStaticTrampolineWithAccessCheck, state,
-                          target_method, 0);
-}
-
-static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
-                                const MethodReference& target_method,
-                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
-  return NextInvokeInsnSP(cu, info, kQuickInvokeDirectTrampolineWithAccessCheck, state,
-                          target_method, 0);
-}
-
-static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
-                               const MethodReference& target_method,
-                               uint32_t, uintptr_t, uintptr_t, InvokeType) {
-  return NextInvokeInsnSP(cu, info, kQuickInvokeSuperTrampolineWithAccessCheck, state,
-                          target_method, 0);
-}
-
-static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
-                           const MethodReference& target_method,
-                           uint32_t, uintptr_t, uintptr_t, InvokeType) {
-  return NextInvokeInsnSP(cu, info, kQuickInvokeVirtualTrampolineWithAccessCheck, state,
-                          target_method, 0);
-}
-
-static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
-                                                CallInfo* info, int state,
-                                                const MethodReference& target_method,
-                                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
-  return NextInvokeInsnSP(cu, info, kQuickInvokeInterfaceTrampolineWithAccessCheck, state,
-                          target_method, 0);
-}
-
-// Default implementation of implicit null pointer check.
-// Overridden by arch specific as necessary.
-void Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
-  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
-    return;
-  }
-  RegStorage tmp = AllocTemp();
-  Load32Disp(reg, 0, tmp);
-  MarkPossibleNullPointerException(opt_flags);
-  FreeTemp(tmp);
-}
-
-/**
- * @brief Used to flush promoted registers if they are used as argument
- * in an invocation.
- * @param info the information about the arguments for invocation.
- * @param start the first argument we should start to look from.
- */
-void Mir2Lir::GenDalvikArgsFlushPromoted(CallInfo* info, int start) {
-  if (cu_->disable_opt & (1 << kPromoteRegs)) {
-    // This makes sense only if promotion is enabled.
-    return;
-  }
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-  // Scan the rest of the args - if in a phys reg, flush it to memory.
-  for (size_t next_arg = start; next_arg < info->num_arg_words;) {
-    RegLocation loc = info->args[next_arg];
-    if (loc.wide) {
-      loc = UpdateLocWide(loc);
-      if (loc.location == kLocPhysReg) {
-        StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
-      }
-      next_arg += 2;
-    } else {
-      loc = UpdateLoc(loc);
-      if (loc.location == kLocPhysReg) {
-        if (loc.ref) {
-          StoreRefDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
-        } else {
-          StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32,
-                        kNotVolatile);
-        }
-      }
-      next_arg++;
-    }
-  }
-}
-
-/**
- * @brief Used to optimize the copying of VRs which are arguments of invocation.
- * Note that promoted registers should be flushed first if copying is done.
- * An implementation that copies may skip several of the first VRs but must
- * copy through to the end. It must return the number of skipped VRs
- * (which might be all of them).
- * @see GenDalvikArgsFlushPromoted
- * @param info the information about arguments for invocation.
- * @param first the first argument we should start to look from.
- * @param count the number of remaining arguments we can handle.
- * @return the number of arguments which we did not handle. Unhandled arguments
- * must be attached to the first one.
- */
-int Mir2Lir::GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) {
-  // A call is pretty expensive, so use it only if the count is big.
-  if (count > 16) {
-    GenDalvikArgsFlushPromoted(info, first);
-    int start_offset = SRegOffset(info->args[first].s_reg_low);
-    int outs_offset = StackVisitor::GetOutVROffset(first, cu_->instruction_set);
-
-    OpRegRegImm(kOpAdd, TargetReg(kArg0, kRef), TargetPtrReg(kSp), outs_offset);
-    OpRegRegImm(kOpAdd, TargetReg(kArg1, kRef), TargetPtrReg(kSp), start_offset);
-    CallRuntimeHelperRegRegImm(kQuickMemcpy, TargetReg(kArg0, kRef), TargetReg(kArg1, kRef),
-                               count * 4, false);
-    count = 0;
-  }
-  return count;
-}
-
-int Mir2Lir::GenDalvikArgs(CallInfo* info, int call_state,
-                           LIR** pcrLabel, NextCallInsn next_call_insn,
-                           const MethodReference& target_method,
-                           uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
-                           InvokeType type, bool skip_this) {
-  // If no arguments, just return.
-  if (info->num_arg_words == 0u) {
-    return call_state;
-  }
-
-  const size_t start_index = skip_this ? 1 : 0;
-
-  // Get the architecture-dependent mapping between output VRs and physical
-  // registers, based on the shorty of the method to call.
-  InToRegStorageMapping in_to_reg_storage_mapping(arena_);
-  {
-    const char* target_shorty = mir_graph_->GetShortyFromMethodReference(target_method);
-    ShortyIterator shorty_iterator(target_shorty, type == kStatic);
-    in_to_reg_storage_mapping.Initialize(&shorty_iterator, GetResetedInToRegStorageMapper());
-  }
-
-  size_t stack_map_start = std::max(in_to_reg_storage_mapping.GetEndMappedIn(), start_index);
-  if ((stack_map_start < info->num_arg_words) && info->args[stack_map_start].high_word) {
-    // It is possible that the last mapped reg is 32-bit while the arg is 64-bit.
-    // The high half will be handled together with the low part mapped to a register.
-    stack_map_start++;
-  }
-  size_t regs_left_to_pass_via_stack = info->num_arg_words - stack_map_start;
-
-  // If it is a range case, we can try to copy the remaining VRs (those not
-  // mapped to physical registers) using a more optimal algorithm.
-  if (info->is_range && regs_left_to_pass_via_stack > 1) {
-    regs_left_to_pass_via_stack = GenDalvikArgsBulkCopy(info, stack_map_start,
-                                                        regs_left_to_pass_via_stack);
-  }
-
-  // Now handle any remaining VRs mapped to stack.
-  if (in_to_reg_storage_mapping.HasArgumentsOnStack()) {
-    // Two temps, but do not use kArg1; it might hold "this", which we may skip.
-    // Keep single and wide temps separate - it can give some advantage.
-    RegStorage regRef = TargetReg(kArg3, kRef);
-    RegStorage regSingle = TargetReg(kArg3, kNotWide);
-    RegStorage regWide = TargetReg(kArg2, kWide);
-    for (size_t i = start_index; i < stack_map_start + regs_left_to_pass_via_stack; i++) {
-      RegLocation rl_arg = info->args[i];
-      rl_arg = UpdateRawLoc(rl_arg);
-      RegStorage reg = in_to_reg_storage_mapping.GetReg(i);
-      if (!reg.Valid()) {
-        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
-        {
-          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-          if (rl_arg.wide) {
-            if (rl_arg.location == kLocPhysReg) {
-              StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
-            } else {
-              LoadValueDirectWideFixed(rl_arg, regWide);
-              StoreBaseDisp(TargetPtrReg(kSp), out_offset, regWide, k64, kNotVolatile);
-            }
-          } else {
-            if (rl_arg.location == kLocPhysReg) {
-              if (rl_arg.ref) {
-                StoreRefDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, kNotVolatile);
-              } else {
-                StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
-              }
-            } else {
-              if (rl_arg.ref) {
-                LoadValueDirectFixed(rl_arg, regRef);
-                StoreRefDisp(TargetPtrReg(kSp), out_offset, regRef, kNotVolatile);
-              } else {
-                LoadValueDirectFixed(rl_arg, regSingle);
-                StoreBaseDisp(TargetPtrReg(kSp), out_offset, regSingle, k32, kNotVolatile);
-              }
-            }
-          }
-        }
-        call_state = next_call_insn(cu_, info, call_state, target_method,
-                                    vtable_idx, direct_code, direct_method, type);
-      }
-      if (rl_arg.wide) {
-        i++;
-      }
-    }
-  }
-
-  // Finish with VRs mapped to physical registers.
-  for (size_t i = start_index; i < stack_map_start; i++) {
-    RegLocation rl_arg = info->args[i];
-    rl_arg = UpdateRawLoc(rl_arg);
-    RegStorage reg = in_to_reg_storage_mapping.GetReg(i);
-    if (reg.Valid()) {
-      if (rl_arg.wide) {
-        // If reg is not 64-bit (it holds half of the 64-bit value), handle it separately.
-        if (!reg.Is64Bit()) {
-          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-          if (rl_arg.location == kLocPhysReg) {
-            int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
-            // Dump it to memory.
-            StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
-            LoadBaseDisp(TargetPtrReg(kSp), out_offset, reg, k32, kNotVolatile);
-          } else {
-            int high_offset = StackVisitor::GetOutVROffset(i + 1, cu_->instruction_set);
-            // First, use target reg for high part.
-            LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low + 1), reg, k32,
-                         kNotVolatile);
-            StoreBaseDisp(TargetPtrReg(kSp), high_offset, reg, k32, kNotVolatile);
-            // Now, use target reg for low part.
-            LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low), reg, k32, kNotVolatile);
-            int low_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
-            // And store it to the expected memory location.
-            StoreBaseDisp(TargetPtrReg(kSp), low_offset, reg, k32, kNotVolatile);
-          }
-        } else {
-          LoadValueDirectWideFixed(rl_arg, reg);
-        }
-      } else {
-        LoadValueDirectFixed(rl_arg, reg);
-      }
-      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
-                               direct_code, direct_method, type);
-    }
-    if (rl_arg.wide) {
-      i++;
-    }
-  }
-
-  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
-                           direct_code, direct_method, type);
-  if (pcrLabel) {
-    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
-    } else {
-      *pcrLabel = nullptr;
-      GenImplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
-    }
-  }
-  return call_state;
-}
-
-void Mir2Lir::EnsureInitializedArgMappingToPhysicalReg() {
-  if (!in_to_reg_storage_mapping_.IsInitialized()) {
-    ShortyIterator shorty_iterator(cu_->shorty, cu_->invoke_type == kStatic);
-    in_to_reg_storage_mapping_.Initialize(&shorty_iterator, GetResetedInToRegStorageMapper());
-  }
-}
-
-RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
-  RegLocation res;
-  if (info->result.location == kLocInvalid) {
-    // If result is unused, return a sink target based on type of invoke target.
-    res = GetReturn(
-        ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
-  } else {
-    res = info->result;
-  }
-  return res;
-}
-
-RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
-  RegLocation res;
-  if (info->result.location == kLocInvalid) {
-    // If result is unused, return a sink target based on type of invoke target.
-    res = GetReturnWide(ShortyToRegClass(
-        mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
-  } else {
-    res = info->result;
-  }
-  return res;
-}
-
-bool Mir2Lir::GenInlinedReferenceGetReferent(CallInfo* info) {
-  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
-    // TODO: add Mips and Mips64 implementations.
-    return false;
-  }
-
-  bool use_direct_type_ptr;
-  uintptr_t direct_type_ptr;
-  ClassReference ref;
-  if (!cu_->compiler_driver->CanEmbedReferenceTypeInCode(&ref,
-        &use_direct_type_ptr, &direct_type_ptr)) {
-    return false;
-  }
-
-  RegStorage reg_class = TargetReg(kArg1, kRef);
-  Clobber(reg_class);
-  LockTemp(reg_class);
-  if (use_direct_type_ptr) {
-    LoadConstant(reg_class, direct_type_ptr);
-  } else {
-    uint16_t type_idx = ref.first->GetClassDef(ref.second).class_idx_;
-    LoadClassType(*ref.first, type_idx, kArg1);
-  }
-
-  uint32_t slow_path_flag_offset = cu_->compiler_driver->GetReferenceSlowFlagOffset();
-  uint32_t disable_flag_offset = cu_->compiler_driver->GetReferenceDisableFlagOffset();
-  CHECK(slow_path_flag_offset && disable_flag_offset &&
-        (slow_path_flag_offset != disable_flag_offset));
-
-  // intrinsic logic start.
-  RegLocation rl_obj = info->args[0];
-  rl_obj = LoadValue(rl_obj, kRefReg);
-
-  RegStorage reg_slow_path = AllocTemp();
-  RegStorage reg_disabled = AllocTemp();
-  LoadBaseDisp(reg_class, slow_path_flag_offset, reg_slow_path, kSignedByte, kNotVolatile);
-  LoadBaseDisp(reg_class, disable_flag_offset, reg_disabled, kSignedByte, kNotVolatile);
-  FreeTemp(reg_class);
-  LIR* or_inst = OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled);
-  FreeTemp(reg_disabled);
-
-  // If the slow path is needed, jump to the JNI path target.
-  LIR* slow_path_branch;
-  if (or_inst->u.m.def_mask->HasBit(ResourceMask::kCCode)) {
-    // Generate only a conditional branch, as the OR already set the condition
-    // flags (we are interested in the 'Z' flag).
-    slow_path_branch = OpCondBranch(kCondNe, nullptr);
-  } else {
-    // Generate compare and branch.
-    slow_path_branch = OpCmpImmBranch(kCondNe, reg_slow_path, 0, nullptr);
-  }
-  FreeTemp(reg_slow_path);
-
-  // slow path not enabled, simply load the referent of the reference object
-  RegLocation rl_dest = InlineTarget(info);
-  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
-  GenNullCheck(rl_obj.reg, info->opt_flags);
-  LoadRefDisp(rl_obj.reg, mirror::Reference::ReferentOffset().Int32Value(), rl_result.reg,
-              kNotVolatile);
-  MarkPossibleNullPointerException(info->opt_flags);
-  StoreValue(rl_dest, rl_result);
-
-  LIR* intrinsic_finish = NewLIR0(kPseudoTargetLabel);
-  AddIntrinsicSlowPath(info, slow_path_branch, intrinsic_finish);
-  ClobberCallerSave();  // We must clobber everything because the slow path will return here.
-  return true;
-}
-
-bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
-  // Location of char array data
-  int value_offset = mirror::String::ValueOffset().Int32Value();
-  // Location of count
-  int count_offset = mirror::String::CountOffset().Int32Value();
-
-  RegLocation rl_obj = info->args[0];
-  RegLocation rl_idx = info->args[1];
-  rl_obj = LoadValue(rl_obj, kRefReg);
-  rl_idx = LoadValue(rl_idx, kCoreReg);
-  RegStorage reg_max;
-  GenNullCheck(rl_obj.reg, info->opt_flags);
-  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
-  LIR* range_check_branch = nullptr;
-  if (range_check) {
-    reg_max = AllocTemp();
-    Load32Disp(rl_obj.reg, count_offset, reg_max);
-    MarkPossibleNullPointerException(info->opt_flags);
-    // Set up a slow path to allow retry in case of bounds violation
-    OpRegReg(kOpCmp, rl_idx.reg, reg_max);
-    FreeTemp(reg_max);
-    range_check_branch = OpCondBranch(kCondUge, nullptr);
-  }
-  RegStorage reg_ptr = AllocTempRef();
-  OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, value_offset);
-  FreeTemp(rl_obj.reg);
-  RegLocation rl_dest = InlineTarget(info);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  LoadBaseIndexed(reg_ptr, rl_idx.reg, rl_result.reg, 1, kUnsignedHalf);
-  FreeTemp(reg_ptr);
-  StoreValue(rl_dest, rl_result);
-  if (range_check) {
-    DCHECK(range_check_branch != nullptr);
-    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've already null checked.
-    AddIntrinsicSlowPath(info, range_check_branch);
-  }
-  return true;
-}
-
-bool Mir2Lir::GenInlinedStringGetCharsNoCheck(CallInfo* info) {
-  if (cu_->instruction_set == kMips) {
-    // TODO - add Mips implementation
-    return false;
-  }
-  size_t char_component_size = Primitive::ComponentSize(Primitive::kPrimChar);
-  // Location of data in char array buffer
-  int data_offset = mirror::Array::DataOffset(char_component_size).Int32Value();
-  // Location of char array data in string
-  int value_offset = mirror::String::ValueOffset().Int32Value();
-
-  RegLocation rl_obj = info->args[0];
-  RegLocation rl_start = info->args[1];
-  RegLocation rl_end = info->args[2];
-  RegLocation rl_buffer = info->args[3];
-  RegLocation rl_index = info->args[4];
-
-  ClobberCallerSave();
-  LockCallTemps();  // Using fixed registers
-  RegStorage reg_dst_ptr = TargetReg(kArg0, kRef);
-  RegStorage reg_src_ptr = TargetReg(kArg1, kRef);
-  RegStorage reg_length = TargetReg(kArg2, kNotWide);
-  RegStorage reg_tmp = TargetReg(kArg3, kNotWide);
-  RegStorage reg_tmp_ptr = RegStorage(RegStorage::k64BitSolo,
-                                      reg_tmp.GetRawBits() & RegStorage::kRegTypeMask);
-
-  LoadValueDirectFixed(rl_buffer, reg_dst_ptr);
-  OpRegImm(kOpAdd, reg_dst_ptr, data_offset);
-  LoadValueDirectFixed(rl_index, reg_tmp);
-  OpRegRegImm(kOpLsl, reg_tmp, reg_tmp, 1);
-  OpRegReg(kOpAdd, reg_dst_ptr, cu_->instruction_set == kArm64 ? reg_tmp_ptr : reg_tmp);
-
-  LoadValueDirectFixed(rl_start, reg_tmp);
-  LoadValueDirectFixed(rl_end, reg_length);
-  OpRegReg(kOpSub, reg_length, reg_tmp);
-  OpRegRegImm(kOpLsl, reg_length, reg_length, 1);
-  LoadValueDirectFixed(rl_obj, reg_src_ptr);
-
-  OpRegImm(kOpAdd, reg_src_ptr, value_offset);
-  OpRegRegImm(kOpLsl, reg_tmp, reg_tmp, 1);
-  OpRegReg(kOpAdd, reg_src_ptr, cu_->instruction_set == kArm64 ? reg_tmp_ptr : reg_tmp);
-
-  RegStorage r_tgt;
-  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
-    r_tgt = LoadHelper(kQuickMemcpy);
-  } else {
-    r_tgt = RegStorage::InvalidReg();
-  }
-  // NOTE: not a safepoint
-  CallHelper(r_tgt, kQuickMemcpy, false, true);
-
-  return true;
-}
-
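-/*
- * For reference, the pointer arithmetic above is a hand-scheduled form of
- * (a sketch, not actual code; offsets are in bytes and chars are 2 bytes wide):
- *
- *   memcpy(buffer + data_offset + index * 2,
- *          str + value_offset + start * 2,
- *          (end - start) * 2);
- */
-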
-// Generates an inlined String.isEmpty or String.length.
-bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
-  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
-    // TODO: add Mips and Mips64 implementations.
-    return false;
-  }
-  // dst = src.length();
-  RegLocation rl_obj = info->args[0];
-  rl_obj = LoadValue(rl_obj, kRefReg);
-  RegLocation rl_dest = InlineTarget(info);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  GenNullCheck(rl_obj.reg, info->opt_flags);
-  Load32Disp(rl_obj.reg, mirror::String::CountOffset().Int32Value(), rl_result.reg);
-  MarkPossibleNullPointerException(info->opt_flags);
-  if (is_empty) {
-    // dst = (dst == 0);
-    if (cu_->instruction_set == kThumb2) {
-      RegStorage t_reg = AllocTemp();
-      OpRegReg(kOpNeg, t_reg, rl_result.reg);
-      OpRegRegReg(kOpAdc, rl_result.reg, rl_result.reg, t_reg);
-    } else if (cu_->instruction_set == kArm64) {
-      OpRegImm(kOpSub, rl_result.reg, 1);
-      OpRegRegImm(kOpLsr, rl_result.reg, rl_result.reg, 31);
-    } else {
-      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
-      OpRegImm(kOpSub, rl_result.reg, 1);
-      OpRegImm(kOpLsr, rl_result.reg, 31);
-    }
-  }
-  StoreValue(rl_dest, rl_result);
-  return true;
-}
-
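-// A host-side sketch (illustrative only) of the branch-free sequences above,
-// assuming a non-negative count, which string lengths always are: only a zero
-// count borrows into the sign bit when decremented, so (count - 1) >>> 31
-// computes (count == 0).
-static inline uint32_t IsEmptySketch(int32_t count) {
-  DCHECK_GE(count, 0);
-  return static_cast<uint32_t>(count - 1) >> 31;  // 1 iff count == 0.
-}
-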
-bool Mir2Lir::GenInlinedStringFactoryNewStringFromBytes(CallInfo* info) {
-  if (cu_->instruction_set == kMips) {
-    // TODO - add Mips implementation
-    return false;
-  }
-  RegLocation rl_data = info->args[0];
-  RegLocation rl_high = info->args[1];
-  RegLocation rl_offset = info->args[2];
-  RegLocation rl_count = info->args[3];
-  rl_data = LoadValue(rl_data, kRefReg);
-  LIR* data_null_check_branch = OpCmpImmBranch(kCondEq, rl_data.reg, 0, nullptr);
-  AddIntrinsicSlowPath(info, data_null_check_branch);
-  CallRuntimeHelperRegLocationRegLocationRegLocationRegLocation(
-      kQuickAllocStringFromBytes, rl_data, rl_high, rl_offset, rl_count, true);
-  RegLocation rl_return = GetReturn(kRefReg);
-  RegLocation rl_dest = InlineTarget(info);
-  StoreValue(rl_dest, rl_return);
-  return true;
-}
-
-bool Mir2Lir::GenInlinedStringFactoryNewStringFromChars(CallInfo* info) {
-  if (cu_->instruction_set == kMips) {
-    // TODO - add Mips implementation
-    return false;
-  }
-  RegLocation rl_offset = info->args[0];
-  RegLocation rl_count = info->args[1];
-  RegLocation rl_data = info->args[2];
-  // No need to emit code checking whether `rl_data` is a null
-  // pointer, as callers of the native method
-  //
-  //   java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
-  //
-  // all include a null check on `data` before calling that method.
-  CallRuntimeHelperRegLocationRegLocationRegLocation(
-      kQuickAllocStringFromChars, rl_offset, rl_count, rl_data, true);
-  RegLocation rl_return = GetReturn(kRefReg);
-  RegLocation rl_dest = InlineTarget(info);
-  StoreValue(rl_dest, rl_return);
-  return true;
-}
-
-bool Mir2Lir::GenInlinedStringFactoryNewStringFromString(CallInfo* info) {
-  if (cu_->instruction_set == kMips) {
-    // TODO - add Mips implementation
-    return false;
-  }
-  RegLocation rl_string = info->args[0];
-  rl_string = LoadValue(rl_string, kRefReg);
-  LIR* string_null_check_branch = OpCmpImmBranch(kCondEq, rl_string.reg, 0, nullptr);
-  AddIntrinsicSlowPath(info, string_null_check_branch);
-  CallRuntimeHelperRegLocation(kQuickAllocStringFromString, rl_string, true);
-  RegLocation rl_return = GetReturn(kRefReg);
-  RegLocation rl_dest = InlineTarget(info);
-  StoreValue(rl_dest, rl_return);
-  return true;
-}
-
-bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
-  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
-    // TODO: add Mips and Mips64 implementations.
-    return false;
-  }
-  RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
-  if (rl_dest.s_reg_low == INVALID_SREG) {
-    // Result is unused, the code is dead. Inlining successful, no code generated.
-    return true;
-  }
-  RegLocation rl_src_i = info->args[0];
-  RegLocation rl_i = IsWide(size) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  if (IsWide(size)) {
-    if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
-      OpRegReg(kOpRev, rl_result.reg, rl_i.reg);
-      StoreValueWide(rl_dest, rl_result);
-      return true;
-    }
-    RegStorage r_i_low = rl_i.reg.GetLow();
-    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
-      // The first REV will clobber rl_result.reg.GetReg(); save the value in a temp
-      // for the second REV.
-      r_i_low = AllocTemp();
-      OpRegCopy(r_i_low, rl_i.reg);
-    }
-    OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh());
-    OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low);
-    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
-      FreeTemp(r_i_low);
-    }
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    DCHECK(size == k32 || size == kSignedHalf);
-    OpKind op = (size == k32) ? kOpRev : kOpRevsh;
-    OpRegReg(op, rl_result.reg, rl_i.reg);
-    StoreValue(rl_dest, rl_result);
-  }
-  return true;
-}
-
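-// The wide path above reverses a 64-bit value using only 32-bit byte-reverse
-// operations: reverse each half, then swap the halves. A plain C++ sketch of
-// the same identity (illustrative only; __builtin_bswap32 stands in for REV):
-static inline uint64_t ReverseBytes64Sketch(uint64_t x) {
-  uint32_t lo = static_cast<uint32_t>(x);
-  uint32_t hi = static_cast<uint32_t>(x >> 32);
-  return (static_cast<uint64_t>(__builtin_bswap32(lo)) << 32) | __builtin_bswap32(hi);
-}
-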
-bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
-  RegLocation rl_dest = InlineTarget(info);
-  if (rl_dest.s_reg_low == INVALID_SREG) {
-    // Result is unused, the code is dead. Inlining successful, no code generated.
-    return true;
-  }
-  RegLocation rl_src = info->args[0];
-  rl_src = LoadValue(rl_src, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  RegStorage sign_reg = AllocTemp();
-  // abs(x) = (x + y) ^ y, where y = x >> 31 (the sign of x replicated).
-  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 31);
-  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
-  OpRegReg(kOpXor, rl_result.reg, sign_reg);
-  StoreValue(rl_dest, rl_result);
-  return true;
-}
-
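-// A sketch of the identity used above (illustrative only), assuming two's
-// complement: the arithmetic shift makes y all sign bits (0 or -1), so
-// (x + y) ^ y leaves x unchanged for x >= 0 and computes ~(x - 1) == -x for
-// x < 0, all without a branch.
-static inline int32_t AbsSketch(int32_t x) {
-  int32_t y = x >> 31;  // 0 if x >= 0, -1 if x < 0.
-  return (x + y) ^ y;
-}
-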
-bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
-  RegLocation rl_dest = InlineTargetWide(info);
-  if (rl_dest.s_reg_low == INVALID_SREG) {
-    // Result is unused, the code is dead. Inlining successful, no code generated.
-    return true;
-  }
-  RegLocation rl_src = info->args[0];
-  rl_src = LoadValueWide(rl_src, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-
-  // If on x86 or if we would clobber a register needed later, just copy the source first.
-  if (cu_->instruction_set != kX86_64 &&
-      (cu_->instruction_set == kX86 ||
-       rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg())) {
-    OpRegCopyWide(rl_result.reg, rl_src.reg);
-    if (rl_result.reg.GetLowReg() != rl_src.reg.GetLowReg() &&
-        rl_result.reg.GetLowReg() != rl_src.reg.GetHighReg() &&
-        rl_result.reg.GetHighReg() != rl_src.reg.GetLowReg() &&
-        rl_result.reg.GetHighReg() != rl_src.reg.GetHighReg()) {
-      // Reuse source registers to avoid running out of temps.
-      FreeTemp(rl_src.reg);
-    }
-    rl_src = rl_result;
-  }
-
-  // abs(x) = (x + y) ^ y, where y is the sign of x replicated (x >> 63, or the high word >> 31).
-  RegStorage sign_reg;
-  if (cu_->instruction_set == kX86_64) {
-    sign_reg = AllocTempWide();
-    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
-    OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
-    OpRegReg(kOpXor, rl_result.reg, sign_reg);
-  } else {
-    sign_reg = AllocTemp();
-    OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHigh(), 31);
-    OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), sign_reg);
-    OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), sign_reg);
-    OpRegReg(kOpXor, rl_result.reg.GetLow(), sign_reg);
-    OpRegReg(kOpXor, rl_result.reg.GetHigh(), sign_reg);
-  }
-  FreeTemp(sign_reg);
-  StoreValueWide(rl_dest, rl_result);
-  return true;
-}
-
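-// On 32-bit targets the identity is applied to the register pair: the add is
-// split into add/adc so the carry out of the low word propagates into the
-// high word, and both halves are XORed with the sign word. A sketch of the
-// whole computation (illustrative only):
-static inline int64_t AbsWideSketch(int64_t x) {
-  int64_t y = x >> 63;  // All sign bits of x.
-  return (x + y) ^ y;
-}
-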
-bool Mir2Lir::GenInlinedReverseBits(CallInfo* info ATTRIBUTE_UNUSED, OpSize size ATTRIBUTE_UNUSED) {
-  // Currently implemented only for ARM64.
-  return false;
-}
-
-bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info ATTRIBUTE_UNUSED,
-                                 bool is_min ATTRIBUTE_UNUSED,
-                                 bool is_double ATTRIBUTE_UNUSED) {
-  // Currently implemented only for ARM64.
-  return false;
-}
-
-bool Mir2Lir::GenInlinedCeil(CallInfo* info ATTRIBUTE_UNUSED) {
-  return false;
-}
-
-bool Mir2Lir::GenInlinedFloor(CallInfo* info ATTRIBUTE_UNUSED) {
-  return false;
-}
-
-bool Mir2Lir::GenInlinedRint(CallInfo* info ATTRIBUTE_UNUSED) {
-  return false;
-}
-
-bool Mir2Lir::GenInlinedRound(CallInfo* info ATTRIBUTE_UNUSED, bool is_double ATTRIBUTE_UNUSED) {
-  return false;
-}
-
-bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
-  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
-    // TODO: add Mips and Mips64 implementations.
-    return false;
-  }
-  RegLocation rl_dest = InlineTarget(info);
-  if (rl_dest.s_reg_low == INVALID_SREG) {
-    // Result is unused, the code is dead. Inlining successful, no code generated.
-    return true;
-  }
-  RegLocation rl_src = info->args[0];
-  StoreValue(rl_dest, rl_src);
-  return true;
-}
-
-bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
-  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
-    // TODO: add Mips and Mips64 implementations.
-    return false;
-  }
-  RegLocation rl_dest = InlineTargetWide(info);
-  if (rl_dest.s_reg_low == INVALID_SREG) {
-    // Result is unused, the code is dead. Inlining successful, no code generated.
-    return true;
-  }
-  RegLocation rl_src = info->args[0];
-  StoreValueWide(rl_dest, rl_src);
-  return true;
-}
-
-bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info ATTRIBUTE_UNUSED) {
-  return false;
-}
-
-/*
- * Fast String.indexOf(I) & (II).  Handles the simple case of char <= 0xFFFF;
- * otherwise bails to the standard library code.
- */
-bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
-  RegLocation rl_obj = info->args[0];
-  RegLocation rl_char = info->args[1];
-  if (rl_char.is_const && (mir_graph_->ConstantValue(rl_char) & ~0xFFFF) != 0) {
-    // Code point beyond 0xFFFF. Punt to the real String.indexOf().
-    return false;
-  }
-
-  ClobberCallerSave();
-  LockCallTemps();  // Using fixed registers
-  RegStorage reg_ptr = TargetReg(kArg0, kRef);
-  RegStorage reg_char = TargetReg(kArg1, kNotWide);
-  RegStorage reg_start = TargetReg(kArg2, kNotWide);
-
-  LoadValueDirectFixed(rl_obj, reg_ptr);
-  LoadValueDirectFixed(rl_char, reg_char);
-  if (zero_based) {
-    LoadConstant(reg_start, 0);
-  } else {
-    RegLocation rl_start = info->args[2];     // 3rd arg only present in III flavor of IndexOf.
-    LoadValueDirectFixed(rl_start, reg_start);
-  }
-  RegStorage r_tgt = LoadHelper(kQuickIndexOf);
-  CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
-  GenExplicitNullCheck(reg_ptr, info->opt_flags);
-  LIR* high_code_point_branch =
-      rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
-  // NOTE: not a safepoint
-  OpReg(kOpBlx, r_tgt);
-  if (!rl_char.is_const) {
-    // Add the slow path for code points beyond 0xFFFF.
-    DCHECK(high_code_point_branch != nullptr);
-    LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
-    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
-    AddIntrinsicSlowPath(info, high_code_point_branch, resume_tgt);
-    ClobberCallerSave();  // We must clobber everything because the slow path will return here.
-  } else {
-    DCHECK_EQ(mir_graph_->ConstantValue(rl_char) & ~0xFFFF, 0);
-    DCHECK(high_code_point_branch == nullptr);
-  }
-  RegLocation rl_return = GetReturn(kCoreReg);
-  RegLocation rl_dest = InlineTarget(info);
-  StoreValue(rl_dest, rl_return);
-  return true;
-}
-
-/* Fast String.compareTo(Ljava/lang/String;)I. */
-bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
-  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
-    // TODO: add Mips and Mips64 implementations.
-    return false;
-  }
-  ClobberCallerSave();
-  LockCallTemps();  // Using fixed registers
-  RegStorage reg_this = TargetReg(kArg0, kRef);
-  RegStorage reg_cmp = TargetReg(kArg1, kRef);
-
-  RegLocation rl_this = info->args[0];
-  RegLocation rl_cmp = info->args[1];
-  LoadValueDirectFixed(rl_this, reg_this);
-  LoadValueDirectFixed(rl_cmp, reg_cmp);
-  RegStorage r_tgt;
-  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
-    r_tgt = LoadHelper(kQuickStringCompareTo);
-  } else {
-    r_tgt = RegStorage::InvalidReg();
-  }
-  GenExplicitNullCheck(reg_this, info->opt_flags);
-  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
-  // TUNING: check if rl_cmp.s_reg_low is already null checked
-  LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
-  AddIntrinsicSlowPath(info, cmp_null_check_branch);
-  // NOTE: not a safepoint
-  CallHelper(r_tgt, kQuickStringCompareTo, false, true);
-  RegLocation rl_return = GetReturn(kCoreReg);
-  RegLocation rl_dest = InlineTarget(info);
-  StoreValue(rl_dest, rl_return);
-  return true;
-}
-
-bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
-  RegLocation rl_dest = InlineTarget(info);
-
-  // Early exit if the result is unused.
-  if (rl_dest.orig_sreg < 0) {
-    return true;
-  }
-
-  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
-
-  if (cu_->target64) {
-    LoadRefDisp(TargetPtrReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg,
-                kNotVolatile);
-  } else {
-    Load32Disp(TargetPtrReg(kSelf), Thread::PeerOffset<4>().Int32Value(), rl_result.reg);
-  }
-
-  StoreValue(rl_dest, rl_result);
-  return true;
-}
-
-bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
-                                  bool is_long, bool is_object, bool is_volatile) {
-  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
-    // TODO: add Mips and Mips64 implementations.
-    return false;
-  }
-  // Unused - RegLocation rl_src_unsafe = info->args[0];
-  RegLocation rl_src_obj = info->args[1];  // Object
-  RegLocation rl_src_offset = info->args[2];  // long low
-  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
-  RegLocation rl_dest = is_long ? InlineTargetWide(info) : InlineTarget(info);  // result reg
-
-  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
-  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, is_object ? kRefReg : kCoreReg, true);
-  if (is_long) {
-    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
-        || cu_->instruction_set == kArm64) {
-      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k64);
-    } else {
-      RegStorage rl_temp_offset = AllocTemp();
-      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
-      LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64, kNotVolatile);
-      FreeTemp(rl_temp_offset);
-    }
-  } else {
-    if (rl_result.ref) {
-      LoadRefIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0);
-    } else {
-      LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k32);
-    }
-  }
-
-  if (is_volatile) {
-    GenMemBarrier(kLoadAny);
-  }
-
-  if (is_long) {
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    StoreValue(rl_dest, rl_result);
-  }
-  return true;
-}
-
-bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
-                                  bool is_object, bool is_volatile, bool is_ordered) {
-  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
-    // TODO: add Mips and Mips64 implementations.
-    return false;
-  }
-  // Unused - RegLocation rl_src_unsafe = info->args[0];
-  RegLocation rl_src_obj = info->args[1];  // Object
-  RegLocation rl_src_offset = info->args[2];  // long low
-  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
-  RegLocation rl_src_value = info->args[4];  // value to store
-  if (is_volatile || is_ordered) {
-    GenMemBarrier(kAnyStore);
-  }
-  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
-  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
-  RegLocation rl_value;
-  if (is_long) {
-    rl_value = LoadValueWide(rl_src_value, kCoreReg);
-    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
-        || cu_->instruction_set == kArm64) {
-      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k64);
-    } else {
-      RegStorage rl_temp_offset = AllocTemp();
-      OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
-      StoreBaseDisp(rl_temp_offset, 0, rl_value.reg, k64, kNotVolatile);
-      FreeTemp(rl_temp_offset);
-    }
-  } else {
-    rl_value = LoadValue(rl_src_value, is_object ? kRefReg : kCoreReg);
-    if (rl_value.ref) {
-      StoreRefIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0);
-    } else {
-      StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k32);
-    }
-  }
-
-  // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
-  FreeTemp(rl_offset.reg);
-
-  if (is_volatile) {
-    // Prevent reordering with a subsequent volatile load.
-    // May also be needed to address store atomicity issues.
-    GenMemBarrier(kAnyAny);
-  }
-  if (is_object) {
-    MarkGCCard(0, rl_value.reg, rl_object.reg);
-  }
-  return true;
-}
-
-void Mir2Lir::GenInvoke(CallInfo* info) {
-  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
-  if (mir_graph_->GetMethodLoweringInfo(info->mir).IsIntrinsic()) {
-    const DexFile* dex_file = info->method_ref.dex_file;
-    auto* inliner = cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(dex_file);
-    if (inliner->GenIntrinsic(this, info)) {
-      return;
-    }
-  }
-  GenInvokeNoInline(info);
-}
-
-void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
-  int call_state = 0;
-  LIR* null_ck;
-  LIR** p_null_ck = nullptr;
-  NextCallInsn next_call_insn;
-  FlushAllRegs();  /* Everything to home location */
-  // Explicit register usage
-  LockCallTemps();
-
-  const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
-  MethodReference target_method = method_info.GetTargetMethod();
-  cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
-  InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
-  info->type = method_info.GetSharpType();
-  bool is_string_init = false;
-  if (method_info.IsSpecial()) {
-    DexFileMethodInliner* inliner = cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(
-        target_method.dex_file);
-    if (inliner->IsStringInitMethodIndex(target_method.dex_method_index)) {
-      is_string_init = true;
-      size_t pointer_size = GetInstructionSetPointerSize(cu_->instruction_set);
-      info->string_init_offset = inliner->GetOffsetForStringInit(target_method.dex_method_index,
-                                                                 pointer_size);
-      info->type = kStatic;
-    }
-  }
-  bool fast_path = method_info.FastPath();
-  bool skip_this;
-
-  if (info->type == kInterface) {
-    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
-    skip_this = fast_path;
-  } else if (info->type == kDirect) {
-    if (fast_path) {
-      p_null_ck = &null_ck;
-    }
-    next_call_insn = fast_path ? GetNextSDCallInsn() : NextDirectCallInsnSP;
-    skip_this = false;
-  } else if (info->type == kStatic) {
-    next_call_insn = fast_path ? GetNextSDCallInsn() : NextStaticCallInsnSP;
-    skip_this = false;
-  } else if (info->type == kSuper) {
-    DCHECK(!fast_path);  // Fast path is a direct call.
-    next_call_insn = NextSuperCallInsnSP;
-    skip_this = false;
-  } else {
-    DCHECK_EQ(info->type, kVirtual);
-    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
-    skip_this = fast_path;
-  }
-  call_state = GenDalvikArgs(info, call_state, p_null_ck,
-                             next_call_insn, target_method, method_info.VTableIndex(),
-                             method_info.DirectCode(), method_info.DirectMethod(),
-                             original_type, skip_this);
-  // Finish up any of the call sequence not interleaved in arg loading
-  while (call_state >= 0) {
-    call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
-                                method_info.DirectCode(), method_info.DirectMethod(),
-                                original_type);
-  }
-  LIR* call_insn = GenCallInsn(method_info);
-  MarkSafepointPC(call_insn);
-
-  FreeCallTemps();
-  if (info->result.location != kLocInvalid) {
-    // We have a following MOVE_RESULT - do it now.
-    RegisterClass reg_class = is_string_init ? kRefReg :
-        ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]);
-    if (info->result.wide) {
-      RegLocation ret_loc = GetReturnWide(reg_class);
-      StoreValueWide(info->result, ret_loc);
-    } else {
-      RegLocation ret_loc = GetReturn(reg_class);
-      StoreValue(info->result, ret_loc);
-    }
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
deleted file mode 100644
index 3f89001..0000000
--- a/compiler/dex/quick/gen_loadstore.cc
+++ /dev/null
@@ -1,425 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "mir_to_lir-inl.h"
-
-#include "dex/compiler_ir.h"
-#include "dex/mir_graph.h"
-#include "invoke_type.h"
-
-namespace art {
-
-/* This file contains target-independent codegen and support. */
-
-/*
- * Load an immediate value into a fixed or temp register.  Target
- * register is clobbered, and marked in_use.
- */
-LIR* Mir2Lir::LoadConstant(RegStorage r_dest, int value) {
-  if (IsTemp(r_dest)) {
-    Clobber(r_dest);
-    MarkInUse(r_dest);
-  }
-  return LoadConstantNoClobber(r_dest, value);
-}
-
-/*
- * Load a Dalvik register into a physical register.  Take care when
- * using this routine, as it doesn't perform any bookkeeping regarding
- * register liveness.  That is the responsibility of the caller.
- */
-void Mir2Lir::LoadValueDirect(RegLocation rl_src, RegStorage r_dest) {
-  rl_src = rl_src.wide ? UpdateLocWide(rl_src) : UpdateLoc(rl_src);
-  if (rl_src.location == kLocPhysReg) {
-    OpRegCopy(r_dest, rl_src.reg);
-  } else if (IsInexpensiveConstant(rl_src)) {
-    // On 64-bit targets this will sign-extend.  Make sure a constant reference is always null.
-    DCHECK(!rl_src.ref || (mir_graph_->ConstantValue(rl_src) == 0));
-    LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src));
-  } else {
-    DCHECK((rl_src.location == kLocDalvikFrame) ||
-           (rl_src.location == kLocCompilerTemp));
-    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-    OpSize op_size;
-    if (rl_src.ref) {
-      op_size = kReference;
-    } else if (rl_src.wide) {
-      op_size = k64;
-    } else {
-      op_size = k32;
-    }
-    LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, op_size, kNotVolatile);
-  }
-}
-
-/*
- * Similar to LoadValueDirect, but clobbers and allocates the target
- * register.  Should be used when loading to a fixed register (for example,
- * loading arguments to an out-of-line call).
- */
-void Mir2Lir::LoadValueDirectFixed(RegLocation rl_src, RegStorage r_dest) {
-  Clobber(r_dest);
-  MarkInUse(r_dest);
-  LoadValueDirect(rl_src, r_dest);
-}
-
-/*
- * Load a Dalvik register pair into physical register(s).  Take care when
- * using this routine, as it doesn't perform any bookkeeping regarding
- * register liveness.  That is the responsibility of the caller.
- */
-void Mir2Lir::LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest) {
-  rl_src = UpdateLocWide(rl_src);
-  if (rl_src.location == kLocPhysReg) {
-    OpRegCopyWide(r_dest, rl_src.reg);
-  } else if (IsInexpensiveConstant(rl_src)) {
-    LoadConstantWide(r_dest, mir_graph_->ConstantValueWide(rl_src));
-  } else {
-    DCHECK((rl_src.location == kLocDalvikFrame) ||
-           (rl_src.location == kLocCompilerTemp));
-    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-    LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, k64, kNotVolatile);
-  }
-}
-
-/*
- * Similar to LoadValueDirect, but clobbers and allocates the target
- * registers.  Should be used when loading to fixed registers (for example,
- * loading arguments to an out-of-line call).
- */
-void Mir2Lir::LoadValueDirectWideFixed(RegLocation rl_src, RegStorage r_dest) {
-  Clobber(r_dest);
-  MarkInUse(r_dest);
-  LoadValueDirectWide(rl_src, r_dest);
-}
-
-RegLocation Mir2Lir::LoadValue(RegLocation rl_src, RegisterClass op_kind) {
-  // If op_kind isn't a reference, rl_src should not be marked as a reference either
-  // unless we've seen type conflicts (i.e. register promotion is disabled).
-  DCHECK(op_kind == kRefReg || (!rl_src.ref || (cu_->disable_opt & (1u << kPromoteRegs)) != 0u));
-  rl_src = UpdateLoc(rl_src);
-  if (rl_src.location == kLocPhysReg) {
-    if (!RegClassMatches(op_kind, rl_src.reg)) {
-      // Wrong register class, realloc, copy and transfer ownership.
-      RegStorage new_reg = AllocTypedTemp(rl_src.fp, op_kind);
-      OpRegCopy(new_reg, rl_src.reg);
-      // Clobber the old reg and free it.
-      Clobber(rl_src.reg);
-      FreeTemp(rl_src.reg);
-      // ...and mark the new one live.
-      rl_src.reg = new_reg;
-      MarkLive(rl_src);
-    }
-    return rl_src;
-  }
-
-  DCHECK_NE(rl_src.s_reg_low, INVALID_SREG);
-  rl_src.reg = AllocTypedTemp(rl_src.fp, op_kind);
-  LoadValueDirect(rl_src, rl_src.reg);
-  rl_src.location = kLocPhysReg;
-  MarkLive(rl_src);
-  return rl_src;
-}
-
-void Mir2Lir::StoreValue(RegLocation rl_dest, RegLocation rl_src) {
-  /*
-   * Sanity checking - should never try to store to the same
-   * ssa name during the compilation of a single instruction
-   * without an intervening ClobberSReg().
-   */
-  if (kIsDebugBuild) {
-    DCHECK((live_sreg_ == INVALID_SREG) ||
-           (rl_dest.s_reg_low != live_sreg_));
-    live_sreg_ = rl_dest.s_reg_low;
-  }
-  LIR* def_start;
-  LIR* def_end;
-  DCHECK(!rl_dest.wide);
-  DCHECK(!rl_src.wide);
-  rl_src = UpdateLoc(rl_src);
-  rl_dest = UpdateLoc(rl_dest);
-  if (rl_src.location == kLocPhysReg) {
-    if (IsLive(rl_src.reg) ||
-      IsPromoted(rl_src.reg) ||
-      (rl_dest.location == kLocPhysReg)) {
-      // Src is live/promoted or Dest has assigned reg.
-      rl_dest = EvalLoc(rl_dest, rl_dest.ref || rl_src.ref ? kRefReg : kAnyReg, false);
-      OpRegCopy(rl_dest.reg, rl_src.reg);
-    } else {
-      // Just re-assign the registers.  Dest gets Src's regs
-      rl_dest.reg = rl_src.reg;
-      Clobber(rl_src.reg);
-    }
-  } else {
-    // Load Src either into promoted Dest or temps allocated for Dest
-    rl_dest = EvalLoc(rl_dest, rl_dest.ref ? kRefReg : kAnyReg, false);
-    LoadValueDirect(rl_src, rl_dest.reg);
-  }
-
-  // Dest is now live and dirty (until/if we flush it to home location)
-  MarkLive(rl_dest);
-  MarkDirty(rl_dest);
-
-  ResetDefLoc(rl_dest);
-  if (IsDirty(rl_dest.reg) && LiveOut(rl_dest.s_reg_low)) {
-    def_start = last_lir_insn_;
-    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-    if (rl_dest.ref) {
-      StoreRefDisp(TargetPtrReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, kNotVolatile);
-    } else {
-      Store32Disp(TargetPtrReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
-    }
-    MarkClean(rl_dest);
-    def_end = last_lir_insn_;
-    if (!rl_dest.ref) {
-      // Exclude references from store elimination
-      MarkDef(rl_dest, def_start, def_end);
-    }
-  }
-}
-
-RegLocation Mir2Lir::LoadValueWide(RegLocation rl_src, RegisterClass op_kind) {
-  DCHECK(rl_src.wide);
-  rl_src = UpdateLocWide(rl_src);
-  if (rl_src.location == kLocPhysReg) {
-    if (!RegClassMatches(op_kind, rl_src.reg)) {
-      // Wrong register class, realloc, copy and transfer ownership.
-      RegStorage new_regs = AllocTypedTempWide(rl_src.fp, op_kind);
-      OpRegCopyWide(new_regs, rl_src.reg);
-      // Clobber the old regs and free them.
-      Clobber(rl_src.reg);
-      FreeTemp(rl_src.reg);
-      // ...and mark the new ones live.
-      rl_src.reg = new_regs;
-      MarkLive(rl_src);
-    }
-    return rl_src;
-  }
-
-  DCHECK_NE(rl_src.s_reg_low, INVALID_SREG);
-  DCHECK_NE(GetSRegHi(rl_src.s_reg_low), INVALID_SREG);
-  rl_src.reg = AllocTypedTempWide(rl_src.fp, op_kind);
-  LoadValueDirectWide(rl_src, rl_src.reg);
-  rl_src.location = kLocPhysReg;
-  MarkLive(rl_src);
-  return rl_src;
-}
-
-void Mir2Lir::StoreValueWide(RegLocation rl_dest, RegLocation rl_src) {
-  /*
-   * Sanity checking - should never try to store to the same
-   * ssa name during the compilation of a single instruction
-   * without an intervening ClobberSReg().
-   */
-  if (kIsDebugBuild) {
-    DCHECK((live_sreg_ == INVALID_SREG) ||
-           (rl_dest.s_reg_low != live_sreg_));
-    live_sreg_ = rl_dest.s_reg_low;
-  }
-  LIR* def_start;
-  LIR* def_end;
-  DCHECK(rl_dest.wide);
-  DCHECK(rl_src.wide);
-  rl_src = UpdateLocWide(rl_src);
-  rl_dest = UpdateLocWide(rl_dest);
-  if (rl_src.location == kLocPhysReg) {
-    if (IsLive(rl_src.reg) ||
-        IsPromoted(rl_src.reg) ||
-        (rl_dest.location == kLocPhysReg)) {
-      /*
-       * If the src reg(s) are tied to the original Dalvik vreg via liveness or promotion, we
-       * can't repurpose them.  Similarly, if the dest reg(s) are tied to Dalvik vregs via
-       * promotion, we can't just re-assign.  In these cases, we have to copy.
-       */
-      rl_dest = EvalLoc(rl_dest, kAnyReg, false);
-      OpRegCopyWide(rl_dest.reg, rl_src.reg);
-    } else {
-      // Just re-assign the registers.  Dest gets Src's regs
-      rl_dest.reg = rl_src.reg;
-      Clobber(rl_src.reg);
-    }
-  } else {
-    // Load Src either into promoted Dest or temps allocated for Dest
-    rl_dest = EvalLoc(rl_dest, kAnyReg, false);
-    LoadValueDirectWide(rl_src, rl_dest.reg);
-  }
-
-  // Dest is now live and dirty (until/if we flush it to home location)
-  MarkLive(rl_dest);
-  MarkWide(rl_dest.reg);
-  MarkDirty(rl_dest);
-
-  ResetDefLocWide(rl_dest);
-  if (IsDirty(rl_dest.reg) && (LiveOut(rl_dest.s_reg_low) ||
-      LiveOut(GetSRegHi(rl_dest.s_reg_low)))) {
-    def_start = last_lir_insn_;
-    DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
-              mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
-    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-    StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64, kNotVolatile);
-    MarkClean(rl_dest);
-    def_end = last_lir_insn_;
-    MarkDefWide(rl_dest, def_start, def_end);
-  }
-}
-
-void Mir2Lir::StoreFinalValue(RegLocation rl_dest, RegLocation rl_src) {
-  DCHECK_EQ(rl_src.location, kLocPhysReg);
-
-  if (rl_dest.location == kLocPhysReg) {
-    OpRegCopy(rl_dest.reg, rl_src.reg);
-  } else {
-    // Just re-assign the register.  Dest gets Src's reg.
-    rl_dest.location = kLocPhysReg;
-    rl_dest.reg = rl_src.reg;
-    Clobber(rl_src.reg);
-  }
-
-  // Dest is now live and dirty (until/if we flush it to home location)
-  MarkLive(rl_dest);
-  MarkDirty(rl_dest);
-
-  ResetDefLoc(rl_dest);
-  if (IsDirty(rl_dest.reg) && LiveOut(rl_dest.s_reg_low)) {
-    LIR *def_start = last_lir_insn_;
-    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-    Store32Disp(TargetPtrReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
-    MarkClean(rl_dest);
-    LIR *def_end = last_lir_insn_;
-    if (!rl_dest.ref) {
-      // Exclude references from store elimination
-      MarkDef(rl_dest, def_start, def_end);
-    }
-  }
-}
-
-void Mir2Lir::StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src) {
-  DCHECK(rl_dest.wide);
-  DCHECK(rl_src.wide);
-  DCHECK_EQ(rl_src.location, kLocPhysReg);
-
-  if (rl_dest.location == kLocPhysReg) {
-    OpRegCopyWide(rl_dest.reg, rl_src.reg);
-  } else {
-    // Just re-assign the registers.  Dest gets Src's regs.
-    rl_dest.location = kLocPhysReg;
-    rl_dest.reg = rl_src.reg;
-    Clobber(rl_src.reg);
-  }
-
-  // Dest is now live and dirty (until/if we flush it to home location).
-  MarkLive(rl_dest);
-  MarkWide(rl_dest.reg);
-  MarkDirty(rl_dest);
-
-  ResetDefLocWide(rl_dest);
-  if (IsDirty(rl_dest.reg) && (LiveOut(rl_dest.s_reg_low) ||
-      LiveOut(GetSRegHi(rl_dest.s_reg_low)))) {
-    LIR *def_start = last_lir_insn_;
-    DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
-              mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
-    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-    StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, k64, kNotVolatile);
-    MarkClean(rl_dest);
-    LIR *def_end = last_lir_insn_;
-    MarkDefWide(rl_dest, def_start, def_end);
-  }
-}
-
-/* Utilities to load the current Method* */
-void Mir2Lir::LoadCurrMethodDirect(RegStorage r_tgt) {
-  if (GetCompilationUnit()->target64) {
-    LoadValueDirectWideFixed(mir_graph_->GetMethodLoc(), r_tgt);
-  } else {
-    LoadValueDirectFixed(mir_graph_->GetMethodLoc(), r_tgt);
-  }
-}
-
-RegStorage Mir2Lir::LoadCurrMethodWithHint(RegStorage r_hint) {
-  // If the method is promoted to a register, return that register, otherwise load it to r_hint.
-  // (Replacement for LoadCurrMethod() usually used when LockCallTemps() is in effect.)
-  DCHECK(r_hint.Valid());
-  RegLocation rl_method = mir_graph_->GetMethodLoc();
-  if (rl_method.location == kLocPhysReg) {
-    DCHECK(!IsTemp(rl_method.reg));
-    return rl_method.reg;
-  } else {
-    LoadCurrMethodDirect(r_hint);
-    return r_hint;
-  }
-}
-
-RegLocation Mir2Lir::LoadCurrMethod() {
-  return GetCompilationUnit()->target64 ?
-      LoadValueWide(mir_graph_->GetMethodLoc(), kCoreReg) :
-      LoadValue(mir_graph_->GetMethodLoc(), kRefReg);
-}
-
-RegLocation Mir2Lir::ForceTemp(RegLocation loc) {
-  DCHECK(!loc.wide);
-  DCHECK(loc.location == kLocPhysReg);
-  DCHECK(!loc.reg.IsFloat());
-  if (IsTemp(loc.reg)) {
-    Clobber(loc.reg);
-  } else {
-    RegStorage temp_low = AllocTemp();
-    OpRegCopy(temp_low, loc.reg);
-    loc.reg = temp_low;
-  }
-
-  // Ensure that this doesn't represent the original SR any more.
-  loc.s_reg_low = INVALID_SREG;
-  return loc;
-}
-
-RegLocation Mir2Lir::ForceTempWide(RegLocation loc) {
-  DCHECK(loc.wide);
-  DCHECK(loc.location == kLocPhysReg);
-  DCHECK(!loc.reg.IsFloat());
-
-  if (!loc.reg.IsPair()) {
-    if (IsTemp(loc.reg)) {
-      Clobber(loc.reg);
-    } else {
-      RegStorage temp = AllocTempWide();
-      OpRegCopy(temp, loc.reg);
-      loc.reg = temp;
-    }
-  } else {
-    if (IsTemp(loc.reg.GetLow())) {
-      Clobber(loc.reg.GetLow());
-    } else {
-      RegStorage temp_low = AllocTemp();
-      OpRegCopy(temp_low, loc.reg.GetLow());
-      loc.reg.SetLowReg(temp_low.GetReg());
-    }
-    if (IsTemp(loc.reg.GetHigh())) {
-      Clobber(loc.reg.GetHigh());
-    } else {
-      RegStorage temp_high = AllocTemp();
-      OpRegCopy(temp_high, loc.reg.GetHigh());
-      loc.reg.SetHighReg(temp_high.GetReg());
-    }
-  }
-
-  // Ensure that this doesn't represent the original SR any more.
-  loc.s_reg_low = INVALID_SREG;
-  return loc;
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/lazy_debug_frame_opcode_writer.cc b/compiler/dex/quick/lazy_debug_frame_opcode_writer.cc
deleted file mode 100644
index 5cfb0ff..0000000
--- a/compiler/dex/quick/lazy_debug_frame_opcode_writer.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "lazy_debug_frame_opcode_writer.h"
-#include "mir_to_lir.h"
-
-namespace art {
-namespace dwarf {
-
-const ArenaVector<uint8_t>* LazyDebugFrameOpCodeWriter::Patch(size_t code_size) {
-  if (!this->enabled_) {
-    DCHECK(this->data()->empty());
-    return this->data();
-  }
-  if (!patched_) {
-    patched_ = true;
-    // Move our data buffer to temporary variable.
-    ArenaVector<uint8_t> old_opcodes(this->opcodes_.get_allocator());
-    old_opcodes.swap(this->opcodes_);
-    // Refill our data buffer with patched opcodes.
-    this->opcodes_.reserve(old_opcodes.size() + advances_.size() + 4);
-    size_t pos = 0;
-    for (auto advance : advances_) {
-      DCHECK_GE(advance.pos, pos);
-      // Copy old data up to the point when advance was issued.
-      this->opcodes_.insert(this->opcodes_.end(),
-                            old_opcodes.begin() + pos,
-                            old_opcodes.begin() + advance.pos);
-      pos = advance.pos;
-      // This may be null if there is no slow-path code after return.
-      LIR* next_lir = NEXT_LIR(advance.last_lir_insn);
-      // Insert the advance command with its final offset.
-      Base::AdvancePC(next_lir != nullptr ? next_lir->offset : code_size);
-    }
-    // Copy the final segment.
-    this->opcodes_.insert(this->opcodes_.end(),
-                          old_opcodes.begin() + pos,
-                          old_opcodes.end());
-    Base::AdvancePC(code_size);
-  }
-  return this->data();
-}
-
-}  // namespace dwarf
-}  // namespace art
diff --git a/compiler/dex/quick/lazy_debug_frame_opcode_writer.h b/compiler/dex/quick/lazy_debug_frame_opcode_writer.h
deleted file mode 100644
index 85050f4..0000000
--- a/compiler/dex/quick/lazy_debug_frame_opcode_writer.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_LAZY_DEBUG_FRAME_OPCODE_WRITER_H_
-#define ART_COMPILER_DEX_QUICK_LAZY_DEBUG_FRAME_OPCODE_WRITER_H_
-
-#include "base/arena_allocator.h"
-#include "base/arena_containers.h"
-#include "debug/dwarf/debug_frame_opcode_writer.h"
-
-namespace art {
-struct LIR;
-namespace dwarf {
-
-// When we are generating the CFI code, we do not know the instruction offsets;
-// this class stores the LIR references and patches the instruction stream later.
-class LazyDebugFrameOpCodeWriter FINAL
-    : public DebugFrameOpCodeWriter<ArenaVector<uint8_t>> {
-  typedef DebugFrameOpCodeWriter<ArenaVector<uint8_t>> Base;
- public:
-  // This method is implicitly called by the opcode writers.
-  virtual void ImplicitlyAdvancePC() OVERRIDE {
-    DCHECK_EQ(patched_, false);
-    DCHECK_EQ(this->current_pc_, 0);
-    advances_.push_back({this->data()->size(), *last_lir_insn_});
-  }
-
-  const ArenaVector<uint8_t>* Patch(size_t code_size);
-
-  LazyDebugFrameOpCodeWriter(LIR** last_lir_insn, bool enable_writes, ArenaAllocator* allocator)
-      : Base(enable_writes, allocator->Adapter()),
-        last_lir_insn_(last_lir_insn),
-        advances_(allocator->Adapter()),
-        patched_(false) {
-  }
-
- private:
-  struct Advance {
-    size_t pos;
-    LIR* last_lir_insn;
-  };
-
-  using Base::data;  // Hidden. Use Patch method instead.
-
-  LIR** last_lir_insn_;
-  ArenaVector<Advance> advances_;
-  bool patched_;
-
-  DISALLOW_COPY_AND_ASSIGN(LazyDebugFrameOpCodeWriter);
-};
-
-}  // namespace dwarf
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_LAZY_DEBUG_FRAME_OPCODE_WRITER_H_
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
deleted file mode 100644
index 6cdf567..0000000
--- a/compiler/dex/quick/local_optimizations.cc
+++ /dev/null
@@ -1,518 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "dex/quick/mir_to_lir-inl.h"
-
-#include "base/logging.h"
-
-namespace art {
-
-#define DEBUG_OPT(X)
-
-#define LOAD_STORE_CHECK_REG_DEP(mask, check) (mask.Intersects(*check->u.m.def_mask))
-
-/* Check RAW, WAR, and WAW dependencies on the register operands */
-#define CHECK_REG_DEP(use, def, check) (def.Intersects(*check->u.m.use_mask)) || \
-                                       (use.Union(def).Intersects(*check->u.m.def_mask))
-
-/* Load Store Elimination filter:
- *  - Wide Load/Store
- *  - Exclusive Load/Store
- *  - Quad operand Load/Store
- *  - List Load/Store
- *  - IT blocks
- *  - Branch
- *  - Dmb
- */
-#define LOAD_STORE_FILTER(flags) ((flags & (IS_QUAD_OP|IS_STORE)) == (IS_QUAD_OP|IS_STORE) || \
-                                 (flags & (IS_QUAD_OP|IS_LOAD)) == (IS_QUAD_OP|IS_LOAD) || \
-                                 (flags & REG_USE012) == REG_USE012 || \
-                                 (flags & REG_DEF01) == REG_DEF01 || \
-                                 (flags & REG_DEF_LIST0) || \
-                                 (flags & REG_DEF_LIST1) || \
-                                 (flags & REG_USE_LIST0) || \
-                                 (flags & REG_USE_LIST1) || \
-                                 (flags & REG_DEF_FPCS_LIST0) || \
-                                 (flags & REG_DEF_FPCS_LIST2) || \
-                                 (flags & REG_USE_FPCS_LIST0) || \
-                                 (flags & REG_USE_FPCS_LIST2) || \
-                                 (flags & IS_VOLATILE) || \
-                                 (flags & IS_BRANCH) || \
-                                 (flags & IS_IT))
-
-/* Scheduler heuristics */
-#define MAX_HOIST_DISTANCE 20
-#define LDLD_DISTANCE 4
-#define LD_LATENCY 2
-
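-/*
- * Two Dalvik register ranges overlap when their low words match or when one
- * range's low word equals the other's high word. Matching high words never
- * need a separate check: high == low + 1 for wide operands, so equal high
- * words either imply equal low words or reduce to one of the cases below.
- */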
-static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2) {
-  int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->flags.alias_info);
-  int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->flags.alias_info);
-  int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->flags.alias_info);
-  int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->flags.alias_info);
-
-  return (reg1Lo == reg2Lo) || (reg1Lo == reg2Hi) || (reg1Hi == reg2Lo);
-}
-
-/* Convert a more expensive instruction (i.e., a load) into a move */
-void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, RegStorage dest, RegStorage src) {
-  /* Insert a move to replace the load */
-  LIR* move_lir;
-  move_lir = OpRegCopyNoInsert(dest, src);
-  move_lir->dalvik_offset = orig_lir->dalvik_offset;
-  /*
-   * Insert the converted instruction after the original since the
-   * optimization is scanning in top-down order and the new instruction
-   * will need to be re-checked (e.g., the new dest clobbers the src used in
-   * this_lir).
-   */
-  InsertLIRAfter(orig_lir, move_lir);
-}
-
-void Mir2Lir::DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type) {
-  LOG(INFO) << type;
-  LOG(INFO) << "Check LIR:";
-  DumpLIRInsn(check_lir, 0);
-  LOG(INFO) << "This LIR:";
-  DumpLIRInsn(this_lir, 0);
-}
-
-inline void Mir2Lir::EliminateLoad(LIR* lir, int reg_id) {
-  DCHECK(RegStorage::SameRegType(lir->operands[0], reg_id));
-  RegStorage dest_reg, src_reg;
-
-  /* Same Register - Nop */
-  if (lir->operands[0] == reg_id) {
-    NopLIR(lir);
-    return;
-  }
-
-  /* Different register - Move + Nop */
-  switch (reg_id & RegStorage::kShapeTypeMask) {
-    case RegStorage::k32BitSolo | RegStorage::kCoreRegister:
-      dest_reg = RegStorage::Solo32(lir->operands[0]);
-      src_reg = RegStorage::Solo32(reg_id);
-      break;
-    case RegStorage::k64BitSolo | RegStorage::kCoreRegister:
-      dest_reg = RegStorage::Solo64(lir->operands[0]);
-      src_reg = RegStorage::Solo64(reg_id);
-      break;
-    case RegStorage::k32BitSolo | RegStorage::kFloatingPoint:
-      dest_reg = RegStorage::FloatSolo32(lir->operands[0]);
-      src_reg = RegStorage::FloatSolo32(reg_id);
-      break;
-    case RegStorage::k64BitSolo | RegStorage::kFloatingPoint:
-      dest_reg = RegStorage::FloatSolo64(lir->operands[0]);
-      src_reg = RegStorage::FloatSolo64(reg_id);
-      break;
-    default:
-      LOG(INFO) << "Load Store: Unsupported register type!";
-      return;
-  }
-  ConvertMemOpIntoMove(lir, dest_reg, src_reg);
-  NopLIR(lir);
-  return;
-}
-
-/*
- * Perform a pass of top-down walk, from the first to the last instruction in the
- * superblock, to eliminate redundant loads and stores.
- *
- * An earlier load can eliminate a later load iff
- *   1) They are must-aliases
- *   2) The native register is not clobbered in between
- *   3) The memory location is not written to in between
- *
- * An earlier store can eliminate a later load iff
- *   1) They are must-aliases
- *   2) The native register is not clobbered in between
- *   3) The memory location is not written to in between
- *
- * An earlier store can eliminate a later store iff
- *   1) They are must-aliases
- *   2) The memory location is not written to in between
- */
-void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
-  LIR* this_lir, *check_lir;
-  std::vector<int> alias_list;
-
-  if (head_lir == tail_lir) {
-    return;
-  }
-
-  for (this_lir = head_lir; this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) {
-    if (this_lir->flags.is_nop || IsPseudoLirOp(this_lir->opcode)) {
-      continue;
-    }
-
-    uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
-    /* Target LIR - skip if instr is:
-     *  - NOP
-     *  - Branch
-     *  - Load and store
-     *  - Wide load
-     *  - Wide store
-     *  - Exclusive load/store
-     */
-    if (LOAD_STORE_FILTER(target_flags) ||
-        ((target_flags & (IS_LOAD | IS_STORE)) == (IS_LOAD | IS_STORE)) ||
-        !(target_flags & (IS_LOAD | IS_STORE))) {
-      continue;
-    }
-    int native_reg_id = this_lir->operands[0];
-    int dest_reg_id = this_lir->operands[1];
-    bool is_this_lir_load = target_flags & IS_LOAD;
-    ResourceMask this_mem_mask = kEncodeMem.Intersection(this_lir->u.m.use_mask->Union(
-                                                        *this_lir->u.m.def_mask));
-
-    /* Memory region */
-    if (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeDalvikReg)) &&
-      (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeHeapRef)))) {
-      continue;
-    }
-
-    /* Does not redefine the address */
-    if (this_lir->u.m.def_mask->Intersects(*this_lir->u.m.use_mask)) {
-      continue;
-    }
-
-    ResourceMask stop_def_reg_mask = this_lir->u.m.def_mask->Without(kEncodeMem);
-    ResourceMask stop_use_reg_mask = this_lir->u.m.use_mask->Without(kEncodeMem);
-
-    /* The ARM backend can load/store PC */
-    ResourceMask uses_pc = GetPCUseDefEncoding();
-    if (uses_pc.Intersects(this_lir->u.m.use_mask->Union(*this_lir->u.m.def_mask))) {
-      continue;
-    }
-
-    /* Initialize alias list */
-    alias_list.clear();
-    ResourceMask alias_reg_list_mask = kEncodeNone;
-    if (!this_mem_mask.Intersects(kEncodeMem) && !this_mem_mask.Intersects(kEncodeLiteral)) {
-      alias_list.push_back(dest_reg_id);
-      SetupRegMask(&alias_reg_list_mask, dest_reg_id);
-    }
-
-    /* Scan through the BB for possible elimination candidates */
-    for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {
-      if (check_lir->flags.is_nop || IsPseudoLirOp(check_lir->opcode)) {
-        continue;
-      }
-
-      if (uses_pc.Intersects(check_lir->u.m.use_mask->Union(*check_lir->u.m.def_mask))) {
-        break;
-      }
-
-      ResourceMask check_mem_mask = kEncodeMem.Intersection(check_lir->u.m.use_mask->Union(
-                                                          *check_lir->u.m.def_mask));
-      ResourceMask alias_mem_mask = this_mem_mask.Intersection(check_mem_mask);
-      uint64_t check_flags = GetTargetInstFlags(check_lir->opcode);
-      bool stop_here = false;
-      bool pass_over = false;
-
-      /* Check LIR - skip if instr is:
-       *  - Wide Load
-       *  - Wide Store
-       *  - Branch
-       *  - Dmb
-       *  - Exclusive load/store
-       *  - IT blocks
-       *  - Quad loads
-       */
-      if (LOAD_STORE_FILTER(check_flags)) {
-        stop_here = true;
-        /* Possible alias or result of earlier pass */
-      } else if (check_flags & IS_MOVE) {
-        for (auto &reg : alias_list) {
-          if (RegStorage::RegNum(check_lir->operands[1]) == RegStorage::RegNum(reg)) {
-            pass_over = true;
-            alias_list.push_back(check_lir->operands[0]);
-            SetupRegMask(&alias_reg_list_mask, check_lir->operands[0]);
-          }
-        }
-      /* Memory regions */
-      } else if (!alias_mem_mask.Equals(kEncodeNone)) {
-        DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
-        bool is_check_lir_load = check_flags & IS_LOAD;
-        bool reg_compatible = RegStorage::SameRegType(check_lir->operands[0], native_reg_id);
-
-        if (!alias_mem_mask.Intersects(kEncodeMem) && alias_mem_mask.Equals(kEncodeLiteral)) {
-          DCHECK(check_flags & IS_LOAD);
-          /* Same value && same register type */
-          if (reg_compatible && (this_lir->target == check_lir->target)) {
-            DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "LITERAL"));
-            EliminateLoad(check_lir, native_reg_id);
-          }
-        } else if (((alias_mem_mask.Equals(kEncodeDalvikReg)) || (alias_mem_mask.Equals(kEncodeHeapRef))) &&
-                   alias_reg_list_mask.Intersects((check_lir->u.m.use_mask)->Without(kEncodeMem))) {
-          bool same_offset = (GetInstructionOffset(this_lir) == GetInstructionOffset(check_lir));
-          if (same_offset && !is_check_lir_load) {
-            if (check_lir->operands[0] != native_reg_id) {
-              DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "STORE STOP"));
-              stop_here = true;
-              break;
-            }
-          }
-
-          if (reg_compatible && same_offset &&
-              ((is_this_lir_load && is_check_lir_load)  /* LDR - LDR */ ||
-              (!is_this_lir_load && is_check_lir_load)  /* STR - LDR */ ||
-              (!is_this_lir_load && !is_check_lir_load) /* STR - STR */)) {
-            DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "LOAD STORE"));
-            EliminateLoad(check_lir, native_reg_id);
-          }
-        } else {
-          /* Unsupported memory region */
-        }
-      }
-
-      if (pass_over) {
-        continue;
-      }
-
-      if (stop_here == false) {
-        bool stop_alias = LOAD_STORE_CHECK_REG_DEP(alias_reg_list_mask, check_lir);
-        if (stop_alias) {
-          /* Scan the alias list and drop any alias the check LIR clobbers. */
-          for (auto &reg : alias_list) {
-            stop_alias = false;
-            ResourceMask alias_reg_mask = kEncodeNone;
-            SetupRegMask(&alias_reg_mask, reg);
-            stop_alias = LOAD_STORE_CHECK_REG_DEP(alias_reg_mask, check_lir);
-            if (stop_alias) {
-              ClearRegMask(&alias_reg_list_mask, reg);
-              alias_list.erase(std::remove(alias_list.begin(), alias_list.end(),
-                                           reg), alias_list.end());
-            }
-          }
-        }
-        ResourceMask stop_search_mask = stop_def_reg_mask.Union(stop_use_reg_mask);
-        stop_search_mask = stop_search_mask.Union(alias_reg_list_mask);
-        stop_here = LOAD_STORE_CHECK_REG_DEP(stop_search_mask, check_lir);
-        if (stop_here) {
-          break;
-        }
-      } else {
-        break;
-      }
-    }
-  }
-}
-
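
To make the walk above concrete, here is a small self-contained model of the
load-after-load case: a later load is deleted when the value is still live in
the same register and nothing in between redefined the register or the memory
slot. Every type and name below is invented for the sketch; the real pass
reasons with LIR resource masks instead of explicit slots.

    #include <cstdio>
    #include <vector>

    // Toy op: a load/store of register 'reg' against memory slot 'slot',
    // or a register-only op that defines 'reg' (slot < 0).
    struct Op { bool is_load; int reg; int slot; bool dead = false; };

    static void EliminateRedundantLoads(std::vector<Op>& ops) {
      for (size_t i = 0; i < ops.size(); ++i) {
        if (ops[i].dead || !ops[i].is_load || ops[i].slot < 0) continue;
        for (size_t j = i + 1; j < ops.size(); ++j) {
          const Op& c = ops[j];
          if (c.dead) continue;
          if (!c.is_load && c.slot == ops[i].slot) break;  // slot rewritten
          bool defs_reg = c.slot < 0 || (c.is_load && c.slot != ops[i].slot);
          if (defs_reg && c.reg == ops[i].reg) break;      // reg clobbered
          if (c.is_load && c.slot == ops[i].slot && c.reg == ops[i].reg) {
            ops[j].dead = true;                            // redundant reload
          }
        }
      }
    }

    int main() {
      std::vector<Op> ops = {
        {true, 0, 5},   // r0 <- [v5]
        {false, 1, 7},  // [v7] <- r1, unrelated
        {true, 0, 5},   // r0 <- [v5], eliminated
      };
      EliminateRedundantLoads(ops);
      for (const Op& op : ops) printf("%s\n", op.dead ? "nop" : "kept");
      return 0;
    }
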
-/*
- * Perform a pass of bottom-up walk, from the second instruction in the
- * superblock, to try to hoist loads to earlier slots.
- */
-void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
-  LIR* this_lir, *check_lir;
-  /*
-   * Store the list of independent instructions that can be hoisted past.
-   * Will decide the best place to insert later.
-   */
-  LIR* prev_inst_list[MAX_HOIST_DISTANCE];
-
-  /* Empty block */
-  if (head_lir == tail_lir) {
-    return;
-  }
-
-  /* Start from the second instruction */
-  for (this_lir = NEXT_LIR(head_lir); this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) {
-    if (IsPseudoLirOp(this_lir->opcode)) {
-      continue;
-    }
-
-    uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
-    /* Skip non-interesting instructions */
-    if (!(target_flags & IS_LOAD) ||
-        (this_lir->flags.is_nop == true) ||
-        ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) ||
-        ((target_flags & (IS_STORE | IS_LOAD)) == (IS_STORE | IS_LOAD))) {
-      continue;
-    }
-
-    ResourceMask stop_use_all_mask = *this_lir->u.m.use_mask;
-
-    /*
-     * Branches for null/range checks are marked with the true resource
-     * bits, and loads to Dalvik registers, constant pools, and non-alias
-     * locations are safe to be hoisted. So only mark the heap references
-     * conservatively here.
-     *
-     * Note: on x86(-64) and Arm64 this will add kEncodeNone.
-     * TODO: Sanity check. LoadStoreElimination uses kBranchBit to fake a PC.
-     */
-    if (stop_use_all_mask.HasBit(ResourceMask::kHeapRef)) {
-      stop_use_all_mask.SetBits(GetPCUseDefEncoding());
-    }
-
-    /* Similar to the above, but only check for pure register dependencies */
-    ResourceMask stop_use_reg_mask = stop_use_all_mask.Without(kEncodeMem);
-    ResourceMask stop_def_reg_mask = this_lir->u.m.def_mask->Without(kEncodeMem);
-
-    int next_slot = 0;
-    bool stop_here = false;
-
-    /* Try to hoist the load to a good spot */
-    for (check_lir = PREV_LIR(this_lir); check_lir != head_lir; check_lir = PREV_LIR(check_lir)) {
-      /*
-       * Skip already dead instructions (whose dataflow information is
-       * outdated and misleading).
-       */
-      if (check_lir->flags.is_nop) {
-        continue;
-      }
-
-      ResourceMask check_mem_mask = check_lir->u.m.def_mask->Intersection(kEncodeMem);
-      ResourceMask alias_condition = stop_use_all_mask.Intersection(check_mem_mask);
-      stop_here = false;
-
-      /* Potential WAR alias seen - check the exact relation */
-      if (!check_mem_mask.Equals(kEncodeMem) && !alias_condition.Equals(kEncodeNone)) {
-        /* We can fully disambiguate Dalvik references */
-        if (alias_condition.Equals(kEncodeDalvikReg)) {
-          /* Must alias or partially overlap */
-          if ((check_lir->flags.alias_info == this_lir->flags.alias_info) ||
-            IsDalvikRegisterClobbered(this_lir, check_lir)) {
-            stop_here = true;
-          }
-        /* Conservatively treat all heap refs as may-alias */
-        } else {
-          DCHECK(alias_condition.Equals(kEncodeHeapRef));
-          stop_here = true;
-        }
-        /* Memory content may be updated. Stop looking now. */
-        if (stop_here) {
-          prev_inst_list[next_slot++] = check_lir;
-          break;
-        }
-      }
-
-      if (stop_here == false) {
-        stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask,
-                     check_lir);
-      }
-
-      /*
-       * Store the dependent instruction, or a non-pseudo independent
-       * one, to the list.
-       */
-      if (stop_here || !IsPseudoLirOp(check_lir->opcode)) {
-        prev_inst_list[next_slot++] = check_lir;
-        if (next_slot == MAX_HOIST_DISTANCE) {
-          break;
-        }
-      }
-
-      /* Found a new place to put the load - move it here */
-      if (stop_here == true) {
-        DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "HOIST STOP"));
-        break;
-      }
-    }
-
-    /*
-     * Reached the top - use head_lir as the dependent marker as all labels
-     * are barriers.
-     */
-    if (stop_here == false && next_slot < MAX_HOIST_DISTANCE) {
-      prev_inst_list[next_slot++] = head_lir;
-    }
-
-    /*
-     * At least one independent instruction is found. Scan in the reversed
-     * direction to find a beneficial slot.
-     */
-    if (next_slot >= 2) {
-      int first_slot = next_slot - 2;
-      int slot;
-      LIR* dep_lir = prev_inst_list[next_slot-1];
-      /* If there is a ld-ld dependency, wait LDLD_DISTANCE cycles */
-      if (!IsPseudoLirOp(dep_lir->opcode) &&
-        (GetTargetInstFlags(dep_lir->opcode) & IS_LOAD)) {
-        first_slot -= LDLD_DISTANCE;
-      }
-      /*
-       * Make sure we check slot >= 0 since first_slot may be negative
-       * when the loop is first entered.
-       */
-      for (slot = first_slot; slot >= 0; slot--) {
-        LIR* cur_lir = prev_inst_list[slot];
-        LIR* prev_lir = prev_inst_list[slot+1];
-
-        /* Check the highest instruction */
-        if (prev_lir->u.m.def_mask->Equals(kEncodeAll)) {
-          /*
-           * If the first instruction is a load, don't hoist anything
-           * above it since it is unlikely to be beneficial.
-           */
-          if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) {
-            continue;
-          }
-          /*
-           * If the remaining number of slots is less than LD_LATENCY,
-           * insert the hoisted load here.
-           */
-          if (slot < LD_LATENCY) {
-            break;
-          }
-        }
-
-        // Don't look across a barrier label
-        if ((prev_lir->opcode == kPseudoTargetLabel) ||
-            (prev_lir->opcode == kPseudoSafepointPC) ||
-            (prev_lir->opcode == kPseudoBarrier)) {
-          break;
-        }
-
-        /*
-         * Try to find two instructions with load/use dependency until
-         * the remaining instructions are less than LD_LATENCY.
-         */
-        bool prev_is_load = IsPseudoLirOp(prev_lir->opcode) ? false :
-            (GetTargetInstFlags(prev_lir->opcode) & IS_LOAD);
-        if ((prev_is_load && (cur_lir->u.m.use_mask->Intersects(*prev_lir->u.m.def_mask))) ||
-            (slot < LD_LATENCY)) {
-          break;
-        }
-      }
-
-      /* Found a slot to hoist to */
-      if (slot >= 0) {
-        LIR* cur_lir = prev_inst_list[slot];
-        LIR* prev_lir = PREV_LIR(this_lir);
-        UnlinkLIR(this_lir);
-        /*
-         * Insertion is guaranteed to succeed since cur_lir
-         * is never the first LIR on the list.
-         */
-        InsertLIRBefore(cur_lir, this_lir);
-        this_lir = prev_lir;  // Continue the loop with the next LIR.
-      }
-    }
-  }
-}
-
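
A minimal sketch of the backward dependence scan that bounds how far a load
may be hoisted. Everything here is invented for illustration; the real pass
also weighs LD_LATENCY and the ld-ld distance when picking the final slot.

    #include <cstdio>
    #include <vector>

    constexpr int kMaxHoistDistance = 20;  // mirrors MAX_HOIST_DISTANCE

    // Toy op: 'defs' and 'uses' are register bitmasks.
    struct Op { unsigned defs; unsigned uses; };

    // Walk backwards from the load and count how many instructions it could
    // be hoisted past before a true dependence or the cap stops it.
    static int HoistDistance(const std::vector<Op>& ops, size_t load_idx) {
      const Op& load = ops[load_idx];
      int dist = 0;
      for (size_t i = load_idx; i-- > 0 && dist < kMaxHoistDistance;) {
        bool raw = (ops[i].defs & load.uses) != 0;  // defines our address reg
        bool waw = (ops[i].defs & load.defs) != 0;  // defines our dest reg
        bool war = (ops[i].uses & load.defs) != 0;  // reads our dest reg
        if (raw || waw || war) break;
        ++dist;
      }
      return dist;
    }

    int main() {
      // r1 = ...; r2 = ...; r3 = ...; load r0 <- [r1]
      std::vector<Op> ops = {
        {1u << 1, 0u}, {1u << 2, 0u}, {1u << 3, 0u}, {1u << 0, 1u << 1},
      };
      printf("hoistable past %d instruction(s)\n", HoistDistance(ops, 3));
      return 0;
    }
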
-void Mir2Lir::ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir) {
-  if (!(cu_->disable_opt & (1 << kLoadStoreElimination))) {
-    ApplyLoadStoreElimination(head_lir, tail_lir);
-  }
-  if (!(cu_->disable_opt & (1 << kLoadHoisting))) {
-    ApplyLoadHoisting(head_lir, tail_lir);
-  }
-}
-
-}  // namespace art
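
The gating in ApplyLocalOptimizations above is a plain bitmask test. A sketch
of the same pattern with hypothetical pass IDs (the real enum lives with the
compiler options):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical optimization IDs, one bit each in the disable mask.
    enum OptPass { kLoadStoreElimination = 0, kLoadHoisting = 1 };

    int main() {
      uint32_t disable_opt = 1u << kLoadHoisting;  // disable hoisting only
      if (!(disable_opt & (1u << kLoadStoreElimination))) {
        printf("running load/store elimination\n");
      }
      if (!(disable_opt & (1u << kLoadHoisting))) {
        printf("running load hoisting\n");  // skipped for this mask
      }
      return 0;
    }
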
diff --git a/compiler/dex/quick/mips/README.mips b/compiler/dex/quick/mips/README.mips
deleted file mode 100644
index ff561fa..0000000
--- a/compiler/dex/quick/mips/README.mips
+++ /dev/null
@@ -1,57 +0,0 @@
-               Notes on the Mips target (3/4/2012)
-               -----------------------------------
-
-Testing
-
-The initial implementation of Mips support in the compiler is untested on
-actual hardware, and as such should be expected to have many bugs.  However,
-the vast majority of code for Mips support is either shared with other
-tested targets, or was taken from the functional Mips JIT compiler.  The
-expectation is that when it is first tried out on actual hardware, lots of
-small bugs will be flushed out, but it should not take long to get it
-solidly running.  The following areas are considered most likely to have
-problems that need to be addressed:
-
-    o Endianness.  Focus was on little-endian support, and if a big-endian
-      target is desired, you should pay particular attention to the
-      code generation for switch tables, fill array data, 64-bit
-      data handling and the register usage conventions.
-
-    o The memory model.  Verify that GenMemoryBarrier() generates the
-      appropriate flavor of sync.
-
-Register promotion
-
-The resource masks in the LIR structure are 64-bits wide, which is enough
-room to fully describe def/use info for Arm and x86 instructions.  However,
-the larger number of MIPS core and float registers renders this too small.
-Currently, the workaround for this limitation is to avoid using floating
-point registers 16-31.  These are the callee-save registers, which therefore
-means that no floating point promotion is allowed.  Among the solutions are:
-     o Expand the def/use mask (which, unfortunately, is a significant change)
-     o The Arm target uses 52 of the 64 bits, so we could support float
-       registers 16-27 without much effort.
-     o We could likely assign the 4 non-register bits (kDalvikReg, kLiteral,
-       kHeapRef & kMustNotAlias) to positions occupied by MIPS registers that
-       don't need def/use bits because they are never modified by code
-       subject to scheduling: r_K0, r_K1, r_SP, r_ZERO, r_S1 (rSELF).
-
-Branch delay slots
-
-Little to no attempt was made to fill branch delay slots.  Branch
-instructions in the encoding map are given a length of 8 bytes to include
-an implicit NOP.  It should not be too difficult to provide a slot-filling
-pass following successful assembly, but thought should be given to the
-design.  Branches are currently treated as scheduling barriers.  One
-simple solution would be to copy the instruction at branch targets to the
-slot and adjust the displacement.  However, given that code expansion is
-already a problem, it would be preferable to use a more sophisticated
-scheduling solution.
-
-Code expansion
-
-Code expansion for the MIPS target is significantly higher than we see
-for Arm and x86.  It might make sense to replace the inline code generation
-for some of the more verbose Dalvik bytecodes with subroutine calls to
-shared helper functions.
-
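
The bit budget behind that promotion limitation is easy to check; a sketch
using the counts implied above (illustrative only, ignoring hi/lo and other
special registers):

    #include <cstdio>

    int main() {
      const int mask_bits  = 64;  // width of the LIR def/use resource masks
      const int core_regs  = 32;  // MIPS core registers
      const int float_regs = 32;  // MIPS float registers
      const int extra_bits = 4;   // kDalvikReg, kLiteral, kHeapRef,
                                  // kMustNotAlias
      int needed = core_regs + float_regs + extra_bits;
      printf("need %d of %d bits -> short by %d\n",
             needed, mask_bits, needed - mask_bits);
      return 0;
    }
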
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
deleted file mode 100644
index f9b9684..0000000
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ /dev/null
@@ -1,956 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_mips.h"
-
-#include "base/logging.h"
-#include "dex/compiler_ir.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "mips_lir.h"
-
-namespace art {
-
-#define MAX_ASSEMBLER_RETRIES 50
-
-/*
- * opcode: MipsOpCode enum
- * skeleton: pre-designated bit-pattern for this opcode
- * k0: key to applying ds/de
- * ds: dest start bit position
- * de: dest end bit position
- * k1: key to applying s1s/s1e
- * s1s: src1 start bit position
- * s1e: src1 end bit position
- * k2: key to applying s2s/s2e
- * s2s: src2 start bit position
- * s2e: src2 end bit position
- * operands: number of operands (for sanity check purposes)
- * name: mnemonic name
- * fmt: for pretty-printing
- */
-#define ENCODING_MAP(opcode, skeleton, k0, ds, de, k1, s1s, s1e, k2, s2s, s2e, \
-                     k3, k3s, k3e, flags, name, fmt, size) \
-        {skeleton, {{k0, ds, de}, {k1, s1s, s1e}, {k2, s2s, s2e}, \
-                    {k3, k3s, k3e}}, opcode, flags, name, fmt, size}
-
-/* Instruction dump string format keys: !pf, where "!" is the start
- * of the key, "p" is which numeric operand to use and "f" is the
- * print format.
- *
- * [p]ositions:
- *     0 -> operands[0] (dest)
- *     1 -> operands[1] (src1)
- *     2 -> operands[2] (src2)
- *     3 -> operands[3] (extra)
- *
- * [f]ormats:
- *     h -> 4-digit hex
- *     d -> decimal
- *     E -> decimal*4
- *     F -> decimal*2
- *     c -> branch condition (beq, bne, etc.)
- *     t -> pc-relative target
- *     T -> pc-region target
- *     u -> 1st half of bl[x] target
- *     v -> 2nd half of bl[x] target
- *     R -> register list
- *     s -> single precision floating point register
- *     S -> double precision floating point register
- *     m -> Thumb2 modified immediate
- *     n -> complemented Thumb2 modified immediate
- *     M -> Thumb2 16-bit zero-extended immediate
- *     b -> 4-digit binary
- *     N -> append a NOP
- *
- *  [!] escape.  To insert "!", use "!!"
- */
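
A rough standalone sketch of how such "!pf" keys expand during printing; only
the 'r' and 'd' formats and the "!!" escape are modeled, and the register
spelling is invented:

    #include <cstdio>
    #include <string>

    // Expand a "!pf" template against an operand array: 'p' picks the
    // operand, 'f' picks the rendering ('r' register, 'd' decimal).
    static std::string Expand(const char* fmt, const int ops[4]) {
      std::string out;
      for (const char* p = fmt; *p != '\0'; ++p) {
        if (*p != '!') { out += *p; continue; }
        char pos = *++p;
        if (pos == '!') { out += '!'; continue; }  // "!!" escapes '!'
        char key = *++p;
        char buf[16];
        if (key == 'r') {
          snprintf(buf, sizeof(buf), "$r%d", ops[pos - '0']);
        } else {  // 'd'
          snprintf(buf, sizeof(buf), "%d", ops[pos - '0']);
        }
        out += buf;
      }
      return out;
    }

    int main() {
      int ops[4] = {8, 100, 29, 0};
      // Matches the "lw" template "!0r,!1d(!2r)" in the map below.
      printf("%s\n", Expand("!0r,!1d(!2r)", ops).c_str());  // $r8,100($r29)
      return 0;
    }
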
-/* NOTE: must be kept in sync with enum MipsOpcode from mips_lir.h */
-/*
- * TUNING: We're currently punting on the branch delay slots.  All branch
- * instructions in this map are given a size of 8, which during assembly
- * is expanded to include a nop.  This scheme should be replaced with
- * an assembler pass to fill those slots when possible.
- */
-const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
-    // The following are common mips32r2, mips32r6 and mips64r6 instructions.
-    ENCODING_MAP(kMips32BitData, 0x00000000,
-                 kFmtBitBlt, 31, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP,
-                 "data", "0x!0h(!0d)", 4),
-    ENCODING_MAP(kMipsAddiu, 0x24000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "addiu", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMipsAddu, 0x00000021,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "addu", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsAnd, 0x00000024,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "and", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsAndi, 0x30000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "andi", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMipsB, 0x10000000,
-                 kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP,
-                 "b", "!0t!0N", 8),
-    ENCODING_MAP(kMipsBal, 0x04110000,
-                 kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR |
-                 NEEDS_FIXUP, "bal", "!0t!0N", 8),
-    ENCODING_MAP(kMipsBeq, 0x10000000,
-                 kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
-                 NEEDS_FIXUP, "beq", "!0r,!1r,!2t!0N", 8),
-    ENCODING_MAP(kMipsBeqz, 0x10000000,  // Same as beq above with t = $zero.
-                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
-                 NEEDS_FIXUP, "beqz", "!0r,!1t!0N", 8),
-    ENCODING_MAP(kMipsBgez, 0x04010000,
-                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
-                 NEEDS_FIXUP, "bgez", "!0r,!1t!0N", 8),
-    ENCODING_MAP(kMipsBgtz, 0x1C000000,
-                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
-                 NEEDS_FIXUP, "bgtz", "!0r,!1t!0N", 8),
-    ENCODING_MAP(kMipsBlez, 0x18000000,
-                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
-                 NEEDS_FIXUP, "blez", "!0r,!1t!0N", 8),
-    ENCODING_MAP(kMipsBltz, 0x04000000,
-                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
-                 NEEDS_FIXUP, "bltz", "!0r,!1t!0N", 8),
-    ENCODING_MAP(kMipsBnez, 0x14000000,  // Same as bne below with t = $zero.
-                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
-                 NEEDS_FIXUP, "bnez", "!0r,!1t!0N", 8),
-    ENCODING_MAP(kMipsBne, 0x14000000,
-                 kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
-                 NEEDS_FIXUP, "bne", "!0r,!1r,!2t!0N", 8),
-    ENCODING_MAP(kMipsExt, 0x7c000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 10, 6,
-                 kFmtBitBlt, 15, 11, IS_QUAD_OP | REG_DEF0 | REG_USE1,
-                 "ext", "!0r,!1r,!2d,!3D", 4),
-    ENCODING_MAP(kMipsFaddd, 0x46200000,
-                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "add.d", "!0S,!1S,!2S", 4),
-    ENCODING_MAP(kMipsFadds, 0x46000000,
-                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "add.s", "!0s,!1s,!2s", 4),
-    ENCODING_MAP(kMipsFsubd, 0x46200001,
-                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "sub.d", "!0S,!1S,!2S", 4),
-    ENCODING_MAP(kMipsFsubs, 0x46000001,
-                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "sub.s", "!0s,!1s,!2s", 4),
-    ENCODING_MAP(kMipsFdivd, 0x46200003,
-                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "div.d", "!0S,!1S,!2S", 4),
-    ENCODING_MAP(kMipsFdivs, 0x46000003,
-                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "div.s", "!0s,!1s,!2s", 4),
-    ENCODING_MAP(kMipsFmuld, 0x46200002,
-                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "mul.d", "!0S,!1S,!2S", 4),
-    ENCODING_MAP(kMipsFmuls, 0x46000002,
-                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "mul.s", "!0s,!1s,!2s", 4),
-    ENCODING_MAP(kMipsFcvtsd, 0x46200020,
-                 kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "cvt.s.d", "!0s,!1S", 4),
-    ENCODING_MAP(kMipsFcvtsw, 0x46800020,
-                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "cvt.s.w", "!0s,!1s", 4),
-    ENCODING_MAP(kMipsFcvtds, 0x46000021,
-                 kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "cvt.d.s", "!0S,!1s", 4),
-    ENCODING_MAP(kMipsFcvtdw, 0x46800021,
-                 kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "cvt.d.w", "!0S,!1s", 4),
-    ENCODING_MAP(kMipsFcvtwd, 0x46200024,
-                 kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "cvt.w.d", "!0s,!1S", 4),
-    ENCODING_MAP(kMipsFcvtws, 0x46000024,
-                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "cvt.w.s", "!0s,!1s", 4),
-    ENCODING_MAP(kMipsFmovd, 0x46200006,
-                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "mov.d", "!0S,!1S", 4),
-    ENCODING_MAP(kMipsFmovs, 0x46000006,
-                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "mov.s", "!0s,!1s", 4),
-    ENCODING_MAP(kMipsFnegd, 0x46200007,
-                 kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "neg.d", "!0S,!1S", 4),
-    ENCODING_MAP(kMipsFnegs, 0x46000007,
-                 kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "neg.s", "!0s,!1s", 4),
-    ENCODING_MAP(kMipsFldc1, 0xd4000000,
-                 kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
-                 "ldc1", "!0S,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsFlwc1, 0xc4000000,
-                 kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
-                 "lwc1", "!0s,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsFsdc1, 0xf4000000,
-                 kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
-                 "sdc1", "!0S,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsFswc1, 0xe4000000,
-                 kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
-                 "swc1", "!0s,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsJal, 0x0c000000,
-                 kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
-                 "jal", "!0T(!0E)!0N", 8),
-    ENCODING_MAP(kMipsJalr, 0x00000009,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_DEF0_USE1,
-                 "jalr", "!0r,!1r!0N", 8),
-    ENCODING_MAP(kMipsJr, 0x00000008,
-                 kFmtBitBlt, 25, 21, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
-                 NEEDS_FIXUP, "jr", "!0r!0N", 8),
-    ENCODING_MAP(kMipsLahi, 0x3C000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
-                 "lahi/lui", "!0r,0x!1h(!1d)", 4),
-    ENCODING_MAP(kMipsLalo, 0x34000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "lalo/ori", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMipsLui, 0x3C000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
-                 "lui", "!0r,0x!1h(!1d)", 4),
-    ENCODING_MAP(kMipsLb, 0x80000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
-                 "lb", "!0r,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsLbu, 0x90000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
-                 "lbu", "!0r,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsLh, 0x84000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
-                 "lh", "!0r,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsLhu, 0x94000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
-                 "lhu", "!0r,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsLw, 0x8C000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
-                 "lw", "!0r,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsMove, 0x00000025,  // Or using zero reg.
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "move", "!0r,!1r", 4),
-    ENCODING_MAP(kMipsMfc1, 0x44000000,
-                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "mfc1", "!0r,!1s", 4),
-    ENCODING_MAP(kMipsMtc1, 0x44800000,
-                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
-                 "mtc1", "!0r,!1s", 4),
-    ENCODING_MAP(kMipsMfhc1, 0x44600000,
-                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "mfhc1", "!0r,!1s", 4),
-    ENCODING_MAP(kMipsMthc1, 0x44e00000,
-                 kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
-                 "mthc1", "!0r,!1s", 4),
-    ENCODING_MAP(kMipsNop, 0x00000000,
-                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, NO_OPERAND,
-                 "nop", ";", 4),
-    ENCODING_MAP(kMipsNor, 0x00000027,  // Used for "not" too.
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "nor", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsOr, 0x00000025,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "or", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsOri, 0x34000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "ori", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMipsPref, 0xCC000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE2,
-                 "pref", "!0d,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsSb, 0xA0000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
-                 "sb", "!0r,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsSeb, 0x7c000420,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "seb", "!0r,!1r", 4),
-    ENCODING_MAP(kMipsSeh, 0x7c000620,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "seh", "!0r,!1r", 4),
-    ENCODING_MAP(kMipsSh, 0xA4000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
-                 "sh", "!0r,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsSll, 0x00000000,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "sll", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMipsSllv, 0x00000004,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "sllv", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsSlt, 0x0000002a,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "slt", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsSlti, 0x28000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "slti", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMipsSltu, 0x0000002b,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "sltu", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsSra, 0x00000003,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "sra", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMipsSrav, 0x00000007,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "srav", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsSrl, 0x00000002,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "srl", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMipsSrlv, 0x00000006,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "srlv", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsSubu, 0x00000023,  // Used for "neg" too.
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "subu", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsSw, 0xAC000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
-                 "sw", "!0r,!1d(!2r)", 4),
-    ENCODING_MAP(kMipsSync, 0x0000000f,
-                 kFmtBitBlt, 10, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP,
-                 "sync", ";", 4),
-    ENCODING_MAP(kMipsXor, 0x00000026,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "xor", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsXori, 0x38000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "xori", "!0r,!1r,0x!2h(!2d)", 4),
-
-    // The following are mips32r2 instructions.
-    ENCODING_MAP(kMipsR2Div, 0x0000001a,
-                 kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF_HI | REG_DEF_LO | REG_USE01,
-                 "div", "!0r,!1r", 4),
-    ENCODING_MAP(kMipsR2Mul, 0x70000002,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "mul", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsR2Mfhi, 0x00000010,
-                 kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF0 | REG_USE_HI,
-                 "mfhi", "!0r", 4),
-    ENCODING_MAP(kMipsR2Mflo, 0x00000012,
-                 kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF0 | REG_USE_LO,
-                 "mflo", "!0r", 4),
-    ENCODING_MAP(kMipsR2Movz, 0x0000000a,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "movz", "!0r,!1r,!2r", 4),
-
-    // The following are mips32r6 and mips64r6 instructions.
-    ENCODING_MAP(kMipsR6Div, 0x0000009a,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "div", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsR6Mod, 0x000000da,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "mod", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMipsR6Mul, 0x00000098,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "mul", "!0r,!1r,!2r", 4),
-
-    // The following are mips64r6 instructions.
-    ENCODING_MAP(kMips64Daddiu, 0x64000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "daddiu", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMips64Daddu, 0x0000002d,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "daddu", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMips64Dahi, 0x04060000,
-                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
-                 "dahi", "!0r,0x!1h(!1d)", 4),
-    ENCODING_MAP(kMips64Dati, 0x041E0000,
-                 kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
-                 "dati", "!0r,0x!1h(!1d)", 4),
-    ENCODING_MAP(kMips64Daui, 0x74000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "daui", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMips64Ddiv, 0x0000009e,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "ddiv", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMips64Dmod, 0x000000de,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "dmod", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMips64Dmul, 0x0000009c,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "dmul", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMips64Dmfc1, 0x44200000,
-                 kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
-                 "dmfc1", "!0r,!1s", 4),
-    ENCODING_MAP(kMips64Dmtc1, 0x44a00000,
-                 kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
-                 "dmtc1", "!0r,!1s", 4),
-    ENCODING_MAP(kMips64Drotr32, 0x0000003e | (1 << 21),
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "drotr32", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMips64Dsll, 0x00000038,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "dsll", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMips64Dsll32, 0x0000003c,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "dsll32", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMips64Dsrl, 0x0000003a,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "dsrl", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMips64Dsrl32, 0x0000003e,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "dsrl32", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMips64Dsra, 0x0000003b,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "dsra", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMips64Dsra32, 0x0000003f,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
-                 "dsra32", "!0r,!1r,0x!2h(!2d)", 4),
-    ENCODING_MAP(kMips64Dsllv, 0x00000014,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "dsllv", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMips64Dsrlv, 0x00000016,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "dsrlv", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMips64Dsrav, 0x00000017,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "dsrav", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMips64Dsubu, 0x0000002f,
-                 kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
-                 "dsubu", "!0r,!1r,!2r", 4),
-    ENCODING_MAP(kMips64Ld, 0xdc000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
-                 "ld", "!0r,!1d(!2r)", 4),
-    ENCODING_MAP(kMips64Lwu, 0x9c000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
-                 "lwu", "!0r,!1d(!2r)", 4),
-    ENCODING_MAP(kMips64Sd, 0xfc000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
-                 "sd", "!0r,!1d(!2r)", 4),
-
-    // The following are pseudoinstructions.
-    ENCODING_MAP(kMipsDelta, 0x27e00000,  // It is implemented as daddiu for mips64.
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, 15, 0,
-                 kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | REG_USE_LR |
-                 NEEDS_FIXUP, "addiu", "!0r,ra,0x!1h(!1d)", 4),
-    ENCODING_MAP(kMipsDeltaHi, 0x3C000000,
-                 kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | NEEDS_FIXUP,
-                 "lui", "!0r,0x!1h(!1d)", 4),
-    ENCODING_MAP(kMipsDeltaLo, 0x34000000,
-                 kFmtBlt5_2, 16, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0_USE0 | NEEDS_FIXUP,
-                 "ori", "!0r,!0r,0x!1h(!1d)", 4),
-    ENCODING_MAP(kMipsCurrPC, 0x04110001,
-                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH | REG_DEF_LR,
-                 "addiu", "ra,pc,8", 4),
-    ENCODING_MAP(kMipsUndefined, 0x64000000,
-                 kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, NO_OPERAND,
-                 "undefined", "", 4),
-};
-
-
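
The assembler loop further down consumes these entries by OR-ing shifted
operand fields into the skeleton. A minimal standalone check of what that
produces for kMipsAddu (skeleton 0x00000021, rd in bits 15..11, rs in 25..21,
rt in 20..16, per the entry above):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Pack an R-type addu the way the encoding map describes it.
    static uint32_t EncodeAddu(uint32_t rd, uint32_t rs, uint32_t rt) {
      const uint32_t skeleton = 0x00000021;  // kMipsAddu skeleton
      return skeleton | (rs << 21) | (rt << 16) | (rd << 11);
    }

    int main() {
      // addu $t0,$t1,$t2 -> register numbers 8, 9, 10.
      uint32_t bits = EncodeAddu(8, 9, 10);
      printf("0x%08x\n", bits);  // 0x012a4021
      assert(bits == 0x012a4021u);
      return 0;
    }
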
-/*
- * Convert a short-form branch to long form.  Hopefully, this won't happen
- * very often because the PIC sequence is especially unfortunate.
- *
- * Orig conditional branch
- * -----------------------
- *      beq  rs,rt,target
- *
- * Long conditional branch
- * -----------------------
- *      bne  rs,rt,hop
- *      bal  .+8   ; rRA <- anchor
- *      lui  rAT, ((target-anchor) >> 16)
- * anchor:
- *      ori  rAT, rAT, ((target-anchor) & 0xffff)
- *      addu rAT, rAT, rRA
- *      jalr rZERO, rAT
- * hop:
- *
- * Orig unconditional branch
- * -------------------------
- *      b target
- *
- * Long unconditional branch
- * -----------------------
- *      bal  .+8   ; rRA <- anchor
- *      lui  rAT, ((target-anchor) >> 16)
- * anchor:
- *      ori  rAT, rAT, ((target-anchor) & 0xffff)
- *      addu rAT, rAT, rRA
- *      jalr rZERO, rAT
- *
- *
- * NOTE: An out-of-range bal isn't supported because it should
- * never happen with the current PIC model.
- */
-void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) {
-  // For conditional branches we'll need to reverse the sense
-  bool unconditional = false;
-  int opcode = lir->opcode;
-  int dalvik_offset = lir->dalvik_offset;
-  switch (opcode) {
-    case kMipsBal:
-      LOG(FATAL) << "long branch and link unsupported";
-      UNREACHABLE();
-    case kMipsB:
-      unconditional = true;
-      break;
-    case kMipsBeq:  opcode = kMipsBne; break;
-    case kMipsBne:  opcode = kMipsBeq; break;
-    case kMipsBeqz: opcode = kMipsBnez; break;
-    case kMipsBgez: opcode = kMipsBltz; break;
-    case kMipsBgtz: opcode = kMipsBlez; break;
-    case kMipsBlez: opcode = kMipsBgtz; break;
-    case kMipsBltz: opcode = kMipsBgez; break;
-    case kMipsBnez: opcode = kMipsBeqz; break;
-    default:
-      LOG(FATAL) << "Unexpected branch kind " << opcode;
-      UNREACHABLE();
-  }
-  LIR* hop_target = nullptr;
-  if (!unconditional) {
-    hop_target = RawLIR(dalvik_offset, kPseudoTargetLabel);
-    LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
-                             lir->operands[1], 0, 0, 0, hop_target);
-    InsertLIRBefore(lir, hop_branch);
-  }
-  LIR* curr_pc = RawLIR(dalvik_offset, kMipsCurrPC);
-  InsertLIRBefore(lir, curr_pc);
-  LIR* anchor = RawLIR(dalvik_offset, kPseudoTargetLabel);
-  LIR* delta_hi = RawLIR(dalvik_offset, kMipsDeltaHi, rAT, 0, WrapPointer(anchor), 0, 0,
-                         lir->target);
-  InsertLIRBefore(lir, delta_hi);
-  InsertLIRBefore(lir, anchor);
-  LIR* delta_lo = RawLIR(dalvik_offset, kMipsDeltaLo, rAT, 0, WrapPointer(anchor), 0, 0,
-                         lir->target);
-  InsertLIRBefore(lir, delta_lo);
-  LIR* addu = RawLIR(dalvik_offset, kMipsAddu, rAT, rAT, rRA);
-  InsertLIRBefore(lir, addu);
-  LIR* jalr = RawLIR(dalvik_offset, kMipsJalr, rZERO, rAT);
-  InsertLIRBefore(lir, jalr);
-  if (!unconditional) {
-    InsertLIRBefore(lir, hop_target);
-  }
-  NopLIR(lir);
-}
-
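
The lui/ori pair in the expansion above works for any 32-bit displacement
because ori zero-extends its immediate; a quick standalone check of the split
used by kMipsDeltaHi/kMipsDeltaLo:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t delta = -0x12344;  // target - anchor; any 32-bit value works
      uint32_t hi = (static_cast<uint32_t>(delta) >> 16) & 0xffff;  // lui imm
      uint32_t lo = static_cast<uint32_t>(delta) & 0xffff;          // ori imm
      uint32_t rebuilt = (hi << 16) | lo;  // lui sets the top, ori ORs the low
      assert(static_cast<int32_t>(rebuilt) == delta);
      printf("hi=0x%04x lo=0x%04x\n", hi, lo);
      return 0;
    }
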
-/*
- * Assemble the LIR into binary instruction format.  Note that we may
- * discover that pc-relative displacements may not fit the selected
- * instruction.  In those cases we will try to substitute a new code
- * sequence or request that the trace be shortened and retried.
- */
-AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
-  LIR *lir;
-  AssemblerStatus res = kSuccess;  // Assume success.
-
-  for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
-    if (lir->opcode < 0) {
-      continue;
-    }
-
-    if (lir->flags.is_nop) {
-      continue;
-    }
-
-    if (lir->flags.fixup != kFixupNone) {
-      if (lir->opcode == kMipsDelta) {
-        /*
-         * The "Delta" pseudo-ops load the difference between
-         * two pc-relative locations into the target register
-         * found in operands[0].  The delta is determined by
-         * (label2 - label1), where label1 is a standard
-         * kPseudoTargetLabel and is stored in operands[2].
-         * If operands[3] is nullptr, then label2 is a kPseudoTargetLabel
-         * and is found in lir->target.  If operands[3] is non-null,
-         * then it is a Switch/Data table.
-         */
-        int offset1 = UnwrapPointer<LIR>(lir->operands[2])->offset;
-        const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[3]);
-        int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
-        int delta = offset2 - offset1;
-        if ((delta & 0xffff) == delta && ((delta & 0x8000) == 0)) {
-          // Fits.
-          lir->operands[1] = delta;
-          if (cu_->target64) {
-            LIR *new_addiu = RawLIR(lir->dalvik_offset, kMips64Daddiu, lir->operands[0], rRAd,
-                                    delta);
-            InsertLIRBefore(lir, new_addiu);
-            NopLIR(lir);
-            res = kRetryAll;
-          }
-        } else {
-          // Doesn't fit - must expand to kMipsDelta[Hi|Lo] pair.
-          LIR *new_delta_hi = RawLIR(lir->dalvik_offset, kMipsDeltaHi, lir->operands[0], 0,
-                                     lir->operands[2], lir->operands[3], 0, lir->target);
-          InsertLIRBefore(lir, new_delta_hi);
-          LIR *new_delta_lo = RawLIR(lir->dalvik_offset, kMipsDeltaLo, lir->operands[0], 0,
-                                     lir->operands[2], lir->operands[3], 0, lir->target);
-          InsertLIRBefore(lir, new_delta_lo);
-          LIR *new_addu;
-          if (cu_->target64) {
-            new_addu = RawLIR(lir->dalvik_offset, kMips64Daddu, lir->operands[0], lir->operands[0],
-                              rRAd);
-          } else {
-            new_addu = RawLIR(lir->dalvik_offset, kMipsAddu, lir->operands[0], lir->operands[0],
-                              rRA);
-          }
-          InsertLIRBefore(lir, new_addu);
-          NopLIR(lir);
-          res = kRetryAll;
-        }
-      } else if (lir->opcode == kMipsDeltaLo) {
-        int offset1 = UnwrapPointer<LIR>(lir->operands[2])->offset;
-        const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[3]);
-        int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
-        int delta = offset2 - offset1;
-        lir->operands[1] = delta & 0xffff;
-      } else if (lir->opcode == kMipsDeltaHi) {
-        int offset1 = UnwrapPointer<LIR>(lir->operands[2])->offset;
-        const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[3]);
-        int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
-        int delta = offset2 - offset1;
-        lir->operands[1] = (delta >> 16) & 0xffff;
-      } else if (lir->opcode == kMipsB || lir->opcode == kMipsBal) {
-        LIR *target_lir = lir->target;
-        CodeOffset pc = lir->offset + 4;
-        CodeOffset target = target_lir->offset;
-        int delta = target - pc;
-        if (delta & 0x3) {
-          LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
-        }
-        if (delta > 131068 || delta < -131069) {
-          res = kRetryAll;
-          ConvertShortToLongBranch(lir);
-        } else {
-          lir->operands[0] = delta >> 2;
-        }
-      } else if (lir->opcode >= kMipsBeqz && lir->opcode <= kMipsBnez) {
-        LIR *target_lir = lir->target;
-        CodeOffset pc = lir->offset + 4;
-        CodeOffset target = target_lir->offset;
-        int delta = target - pc;
-        if (delta & 0x3) {
-          LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
-        }
-        if (delta > 131068 || delta < -131069) {
-          res = kRetryAll;
-          ConvertShortToLongBranch(lir);
-        } else {
-          lir->operands[1] = delta >> 2;
-        }
-      } else if (lir->opcode == kMipsBeq || lir->opcode == kMipsBne) {
-        LIR *target_lir = lir->target;
-        CodeOffset pc = lir->offset + 4;
-        CodeOffset target = target_lir->offset;
-        int delta = target - pc;
-        if (delta & 0x3) {
-          LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
-        }
-        if (delta > 131068 || delta < -131069) {
-          res = kRetryAll;
-          ConvertShortToLongBranch(lir);
-        } else {
-          lir->operands[2] = delta >> 2;
-        }
-      } else if (lir->opcode == kMipsJal) {
-        CodeOffset cur_pc = (start_addr + lir->offset + 4) & ~3;
-        CodeOffset target = lir->operands[0];
-        /* ensure PC-region branch can be used */
-        DCHECK_EQ((cur_pc & 0xF0000000), (target & 0xF0000000));
-        if (target & 0x3) {
-          LOG(FATAL) << "Jump target not multiple of 4: " << target;
-        }
-        lir->operands[0] = target >> 2;
-      } else if (lir->opcode == kMipsLahi) { /* ld address hi (via lui) */
-        LIR *target_lir = lir->target;
-        CodeOffset target = start_addr + target_lir->offset;
-        lir->operands[1] = target >> 16;
-      } else if (lir->opcode == kMipsLalo) { /* ld address lo (via ori) */
-        LIR *target_lir = lir->target;
-        CodeOffset target = start_addr + target_lir->offset;
-        lir->operands[2] = lir->operands[2] + target;
-      }
-    }
-
-    /*
-     * If one of the pc-relative instructions expanded we'll have
-     * to make another pass.  Don't bother to fully assemble the
-     * instruction.
-     */
-    if (res != kSuccess) {
-      continue;
-    }
-    DCHECK(!IsPseudoLirOp(lir->opcode));
-    const MipsEncodingMap *encoder = &EncodingMap[lir->opcode];
-    uint32_t bits = encoder->skeleton;
-    int i;
-    for (i = 0; i < 4; i++) {
-      uint32_t operand;
-      uint32_t value;
-      operand = lir->operands[i];
-      switch (encoder->field_loc[i].kind) {
-        case kFmtUnused:
-          break;
-        case kFmtBitBlt:
-          if (encoder->field_loc[i].start == 0 && encoder->field_loc[i].end == 31) {
-            value = operand;
-          } else {
-            value = (operand << encoder->field_loc[i].start) &
-                ((1 << (encoder->field_loc[i].end + 1)) - 1);
-          }
-          bits |= value;
-          break;
-        case kFmtBlt5_2:
-          value = (operand & 0x1f);
-          bits |= (value << encoder->field_loc[i].start);
-          bits |= (value << encoder->field_loc[i].end);
-          break;
-        case kFmtDfp: {
-          // TODO: do we need to adjust now that we're using 64BitSolo?
-          DCHECK(RegStorage::IsDouble(operand)) << ", Operand = 0x" << std::hex << operand;
-          if (!cu_->target64) {
-            DCHECK_EQ((operand & 0x1), 0U);  // May only use even numbered registers for mips32.
-          }
-          value = (RegStorage::RegNum(operand) << encoder->field_loc[i].start) &
-              ((1 << (encoder->field_loc[i].end + 1)) - 1);
-          bits |= value;
-          break;
-        }
-        case kFmtSfp:
-          DCHECK(RegStorage::IsSingle(operand)) << ", Operand = 0x" << std::hex << operand;
-          value = (RegStorage::RegNum(operand) << encoder->field_loc[i].start) &
-              ((1 << (encoder->field_loc[i].end + 1)) - 1);
-          bits |= value;
-          break;
-        default:
-          LOG(FATAL) << "Bad encoder format: " << encoder->field_loc[i].kind;
-      }
-    }
-    // We only support little-endian MIPS.
-    code_buffer_.push_back(bits & 0xff);
-    code_buffer_.push_back((bits >> 8) & 0xff);
-    code_buffer_.push_back((bits >> 16) & 0xff);
-    code_buffer_.push_back((bits >> 24) & 0xff);
-    // TUNING: replace with proper delay slot handling.
-    if (encoder->size == 8) {
-      DCHECK(!IsPseudoLirOp(lir->opcode));
-      const MipsEncodingMap *encoder2 = &EncodingMap[kMipsNop];
-      uint32_t bits2 = encoder2->skeleton;
-      code_buffer_.push_back(bits2 & 0xff);
-      code_buffer_.push_back((bits2 >> 8) & 0xff);
-      code_buffer_.push_back((bits2 >> 16) & 0xff);
-      code_buffer_.push_back((bits2 >> 24) & 0xff);
-    }
-  }
-  return res;
-}
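
For reference, a minimal host-side sketch of the two mechanics in the emission loop above: packing an operand into the encoding skeleton (the kFmtBitBlt case) and appending the finished word in little-endian byte order. The function names and field positions are illustrative stand-ins; the real positions live in the EncodingMap tables.

```cpp
#include <cstdint>
#include <vector>

uint32_t PackBitBlt(uint32_t skeleton, uint32_t operand, int start, int end) {
  // Shift the operand to its field position, then mask to the field width.
  // The full-word case (start == 0, end == 31) is special-cased above.
  return skeleton | ((operand << start) & ((1u << (end + 1)) - 1u));
}

void EmitLittleEndian(std::vector<uint8_t>* code_buffer, uint32_t bits) {
  // Matches the four code_buffer_ pushes above: little-endian MIPS only.
  code_buffer->push_back(bits & 0xff);
  code_buffer->push_back((bits >> 8) & 0xff);
  code_buffer->push_back((bits >> 16) & 0xff);
  code_buffer->push_back((bits >> 24) & 0xff);
}
```
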
-
-size_t MipsMir2Lir::GetInsnSize(LIR* lir) {
-  DCHECK(!IsPseudoLirOp(lir->opcode));
-  return EncodingMap[lir->opcode].size;
-}
-
-// LIR offset assignment.
-// TODO: consolidate w/ Arm assembly mechanism.
-int MipsMir2Lir::AssignInsnOffsets() {
-  LIR* lir;
-  int offset = 0;
-
-  for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
-    lir->offset = offset;
-    if (LIKELY(lir->opcode >= 0)) {
-      if (!lir->flags.is_nop) {
-        offset += lir->flags.size;
-      }
-    } else if (UNLIKELY(lir->opcode == kPseudoPseudoAlign4)) {
-      if (offset & 0x2) {
-        offset += 2;
-        lir->operands[0] = 1;
-      } else {
-        lir->operands[0] = 0;
-      }
-    }
-    // Pseudo opcodes don't consume space.
-  }
-  return offset;
-}
-
-/*
- * Walk the compilation unit and assign offsets to instructions
- * and literals and compute the total size of the compiled unit.
- * TODO: consolidate w/ Arm assembly mechanism.
- */
-void MipsMir2Lir::AssignOffsets() {
-  int offset = AssignInsnOffsets();
-
-  // Const values have to be word aligned.
-  offset = RoundUp(offset, 4);
-
-  // Set up offsets for literals.
-  data_offset_ = offset;
-
-  offset = AssignLiteralOffset(offset);
-
-  offset = AssignSwitchTablesOffset(offset);
-
-  offset = AssignFillArrayDataOffset(offset);
-
-  total_size_ = offset;
-}
-
-/*
- * Go over each instruction in the list and calculate the offset from the top
- * before sending them off to the assembler. If out-of-range branch distance is
- * seen rearrange the instructions a bit to correct it.
- * TODO: consolidate w/ Arm assembly mechanism.
- */
-void MipsMir2Lir::AssembleLIR() {
-  cu_->NewTimingSplit("Assemble");
-  AssignOffsets();
-  int assembler_retries = 0;
-  /*
-   * Assemble here.  Note that we generate code with optimistic assumptions
-   * and if they are found not to hold, we'll have to redo the sequence and retry.
-   */
-
-  while (true) {
-    AssemblerStatus res = AssembleInstructions(0);
-    if (res == kSuccess) {
-      break;
-    } else {
-      assembler_retries++;
-      if (assembler_retries > MAX_ASSEMBLER_RETRIES) {
-        CodegenDump();
-        LOG(FATAL) << "Assembler error - too many retries";
-      }
-      // Redo offsets and try again.
-      AssignOffsets();
-      code_buffer_.clear();
-    }
-  }
-
-  // Install literals.
-  InstallLiteralPools();
-
-  // Install switch tables.
-  InstallSwitchTables();
-
-  // Install fill array data.
-  InstallFillArrayData();
-
-  // Create the mapping table and native offset to reference map.
-  cu_->NewTimingSplit("PcMappingTable");
-  CreateMappingTables();
-
-  cu_->NewTimingSplit("GcMap");
-  CreateNativeGcMap();
-}
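
The retry discipline above reduces to a small fixed-point loop. A hedged sketch, with kMaxRetries and the three callables as stand-ins for the members used in AssembleLIR:

```cpp
#include <cstdlib>

enum class AsmStatus { kSuccess, kRetryAll };

// Assemble assuming short branches; if anything had to be expanded
// (kRetryAll), recompute offsets, clear the buffer, and start over,
// up to a retry cap.
template <typename AssembleFn, typename ReassignFn, typename ClearFn>
void AssembleWithRetries(AssembleFn assemble, ReassignFn assign_offsets,
                         ClearFn clear_buffer) {
  constexpr int kMaxRetries = 25;  // stand-in for MAX_ASSEMBLER_RETRIES
  for (int retries = 0; assemble() != AsmStatus::kSuccess; ++retries) {
    if (retries >= kMaxRetries) {
      std::abort();  // "Assembler error - too many retries"
    }
    assign_offsets();  // branch expansion moved everything downstream
    clear_buffer();    // throw away the partially emitted code
  }
}
```
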
-
-}  // namespace art
diff --git a/compiler/dex/quick/mips/backend_mips.h b/compiler/dex/quick/mips/backend_mips.h
deleted file mode 100644
index f65e984..0000000
--- a/compiler/dex/quick/mips/backend_mips.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_MIPS_BACKEND_MIPS_H_
-#define ART_COMPILER_DEX_QUICK_MIPS_BACKEND_MIPS_H_
-
-namespace art {
-
-struct CompilationUnit;
-class Mir2Lir;
-class MIRGraph;
-class ArenaAllocator;
-
-Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
-                           ArenaAllocator* const arena);
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_MIPS_BACKEND_MIPS_H_
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
deleted file mode 100644
index 4a736f3d..0000000
--- a/compiler/dex/quick/mips/call_mips.cc
+++ /dev/null
@@ -1,527 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This file contains codegen for the Mips ISA */
-
-#include "codegen_mips.h"
-
-#include "art_method.h"
-#include "base/logging.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "gc/accounting/card_table.h"
-#include "mips_lir.h"
-#include "mirror/object_array-inl.h"
-
-namespace art {
-
-bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb ATTRIBUTE_UNUSED,
-                                 MIR* mir ATTRIBUTE_UNUSED,
-                                 const InlineMethod& special ATTRIBUTE_UNUSED) {
-  // TODO
-  return false;
-}
-
-/*
- * The lack of pc-relative loads on Mips presents somewhat of a challenge
- * for our PIC switch table strategy.  To materialize the current location
- * we'll do a dummy JAL and reference our tables using rRA as the
- * base register.  Note that rRA will be used both as the base to
- * locate the switch table data and as the reference base for the switch
- * target offsets stored in the table.  We'll use a special pseudo-instruction
- * to represent the jal and trigger the construction of the
- * switch table offsets (which will happen after final assembly and all
- * labels are fixed).
- *
- * The test loop will look something like:
- *
- *   ori   r_end, rZERO, #table_size  ; size in bytes
- *   jal   BaseLabel         ; stores "return address" (BaseLabel) in rRA
- *   nop                     ; opportunistically fill
- * BaseLabel:
- *   addiu r_base, rRA, <table> - <BaseLabel>    ; table relative to BaseLabel
- *   addu  r_end, r_end, r_base                  ; end of table
- *   lw    r_val, [rSP, v_reg_off]                ; Test Value
- * loop:
- *   beq   r_base, r_end, done
- *   lw    r_key, 0(r_base)
- *   addu  r_base, 8
- *   bne   r_val, r_key, loop
- *   lw    r_disp, -4(r_base)
- *   addu  rRA, r_disp
- *   jalr  rZERO, rRA
- * done:
- *
- */
-void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  // Add the table to the list - we'll process it later.
-  SwitchTable* tab_rec =
-      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
-  tab_rec->switch_mir = mir;
-  tab_rec->table = table;
-  tab_rec->vaddr = current_dalvik_offset_;
-  int elements = table[1];
-  switch_tables_.push_back(tab_rec);
-
-  // The table is composed of 8-byte key/disp pairs.
-  int byte_size = elements * 8;
-
-  int size_hi = byte_size >> 16;
-  int size_lo = byte_size & 0xffff;
-
-  RegStorage r_end = AllocPtrSizeTemp();
-  if (size_hi) {
-    NewLIR2(kMipsLui, r_end.GetReg(), size_hi);
-  }
-  // Must prevent code motion for the curr pc pair.
-  GenBarrier();  // Scheduling barrier
-  NewLIR0(kMipsCurrPC);  // Really a jal to .+8.
-  // Now, fill the branch delay slot.
-  if (size_hi) {
-    NewLIR3(kMipsOri, r_end.GetReg(), r_end.GetReg(), size_lo);
-  } else {
-    NewLIR3(kMipsOri, r_end.GetReg(), rZERO, size_lo);
-  }
-  GenBarrier();  // Scheduling barrier.
-
-  // Construct BaseLabel and set up table base register.
-  LIR* base_label = NewLIR0(kPseudoTargetLabel);
-  // Remember base label so offsets can be computed later.
-  tab_rec->anchor = base_label;
-  RegStorage r_base = AllocPtrSizeTemp();
-  NewLIR4(kMipsDelta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
-  OpRegRegReg(kOpAdd, r_end, r_end, r_base);
-
-  // Grab switch test value.
-  rl_src = LoadValue(rl_src, kCoreReg);
-
-  // Test loop.
-  RegStorage r_key = AllocTemp();
-  LIR* loop_label = NewLIR0(kPseudoTargetLabel);
-  LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, nullptr);
-  Load32Disp(r_base, 0, r_key);
-  OpRegImm(kOpAdd, r_base, 8);
-  OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
-  RegStorage r_disp = AllocTemp();
-  Load32Disp(r_base, -4, r_disp);
-  const RegStorage rs_ra = TargetPtrReg(kLr);
-  OpRegRegReg(kOpAdd, rs_ra, rs_ra, r_disp);
-  OpReg(kOpBx, rs_ra);
-  // Loop exit.
-  LIR* exit_label = NewLIR0(kPseudoTargetLabel);
-  exit_branch->target = exit_label;
-}
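
To make the emitted loop concrete, here is a C++ rendering of the table walk it performs at runtime. The struct mirrors the 8-byte key/disp pairs described above; the names are illustrative.

```cpp
#include <cstdint>

struct KeyDisp {
  int32_t key;   // sparse-switch case value
  int32_t disp;  // branch displacement relative to BaseLabel (rRA)
};

// Returns the displacement to add to rRA, or 0 to fall through to 'done'.
int32_t SparseSwitchLookup(const KeyDisp* table, int elements, int32_t value) {
  for (int i = 0; i < elements; ++i) {
    if (table[i].key == value) {
      return table[i].disp;
    }
  }
  return 0;
}
```
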
-
-/*
- * Code pattern will look something like:
- *
- *   lw    r_val
- *   jal   BaseLabel         ; stores "return address" (BaseLabel) in rRA
- *   nop                     ; opportunistically fill
- *   [subiu r_val, bias]      ; Remove bias if low_key != 0
- *   bound check -> done
- *   lw    r_disp, [rRA, r_val]
- *   addu  rRA, r_disp
- *   jalr  rZERO, rRA
- * done:
- */
-void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  // Add the table to the list - we'll process it later.
-  SwitchTable* tab_rec =
-      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
-  tab_rec->switch_mir = mir;
-  tab_rec->table = table;
-  tab_rec->vaddr = current_dalvik_offset_;
-  int size = table[1];
-  switch_tables_.push_back(tab_rec);
-
-  // Get the switch value.
-  rl_src = LoadValue(rl_src, kCoreReg);
-
-  // Prepare the bias.  If too big, handle 1st stage here.
-  int low_key = s4FromSwitchData(&table[2]);
-  bool large_bias = false;
-  RegStorage r_key;
-  if (low_key == 0) {
-    r_key = rl_src.reg;
-  } else if ((low_key & 0xffff) != low_key) {
-    r_key = AllocTemp();
-    LoadConstant(r_key, low_key);
-    large_bias = true;
-  } else {
-    r_key = AllocTemp();
-  }
-
-  // Must prevent code motion for the curr pc pair.
-  GenBarrier();
-  NewLIR0(kMipsCurrPC);  // Really a jal to .+8.
-  // Now, fill the branch delay slot with bias strip.
-  if (low_key == 0) {
-    NewLIR0(kMipsNop);
-  } else {
-    if (large_bias) {
-      OpRegRegReg(kOpSub, r_key, rl_src.reg, r_key);
-    } else {
-      OpRegRegImm(kOpSub, r_key, rl_src.reg, low_key);
-    }
-  }
-  GenBarrier();  // Scheduling barrier.
-
-  // Construct BaseLabel and set up table base register.
-  LIR* base_label = NewLIR0(kPseudoTargetLabel);
-  // Remember base label so offsets can be computed later.
-  tab_rec->anchor = base_label;
-
-  // Bounds check - if < 0 or >= size continue following switch.
-  LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, nullptr);
-
-  // Materialize the table base pointer.
-  RegStorage r_base = AllocPtrSizeTemp();
-  NewLIR4(kMipsDelta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
-
-  // Load the displacement from the switch table.
-  RegStorage r_disp = AllocTemp();
-  LoadBaseIndexed(r_base, r_key, r_disp, 2, k32);
-
-  // Add to rRA and go.
-  const RegStorage rs_ra = TargetPtrReg(kLr);
-  OpRegRegReg(kOpAdd, rs_ra, rs_ra, r_disp);
-  OpReg(kOpBx, rs_ra);
-
-  // Branch_over target here.
-  LIR* target = NewLIR0(kPseudoTargetLabel);
-  branch_over->target = target;
-}
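
Likewise, a sketch of the packed-switch lookup just emitted. Note how a single unsigned comparison (the kCondHi branch above) rejects both negative and too-large keys once the bias is removed:

```cpp
#include <cstdint>

// disps[i] holds the displacement (relative to BaseLabel) for key low_key + i.
// Returns 0 to fall through when value lies outside [low_key, low_key + size).
int32_t PackedSwitchLookup(const int32_t* disps, int32_t size, int32_t low_key,
                           int32_t value) {
  uint32_t index = static_cast<uint32_t>(value - low_key);  // strip the bias
  if (index >= static_cast<uint32_t>(size)) {
    return 0;  // one unsigned compare covers both ends of the range
  }
  return disps[index];
}
```
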
-
-void MipsMir2Lir::GenMoveException(RegLocation rl_dest) {
-  int ex_offset = cu_->target64 ? Thread::ExceptionOffset<8>().Int32Value() :
-      Thread::ExceptionOffset<4>().Int32Value();
-  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
-  RegStorage reset_reg = AllocTempRef();
-  LoadRefDisp(TargetPtrReg(kSelf), ex_offset, rl_result.reg, kNotVolatile);
-  LoadConstant(reset_reg, 0);
-  StoreRefDisp(TargetPtrReg(kSelf), ex_offset, reset_reg, kNotVolatile);
-  FreeTemp(reset_reg);
-  StoreValue(rl_dest, rl_result);
-}
-
-void MipsMir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
-  RegStorage reg_card_base = AllocPtrSizeTemp();
-  RegStorage reg_card_no = AllocPtrSizeTemp();
-  if (cu_->target64) {
-    // NOTE: native pointer.
-    LoadWordDisp(TargetPtrReg(kSelf), Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
-    OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
-    StoreBaseIndexed(reg_card_base, reg_card_no, As32BitReg(reg_card_base), 0, kUnsignedByte);
-  } else {
-    // NOTE: native pointer.
-    LoadWordDisp(TargetPtrReg(kSelf), Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
-    OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
-    StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
-  }
-  FreeTemp(reg_card_base);
-  FreeTemp(reg_card_no);
-}
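
A sketch of what the card-marking store above does at the memory level. This assumes ART's biased card table, where the dirty value written is the low byte of the biased table base itself (which is why StoreBaseIndexed stores the base register as the data operand); kCardShift reflects 128-byte cards.

```cpp
#include <cstdint>

void MarkGCCard(uint8_t* biased_card_base, uintptr_t target_addr) {
  constexpr int kCardShift = 7;  // gc::accounting::CardTable::kCardShift
  // Dirty the card covering target_addr; the byte value is the low byte
  // of the biased base pointer.
  biased_card_base[target_addr >> kCardShift] =
      static_cast<uint8_t>(reinterpret_cast<uintptr_t>(biased_card_base));
}
```
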
-
-static dwarf::Reg DwarfCoreReg(int num) {
-  return dwarf::Reg::MipsCore(num);
-}
-
-void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
-  DCHECK_EQ(cfi_.GetCurrentCFAOffset(), 0);
-  int spill_count = num_core_spills_ + num_fp_spills_;
-  /*
-   * On entry, A0, A1, A2 & A3 are live. On Mips64, A4, A5, A6 & A7 are also live.
-   * Let the register allocation mechanism know so it doesn't try to use any of them when
-   * expanding the frame or flushing.
-   */
-  const RegStorage arg0 = TargetReg(kArg0);
-  const RegStorage arg1 = TargetReg(kArg1);
-  const RegStorage arg2 = TargetReg(kArg2);
-  const RegStorage arg3 = TargetReg(kArg3);
-  const RegStorage arg4 = TargetReg(kArg4);
-  const RegStorage arg5 = TargetReg(kArg5);
-  const RegStorage arg6 = TargetReg(kArg6);
-  const RegStorage arg7 = TargetReg(kArg7);
-
-  LockTemp(arg0);
-  LockTemp(arg1);
-  LockTemp(arg2);
-  LockTemp(arg3);
-  if (cu_->target64) {
-    LockTemp(arg4);
-    LockTemp(arg5);
-    LockTemp(arg6);
-    LockTemp(arg7);
-  }
-
-  bool skip_overflow_check;
-  InstructionSet target = (cu_->target64) ? kMips64 : kMips;
-  int ptr_size = cu_->target64 ? 8 : 4;
-
-  /*
-   * We can safely skip the stack overflow check if we're
-   * a leaf *and* our frame size < fudge factor.
-   */
-
-  skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, target);
-  RegStorage check_reg = AllocPtrSizeTemp();
-  RegStorage new_sp = AllocPtrSizeTemp();
-  const RegStorage rs_sp = TargetPtrReg(kSp);
-  const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(target);
-  const bool large_frame = static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes;
-  bool generate_explicit_stack_overflow_check = large_frame ||
-    !cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks();
-
-  if (!skip_overflow_check) {
-    if (generate_explicit_stack_overflow_check) {
-      // Load stack limit.
-      if (cu_->target64) {
-        LoadWordDisp(TargetPtrReg(kSelf), Thread::StackEndOffset<8>().Int32Value(), check_reg);
-      } else {
-        Load32Disp(TargetPtrReg(kSelf), Thread::StackEndOffset<4>().Int32Value(), check_reg);
-      }
-    } else {
-      // Implicit stack overflow check.
-      // Generate a load from [sp, #-overflowsize].  If this is in the stack
-      // redzone we will get a segmentation fault.
-      Load32Disp(rs_sp, -kStackOverflowReservedUsableBytes, rs_rZERO);
-      MarkPossibleStackOverflowException();
-    }
-  }
-  // Spill core callee saves.
-  SpillCoreRegs();
-  // NOTE: promotion of FP regs currently unsupported, thus no FP spill.
-  DCHECK_EQ(num_fp_spills_, 0);
-  const int frame_sub = frame_size_ - spill_count * ptr_size;
-  if (!skip_overflow_check && generate_explicit_stack_overflow_check) {
-    class StackOverflowSlowPath : public LIRSlowPath {
-     public:
-      StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
-          : LIRSlowPath(m2l, branch), sp_displace_(sp_displace) {
-      }
-      void Compile() OVERRIDE {
-        m2l_->ResetRegPool();
-        m2l_->ResetDefTracking();
-        GenerateTargetLabel(kPseudoThrowTarget);
-        // RA is offset 0 since we push in reverse order.
-        m2l_->LoadWordDisp(m2l_->TargetPtrReg(kSp), 0, m2l_->TargetPtrReg(kLr));
-        m2l_->OpRegImm(kOpAdd, m2l_->TargetPtrReg(kSp), sp_displace_);
-        m2l_->cfi().AdjustCFAOffset(-sp_displace_);
-        m2l_->ClobberCallerSave();
-        RegStorage r_tgt = m2l_->CallHelperSetup(kQuickThrowStackOverflow);  // Doesn't clobber LR.
-        m2l_->CallHelper(r_tgt, kQuickThrowStackOverflow, false /* MarkSafepointPC */,
-                         false /* UseLink */);
-        m2l_->cfi().AdjustCFAOffset(sp_displace_);
-      }
-
-     private:
-      const size_t sp_displace_;
-    };
-    OpRegRegImm(kOpSub, new_sp, rs_sp, frame_sub);
-    LIR* branch = OpCmpBranch(kCondUlt, new_sp, check_reg, nullptr);
-    AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_count * ptr_size));
-    // TODO: avoid copy for small frame sizes.
-    OpRegCopy(rs_sp, new_sp);  // Establish stack.
-    cfi_.AdjustCFAOffset(frame_sub);
-  } else {
-    // Here if skip_overflow_check or doing implicit stack overflow check.
-    // Just make room on the stack for the frame now.
-    OpRegImm(kOpSub, rs_sp, frame_sub);
-    cfi_.AdjustCFAOffset(frame_sub);
-  }
-
-  FlushIns(ArgLocs, rl_method);
-
-  FreeTemp(arg0);
-  FreeTemp(arg1);
-  FreeTemp(arg2);
-  FreeTemp(arg3);
-  if (cu_->target64) {
-    FreeTemp(arg4);
-    FreeTemp(arg5);
-    FreeTemp(arg6);
-    FreeTemp(arg7);
-  }
-}
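
The entry sequence above chooses between two overflow-check strategies. A conceptual sketch, with ThreadSketch and the throw helper as hypothetical stand-ins rather than ART API:

```cpp
#include <cstddef>
#include <cstdint>

struct ThreadSketch { uintptr_t stack_end; };
[[noreturn]] void ThrowStackOverflow();

// Explicit check: compare the would-be SP against the stack limit before
// committing the frame (the kCondUlt branch to StackOverflowSlowPath above).
uintptr_t EnterFrameExplicit(uintptr_t sp, size_t frame_size,
                             const ThreadSketch* self) {
  uintptr_t new_sp = sp - frame_size;
  if (new_sp < self->stack_end) {
    ThrowStackOverflow();
  }
  return new_sp;  // establish the stack, as in OpRegCopy(rs_sp, new_sp)
}

// Implicit check: probe one word below the reserved red zone; on overflow
// the load faults and the SIGSEGV handler raises StackOverflowError.
uintptr_t EnterFrameImplicit(uintptr_t sp, size_t frame_size, size_t reserved) {
  (void)*reinterpret_cast<volatile int32_t*>(sp - reserved);
  return sp - frame_size;
}
```
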
-
-void MipsMir2Lir::GenExitSequence() {
-  cfi_.RememberState();
-  /*
-   * In the exit path, rMIPS_RET0/rMIPS_RET1 are live - make sure they aren't
-   * allocated by the register utilities as temps.
-   */
-  LockTemp(TargetPtrReg(kRet0));
-  LockTemp(TargetPtrReg(kRet1));
-
-  UnSpillCoreRegs();
-  OpReg(kOpBx, TargetPtrReg(kLr));
-  // The CFI should be restored for any code that follows the exit block.
-  cfi_.RestoreState();
-  cfi_.DefCFAOffset(frame_size_);
-}
-
-void MipsMir2Lir::GenSpecialExitSequence() {
-  OpReg(kOpBx, TargetPtrReg(kLr));
-}
-
-void MipsMir2Lir::GenSpecialEntryForSuspend() {
-  // Keep 16-byte stack alignment: push A0 (i.e. ArtMethod*), two filler words, and RA
-  // for mips32; just A0 and RA for mips64.
-  core_spill_mask_ = (1u << TargetPtrReg(kLr).GetRegNum());
-  num_core_spills_ = 1u;
-  fp_spill_mask_ = 0u;
-  num_fp_spills_ = 0u;
-  frame_size_ = 16u;
-  core_vmap_table_.clear();
-  fp_vmap_table_.clear();
-  const RegStorage rs_sp = TargetPtrReg(kSp);
-  OpRegImm(kOpSub, rs_sp, frame_size_);
-  cfi_.AdjustCFAOffset(frame_size_);
-  StoreWordDisp(rs_sp, frame_size_ - (cu_->target64 ? 8 : 4), TargetPtrReg(kLr));
-  cfi_.RelOffset(DwarfCoreReg(rRA), frame_size_ - (cu_->target64 ? 8 : 4));
-  StoreWordDisp(rs_sp, 0, TargetPtrReg(kArg0));
-  // Do not generate CFI for scratch register A0.
-}
-
-void MipsMir2Lir::GenSpecialExitForSuspend() {
-  // Pop the frame. Don't pop ArtMethod*, it's no longer needed.
-  const RegStorage rs_sp = TargetPtrReg(kSp);
-  LoadWordDisp(rs_sp, frame_size_ - (cu_->target64 ? 8 : 4), TargetPtrReg(kLr));
-  cfi_.Restore(DwarfCoreReg(rRA));
-  OpRegImm(kOpAdd, rs_sp, frame_size_);
-  cfi_.AdjustCFAOffset(-frame_size_);
-}
-
-/*
- * Bit of a hack here - in the absence of a real scheduling pass,
- * emit the next instruction in static & direct invoke sequences.
- */
-int MipsMir2Lir::MipsNextSDCallInsn(CompilationUnit* cu, CallInfo* info, int state,
-                                    const MethodReference& target_method, uint32_t,
-                                    uintptr_t direct_code, uintptr_t direct_method,
-                                    InvokeType type) {
-  MipsMir2Lir* cg = static_cast<MipsMir2Lir*>(cu->cg.get());
-  if (info->string_init_offset != 0) {
-    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
-    switch (state) {
-    case 0: {  // Grab target method* from thread pointer
-      cg->LoadWordDisp(cg->TargetPtrReg(kSelf), info->string_init_offset, arg0_ref);
-      break;
-    }
-    case 1:  // Grab the code from the method*
-      if (direct_code == 0) {
-        int32_t offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-            InstructionSetPointerSize(cu->instruction_set)).Int32Value();
-        cg->LoadWordDisp(arg0_ref, offset, cg->TargetPtrReg(kInvokeTgt));
-      }
-      break;
-    default:
-      return -1;
-    }
-  } else if (direct_code != 0 && direct_method != 0) {
-    switch (state) {
-      case 0:  // Get the current Method* [sets kArg0]
-        if (direct_code != static_cast<uintptr_t>(-1)) {
-          if (cu->target64) {
-            cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
-          } else {
-            cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
-          }
-        } else {
-          cg->LoadCodeAddress(target_method, type, kInvokeTgt);
-        }
-        if (direct_method != static_cast<uintptr_t>(-1)) {
-          if (cu->target64) {
-            cg->LoadConstantWide(cg->TargetReg(kArg0, kRef), direct_method);
-          } else {
-            cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
-          }
-        } else {
-          cg->LoadMethodAddress(target_method, type, kArg0);
-        }
-        break;
-      default:
-        return -1;
-    }
-  } else {
-    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
-    switch (state) {
-      case 0:  // Get the current Method* [sets kArg0]
-        // TUNING: we can save a reg copy if Method* has been promoted.
-        cg->LoadCurrMethodDirect(arg0_ref);
-        break;
-      case 1:  // Get method->dex_cache_resolved_methods_
-        cg->LoadBaseDisp(arg0_ref,
-                         ArtMethod::DexCacheResolvedMethodsOffset(
-                             cu->target64 ? kMips64PointerSize : kMipsPointerSize).Int32Value(),
-                         arg0_ref,
-                         cu->target64 ? k64 : k32,
-                         kNotVolatile);
-        // Set up direct code if known.
-        if (direct_code != 0) {
-          if (direct_code != static_cast<uintptr_t>(-1)) {
-            if (cu->target64) {
-              cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
-            } else {
-              cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
-            }
-          } else {
-            CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
-            cg->LoadCodeAddress(target_method, type, kInvokeTgt);
-          }
-        }
-        break;
-      case 2: {
-        // Grab target method*
-        CHECK_EQ(cu->dex_file, target_method.dex_file);
-        const size_t pointer_size = GetInstructionSetPointerSize(cu->instruction_set);
-        cg->LoadWordDisp(arg0_ref,
-                         cg->GetCachePointerOffset(target_method.dex_method_index,
-                                                   pointer_size),
-                         arg0_ref);
-        break;
-      }
-      case 3:  // Grab the code from the method*
-        if (direct_code == 0) {
-          int32_t offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-              InstructionSetPointerSize(cu->instruction_set)).Int32Value();
-          // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
-          cg->LoadWordDisp(arg0_ref, offset, cg->TargetPtrReg(kInvokeTgt));
-        }
-        break;
-      default:
-        return -1;
-    }
-  }
-  return state + 1;
-}
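
For context, state machines like the one above are driven incrementally by the invoke lowering: it keeps calling with the returned state until -1 comes back, so argument moves can be interleaved between steps. A simplified driver sketch (types are stand-ins):

```cpp
#include <functional>

// next_insn emits at most one instruction for the given state and returns
// the next state, or -1 once the method* and code address are in place.
void DriveCallSequence(const std::function<int(int)>& next_insn) {
  for (int state = 0; state >= 0; state = next_insn(state)) {
    // ...the real driver loads and flushes call arguments between steps...
  }
}
```
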
-
-NextCallInsn MipsMir2Lir::GetNextSDCallInsn() {
-  return MipsNextSDCallInsn;
-}
-
-LIR* MipsMir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info ATTRIBUTE_UNUSED) {
-  return OpReg(kOpBlx, TargetPtrReg(kInvokeTgt));
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
deleted file mode 100644
index 378b9a0..0000000
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_MIPS_CODEGEN_MIPS_H_
-#define ART_COMPILER_DEX_QUICK_MIPS_CODEGEN_MIPS_H_
-
-#include "dex/compiler_ir.h"
-#include "dex/quick/mir_to_lir.h"
-#include "mips_lir.h"
-
-namespace art {
-
-struct CompilationUnit;
-
-class MipsMir2Lir FINAL : public Mir2Lir {
- protected:
-  class InToRegStorageMipsMapper : public InToRegStorageMapper {
-   public:
-    explicit InToRegStorageMipsMapper(Mir2Lir* m2l) : m2l_(m2l), cur_core_reg_(0), cur_fpu_reg_(0)
-        {}
-    virtual RegStorage GetNextReg(ShortyArg arg);
-    virtual void Reset() OVERRIDE {
-      cur_core_reg_ = 0;
-      cur_fpu_reg_ = 0;
-    }
-   protected:
-    Mir2Lir* m2l_;
-   private:
-    size_t cur_core_reg_;
-    size_t cur_fpu_reg_;
-  };
-
-  class InToRegStorageMips64Mapper : public InToRegStorageMapper {
-   public:
-    explicit InToRegStorageMips64Mapper(Mir2Lir* m2l) : m2l_(m2l), cur_arg_reg_(0) {}
-    virtual RegStorage GetNextReg(ShortyArg arg);
-    virtual void Reset() OVERRIDE {
-      cur_arg_reg_ = 0;
-    }
-   protected:
-    Mir2Lir* m2l_;
-   private:
-    size_t cur_arg_reg_;
-  };
-
-  InToRegStorageMips64Mapper in_to_reg_storage_mips64_mapper_;
-  InToRegStorageMipsMapper in_to_reg_storage_mips_mapper_;
-  InToRegStorageMapper* GetResetedInToRegStorageMapper() OVERRIDE {
-    InToRegStorageMapper* res;
-    if (cu_->target64) {
-      res = &in_to_reg_storage_mips64_mapper_;
-    } else {
-      res = &in_to_reg_storage_mips_mapper_;
-    }
-    res->Reset();
-    return res;
-  }
-
- public:
-  MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
-
-  // Required for target - codegen utilities.
-  bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
-                          RegLocation rl_dest, int lit);
-  bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
-  void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1, int32_t constant)
-  OVERRIDE;
-  void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1, int64_t constant)
-  OVERRIDE;
-  LIR* CheckSuspendUsingLoad() OVERRIDE;
-  RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
-  void ForceImplicitNullCheck(RegStorage reg, int opt_flags, bool is_wide);
-  LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
-                    VolatileKind is_volatile) OVERRIDE;
-  LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
-                       OpSize size) OVERRIDE;
-  LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
-  LIR* LoadConstantWideNoClobber(RegStorage r_dest, int64_t value);
-  LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
-  LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
-                     VolatileKind is_volatile) OVERRIDE;
-  LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
-                        OpSize size) OVERRIDE;
-  LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
-  LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
-
-  /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
-  void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
-
-  // Required for target - register utilities.
-  RegStorage Solo64ToPair64(RegStorage reg);
-  RegStorage Fp64ToSolo32(RegStorage reg);
-  RegStorage TargetReg(SpecialTargetRegister reg);
-  RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) OVERRIDE;
-  RegStorage TargetPtrReg(SpecialTargetRegister reg) OVERRIDE {
-    return TargetReg(reg, cu_->target64 ? kWide : kNotWide);
-  }
-  RegLocation GetReturnAlt();
-  RegLocation GetReturnWideAlt();
-  RegLocation LocCReturn();
-  RegLocation LocCReturnRef();
-  RegLocation LocCReturnDouble();
-  RegLocation LocCReturnFloat();
-  RegLocation LocCReturnWide();
-  ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
-  void AdjustSpillMask();
-  void ClobberCallerSave();
-  void FreeCallTemps();
-  void LockCallTemps();
-  void CompilerInitializeRegAlloc();
-
-  // Required for target - miscellaneous.
-  void AssembleLIR();
-  int AssignInsnOffsets();
-  void AssignOffsets();
-  AssemblerStatus AssembleInstructions(CodeOffset start_addr);
-  void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
-  void SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
-                                ResourceMask* def_mask) OVERRIDE;
-  const char* GetTargetInstFmt(int opcode);
-  const char* GetTargetInstName(int opcode);
-  std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
-  ResourceMask GetPCUseDefEncoding() const OVERRIDE;
-  uint64_t GetTargetInstFlags(int opcode);
-  size_t GetInsnSize(LIR* lir) OVERRIDE;
-  bool IsUnconditionalBranch(LIR* lir);
-
-  // Get the register class for load/store of a field.
-  RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
-
-  // Required for target - Dalvik-level generators.
-  void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                      RegLocation lr_shift);
-  void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                         RegLocation rl_src2, int flags);
-  void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
-                   RegLocation rl_dest, int scale);
-  void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
-                   RegLocation rl_src, int scale, bool card_mark);
-  void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                         RegLocation rl_shift, int flags);
-  void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                        RegLocation rl_src2);
-  void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                       RegLocation rl_src2);
-  void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                RegLocation rl_src2);
-  void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
-  bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
-  bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
-  bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
-  bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
-  bool GenInlinedSqrt(CallInfo* info);
-  bool GenInlinedPeek(CallInfo* info, OpSize size);
-  bool GenInlinedPoke(CallInfo* info, OpSize size);
-  void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
-  void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                      RegLocation rl_src2, int flags) OVERRIDE;
-  RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
-  RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
-  void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-  void GenDivZeroCheckWide(RegStorage reg);
-  void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
-  void GenExitSequence();
-  void GenSpecialExitSequence() OVERRIDE;
-  void GenSpecialEntryForSuspend() OVERRIDE;
-  void GenSpecialExitForSuspend() OVERRIDE;
-  void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
-  void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
-  void GenSelect(BasicBlock* bb, MIR* mir);
-  void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
-                        int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                        RegisterClass dest_reg_class) OVERRIDE;
-  bool GenMemBarrier(MemBarrierKind barrier_kind);
-  void GenMoveException(RegLocation rl_dest);
-  void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
-                                     int first_bit, int second_bit);
-  void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
-  void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
-  void GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
-  void GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
-  bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
-
-  // Required for target - single operation generators.
-  LIR* OpUnconditionalBranch(LIR* target);
-  LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
-  LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
-  LIR* OpCondBranch(ConditionCode cc, LIR* target);
-  LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
-  LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
-  LIR* OpIT(ConditionCode cond, const char* guide);
-  void OpEndIT(LIR* it);
-  LIR* OpMem(OpKind op, RegStorage r_base, int disp);
-  void OpPcRelLoad(RegStorage reg, LIR* target);
-  LIR* OpReg(OpKind op, RegStorage r_dest_src);
-  void OpRegCopy(RegStorage r_dest, RegStorage r_src);
-  LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
-  LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
-  LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
-  LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
-  LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
-  LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
-  LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
-  LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
-  LIR* OpTestSuspend(LIR* target);
-  LIR* OpVldm(RegStorage r_base, int count);
-  LIR* OpVstm(RegStorage r_base, int count);
-  void OpRegCopyWide(RegStorage dest, RegStorage src);
-
-  // TODO: collapse r_dest.
-  LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
-  // TODO: collapse r_src.
-  LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
-  void SpillCoreRegs();
-  void UnSpillCoreRegs();
-  static const MipsEncodingMap EncodingMap[kMipsLast];
-  bool InexpensiveConstantInt(int32_t value);
-  bool InexpensiveConstantFloat(int32_t value);
-  bool InexpensiveConstantLong(int64_t value);
-  bool InexpensiveConstantDouble(int64_t value);
-
-  bool WideGPRsAreAliases() const OVERRIDE {
-    return cu_->target64;  // Wide GPRs are formed by pairing on mips32.
-  }
-  bool WideFPRsAreAliases() const OVERRIDE {
-    return cu_->target64;  // Wide FPRs are formed by pairing on mips32.
-  }
-
-  LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
-
-  RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_div,
-                        int flags) OVERRIDE;
-  RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) OVERRIDE;
-  NextCallInsn GetNextSDCallInsn() OVERRIDE;
-  LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
-
-  // Unimplemented intrinsics.
-  bool GenInlinedCharAt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
-    return false;
-  }
-  bool GenInlinedAbsInt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
-    return false;
-  }
-  bool GenInlinedAbsLong(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
-    return false;
-  }
-  bool GenInlinedIndexOf(CallInfo* info ATTRIBUTE_UNUSED, bool zero_based ATTRIBUTE_UNUSED)
-  OVERRIDE {
-    return false;
-  }
-
-  // True if the ISA revision is R6.
-  const bool isaIsR6_;
-
-  // True if the floating-point unit is 32 bits wide.
-  const bool fpuIs32Bit_;
-
- private:
-  static int MipsNextSDCallInsn(CompilationUnit* cu, CallInfo* info, int state,
-                                const MethodReference& target_method, uint32_t,
-                                uintptr_t direct_code, uintptr_t direct_method,
-                                InvokeType type);
-
-  void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
-  void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-  void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-
-  void ConvertShortToLongBranch(LIR* lir);
-
-  // Mips64 specific long gen methods:
-  void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-  void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
-  void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-  void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                     RegLocation rl_src2, bool is_div, int flags);
-  void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src,
-                         RegisterClass reg_class);
-  RegStorage AllocPtrSizeTemp(bool required = true);
-
-  /**
-   * @param reg #RegStorage containing a Solo64 input register (e.g. @c a1 or @c d0).
-   * @return A Solo32 with the same register number as the @p reg (e.g. @c a1 or @c f0).
-   * @see As64BitReg
-   */
-  RegStorage As32BitReg(RegStorage reg) {
-    DCHECK(!reg.IsPair());
-    if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
-      if (kFailOnSizeError) {
-        LOG(FATAL) << "Expected 64b register";
-      } else {
-        LOG(WARNING) << "Expected 64b register";
-        return reg;
-      }
-    }
-    RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
-                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
-    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
-              ->GetReg().GetReg(),
-              ret_val.GetReg());
-    return ret_val;
-  }
-
-  /**
-   * @param reg #RegStorage containing a Solo32 input register (e.g. @c a1 or @c f0).
-   * @return A Solo64 with the same register number as the @p reg (e.g. @c a1 or @c d0).
-   */
-  RegStorage As64BitReg(RegStorage reg) {
-    DCHECK(!reg.IsPair());
-    if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
-      if (kFailOnSizeError) {
-        LOG(FATAL) << "Expected 32b register";
-      } else {
-        LOG(WARNING) << "Expected 32b register";
-        return reg;
-      }
-    }
-    RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
-                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
-    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
-              ->GetReg().GetReg(),
-              ret_val.GetReg());
-    return ret_val;
-  }
-
-  RegStorage Check64BitReg(RegStorage reg) {
-    if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
-      if (kFailOnSizeError) {
-        LOG(FATAL) << "Checked for 64b register";
-      } else {
-        LOG(WARNING) << "Checked for 64b register";
-        return As64BitReg(reg);
-      }
-    }
-    return reg;
-  }
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_MIPS_CODEGEN_MIPS_H_
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
deleted file mode 100644
index 52706df..0000000
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_mips.h"
-
-#include "base/logging.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "mips_lir.h"
-
-namespace art {
-
-void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
-                                  RegLocation rl_src1, RegLocation rl_src2) {
-  int op = kMipsNop;
-  RegLocation rl_result;
-
-  /*
-   * Don't attempt to optimize register usage since these opcodes call out to
-   * the handlers.
-   */
-  switch (opcode) {
-    case Instruction::ADD_FLOAT_2ADDR:
-    case Instruction::ADD_FLOAT:
-      op = kMipsFadds;
-      break;
-    case Instruction::SUB_FLOAT_2ADDR:
-    case Instruction::SUB_FLOAT:
-      op = kMipsFsubs;
-      break;
-    case Instruction::DIV_FLOAT_2ADDR:
-    case Instruction::DIV_FLOAT:
-      op = kMipsFdivs;
-      break;
-    case Instruction::MUL_FLOAT_2ADDR:
-    case Instruction::MUL_FLOAT:
-      op = kMipsFmuls;
-      break;
-    case Instruction::REM_FLOAT_2ADDR:
-    case Instruction::REM_FLOAT:
-      FlushAllRegs();   // Send everything to home location.
-      CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
-      rl_result = GetReturn(kFPReg);
-      StoreValue(rl_dest, rl_result);
-      return;
-    case Instruction::NEG_FLOAT:
-      GenNegFloat(rl_dest, rl_src1);
-      return;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
-  }
-  rl_src1 = LoadValue(rl_src1, kFPReg);
-  rl_src2 = LoadValue(rl_src2, kFPReg);
-  rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  StoreValue(rl_dest, rl_result);
-}
-
-void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
-                                   RegLocation rl_src1, RegLocation rl_src2) {
-  int op = kMipsNop;
-  RegLocation rl_result;
-
-  switch (opcode) {
-    case Instruction::ADD_DOUBLE_2ADDR:
-    case Instruction::ADD_DOUBLE:
-      op = kMipsFaddd;
-      break;
-    case Instruction::SUB_DOUBLE_2ADDR:
-    case Instruction::SUB_DOUBLE:
-      op = kMipsFsubd;
-      break;
-    case Instruction::DIV_DOUBLE_2ADDR:
-    case Instruction::DIV_DOUBLE:
-      op = kMipsFdivd;
-      break;
-    case Instruction::MUL_DOUBLE_2ADDR:
-    case Instruction::MUL_DOUBLE:
-      op = kMipsFmuld;
-      break;
-    case Instruction::REM_DOUBLE_2ADDR:
-    case Instruction::REM_DOUBLE:
-      FlushAllRegs();   // Send everything to home location.
-      CallRuntimeHelperRegLocationRegLocation(kQuickFmod, rl_src1, rl_src2, false);
-      rl_result = GetReturnWide(kFPReg);
-      StoreValueWide(rl_dest, rl_result);
-      return;
-    case Instruction::NEG_DOUBLE:
-      GenNegDouble(rl_dest, rl_src1);
-      return;
-    default:
-      LOG(FATAL) << "Unpexpected opcode: " << opcode;
-  }
-  rl_src1 = LoadValueWide(rl_src1, kFPReg);
-  DCHECK(rl_src1.wide);
-  rl_src2 = LoadValueWide(rl_src2, kFPReg);
-  DCHECK(rl_src2.wide);
-  rl_result = EvalLoc(rl_dest, kFPReg, true);
-  DCHECK(rl_dest.wide);
-  DCHECK(rl_result.wide);
-  NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void MipsMir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest ATTRIBUTE_UNUSED,
-                                             RegLocation rl_src1 ATTRIBUTE_UNUSED,
-                                             int32_t constant ATTRIBUTE_UNUSED) {
-  // TODO: need mips implementation.
-  LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in mips";
-}
-
-void MipsMir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest ATTRIBUTE_UNUSED,
-                                              RegLocation rl_src1 ATTRIBUTE_UNUSED,
-                                              int64_t constant ATTRIBUTE_UNUSED) {
-  // TODO: need mips implementation.
-  LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in mips";
-}
-
-void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
-                                RegLocation rl_src) {
-  int op = kMipsNop;
-  RegLocation rl_result;
-  switch (opcode) {
-    case Instruction::INT_TO_FLOAT:
-      op = kMipsFcvtsw;
-      break;
-    case Instruction::DOUBLE_TO_FLOAT:
-      op = kMipsFcvtsd;
-      break;
-    case Instruction::FLOAT_TO_DOUBLE:
-      op = kMipsFcvtds;
-      break;
-    case Instruction::INT_TO_DOUBLE:
-      op = kMipsFcvtdw;
-      break;
-    case Instruction::FLOAT_TO_INT:
-      GenConversionCall(kQuickF2iz, rl_dest, rl_src, kCoreReg);
-      return;
-    case Instruction::DOUBLE_TO_INT:
-      GenConversionCall(kQuickD2iz, rl_dest, rl_src, kCoreReg);
-      return;
-    case Instruction::LONG_TO_DOUBLE:
-      GenConversionCall(kQuickL2d, rl_dest, rl_src, kFPReg);
-      return;
-    case Instruction::FLOAT_TO_LONG:
-      GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
-      return;
-    case Instruction::LONG_TO_FLOAT:
-      GenConversionCall(kQuickL2f, rl_dest, rl_src, kFPReg);
-      return;
-    case Instruction::DOUBLE_TO_LONG:
-      GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
-      return;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
-  }
-  if (rl_src.wide) {
-    rl_src = LoadValueWide(rl_src, kFPReg);
-  } else {
-    rl_src = LoadValue(rl_src, kFPReg);
-  }
-  rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  if (rl_dest.wide) {
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    StoreValue(rl_dest, rl_result);
-  }
-}
-
-// Get the reg storage for a wide FP. Is either a solo or a pair. Base is Mips-counted, e.g., even
-// values are valid (0, 2).
-static RegStorage GetWideArgFP(bool fpuIs32Bit, size_t base) {
-  // TODO: make this directly computable, e.g., rMIPS_FARG0 + base. For now,
-  // inlining should optimize everything away.
-  if (fpuIs32Bit) {
-    switch (base) {
-      case 0:
-        return RegStorage(RegStorage::k64BitPair, rFARG0, rFARG1);
-      case 2:
-        return RegStorage(RegStorage::k64BitPair, rFARG2, rFARG3);
-    }
-  } else {
-    switch (base) {
-      case 0:
-        return RegStorage(RegStorage::k64BitSolo, rFARG0);
-      case 2:
-        return RegStorage(RegStorage::k64BitSolo, rFARG2);
-    }
-  }
-  LOG(FATAL) << "Unsupported Mips.GetWideFP: " << fpuIs32Bit << " " << base;
-  UNREACHABLE();
-}
-
-void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2) {
-  bool wide = true;
-  QuickEntrypointEnum target;
-
-  switch (opcode) {
-    case Instruction::CMPL_FLOAT:
-      target = kQuickCmplFloat;
-      wide = false;
-      break;
-    case Instruction::CMPG_FLOAT:
-      target = kQuickCmpgFloat;
-      wide = false;
-      break;
-    case Instruction::CMPL_DOUBLE:
-      target = kQuickCmplDouble;
-      break;
-    case Instruction::CMPG_DOUBLE:
-      target = kQuickCmpgDouble;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
-      target = kQuickCmplFloat;
-  }
-  FlushAllRegs();
-  LockCallTemps();
-  if (wide) {
-    RegStorage r_tmp1;
-    RegStorage r_tmp2;
-    if (cu_->target64) {
-      r_tmp1 = RegStorage(RegStorage::k64BitSolo, rFARG0);
-      r_tmp2 = RegStorage(RegStorage::k64BitSolo, rFARG1);
-    } else {
-      r_tmp1 = GetWideArgFP(fpuIs32Bit_, 0);
-      r_tmp2 = GetWideArgFP(fpuIs32Bit_, 2);
-    }
-    LoadValueDirectWideFixed(rl_src1, r_tmp1);
-    LoadValueDirectWideFixed(rl_src2, r_tmp2);
-  } else {
-    LoadValueDirectFixed(rl_src1, rs_rFARG0);
-    LoadValueDirectFixed(rl_src2, cu_->target64 ? rs_rFARG1 : rs_rFARG2);
-  }
-  RegStorage r_tgt = LoadHelper(target);
-  // NOTE: not a safepoint.
-  OpReg(kOpBlx, r_tgt);
-  RegLocation rl_result = GetReturn(kCoreReg);
-  StoreValue(rl_dest, rl_result);
-}
-
-void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb ATTRIBUTE_UNUSED,
-                                      MIR* mir ATTRIBUTE_UNUSED,
-                                      bool gt_bias ATTRIBUTE_UNUSED,
-                                      bool is_double ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
-}
-
-void MipsMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
-  RegLocation rl_result;
-  if (cu_->target64) {
-    rl_src = LoadValue(rl_src, kFPReg);
-    rl_result = EvalLoc(rl_dest, kFPReg, true);
-    NewLIR2(kMipsFnegs, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  } else {
-    rl_src = LoadValue(rl_src, kCoreReg);
-    rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    OpRegRegImm(kOpAdd, rl_result.reg, rl_src.reg, 0x80000000);
-  }
-  StoreValue(rl_dest, rl_result);
-}
-
-void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
-  RegLocation rl_result;
-  if (cu_->target64) {
-    rl_src = LoadValueWide(rl_src, kFPReg);
-    rl_result = EvalLocWide(rl_dest, kFPReg, true);
-    NewLIR2(kMipsFnegd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  } else {
-    rl_src = LoadValueWide(rl_src, kCoreReg);
-    rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    OpRegRegImm(kOpAdd, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 0x80000000);
-    OpRegCopy(rl_result.reg, rl_src.reg);
-  }
-  StoreValueWide(rl_dest, rl_result);
-}
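
The mips32 paths above negate floats in core registers by flipping the IEEE-754 sign bit; adding 0x80000000 toggles bit 31 exactly like an XOR, since the carry falls off the top. A tiny sketch of the bit trick:

```cpp
#include <cstdint>

uint32_t NegateFloatBits(uint32_t f_bits) {
  return f_bits + 0x80000000u;  // equivalent to f_bits ^ 0x80000000u
}

uint64_t NegateDoubleBits(uint64_t d_bits) {
  return d_bits ^ (1ull << 63);  // only the sign bit, in the high word
}
```
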
-
-bool MipsMir2Lir::GenInlinedMinMax(CallInfo* info ATTRIBUTE_UNUSED,
-                                   bool is_min ATTRIBUTE_UNUSED,
-                                   bool is_long ATTRIBUTE_UNUSED) {
-  // TODO: need Mips implementation.
-  return false;
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
deleted file mode 100644
index 8ca53ea..0000000
--- a/compiler/dex/quick/mips/int_mips.cc
+++ /dev/null
@@ -1,932 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This file contains codegen for the Mips ISA */
-
-#include "codegen_mips.h"
-
-#include "base/logging.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "dex/reg_storage_eq.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "mips_lir.h"
-#include "mirror/array-inl.h"
-
-namespace art {
-
-/*
- * Compare two 64-bit values
- *    x = y     return  0
- *    x < y     return -1
- *    x > y     return  1
- *
- * Mips32 implementation
- *    slt   t0,  x.hi, y.hi;        # (x.hi < y.hi) ? 1:0
- *    sgt   t1,  x.hi, y.hi;        # (x.hi > y.hi) ? 1:0
- *    subu  res, t1, t0             # res = -1:1:0 for [ < > = ]
- *    bnez  res, finish
- *    sltu  t0, x.lo, y.lo
- *    sgtu  t1, x.lo, y.lo
- *    subu  res, t1, t0
- * finish:
- *
- * Mips64 implementation
- *    slt   temp, x, y;             # (x < y) ? 1:0
- *    slt   res, y, x;              # (x > y) ? 1:0
- *    subu  res, res, temp;         # res = -1:1:0 for [ < > = ]
- *
- */
-void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  if (cu_->target64) {
-    RegStorage temp = AllocTempWide();
-    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    NewLIR3(kMipsSlt, temp.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-    NewLIR3(kMipsSlt, rl_result.reg.GetReg(), rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
-    NewLIR3(kMipsSubu, rl_result.reg.GetReg(), rl_result.reg.GetReg(), temp.GetReg());
-    FreeTemp(temp);
-    StoreValue(rl_dest, rl_result);
-  } else {
-    RegStorage t0 = AllocTemp();
-    RegStorage t1 = AllocTemp();
-    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    NewLIR3(kMipsSlt, t0.GetReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
-    NewLIR3(kMipsSlt, t1.GetReg(), rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
-    NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
-    LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, nullptr);
-    NewLIR3(kMipsSltu, t0.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
-    NewLIR3(kMipsSltu, t1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
-    NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
-    FreeTemp(t0);
-    FreeTemp(t1);
-    LIR* target = NewLIR0(kPseudoTargetLabel);
-    branch->target = target;
-    StoreValue(rl_dest, rl_result);
-  }
-}
-
-LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
-  LIR* branch;
-  MipsOpCode slt_op;
-  MipsOpCode br_op;
-  bool cmp_zero = false;
-  bool swapped = false;
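-  // Only eq/ne have a direct two-register compare-and-branch on MIPS; the
-  // other conditions are synthesized with slt/sltu into a temp followed by a
-  // branch on (non)zero. 'swapped' reverses the operands so Gt/Le/Hi reduce
-  // to slt(u).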
-  switch (cond) {
-    case kCondEq:
-      br_op = kMipsBeq;
-      cmp_zero = true;
-      break;
-    case kCondNe:
-      br_op = kMipsBne;
-      cmp_zero = true;
-      break;
-    case kCondUlt:
-      slt_op = kMipsSltu;
-      br_op = kMipsBnez;
-      break;
-    case kCondUge:
-      slt_op = kMipsSltu;
-      br_op = kMipsBeqz;
-      break;
-    case kCondGe:
-      slt_op = kMipsSlt;
-      br_op = kMipsBeqz;
-      break;
-    case kCondGt:
-      slt_op = kMipsSlt;
-      br_op = kMipsBnez;
-      swapped = true;
-      break;
-    case kCondLe:
-      slt_op = kMipsSlt;
-      br_op = kMipsBeqz;
-      swapped = true;
-      break;
-    case kCondLt:
-      slt_op = kMipsSlt;
-      br_op = kMipsBnez;
-      break;
-    case kCondHi:  // Gtu
-      slt_op = kMipsSltu;
-      br_op = kMipsBnez;
-      swapped = true;
-      break;
-    default:
-      LOG(FATAL) << "No support for ConditionCode: " << cond;
-      return nullptr;
-  }
-  if (cmp_zero) {
-    branch = NewLIR2(br_op, src1.GetReg(), src2.GetReg());
-  } else {
-    RegStorage t_reg = AllocTemp();
-    if (swapped) {
-      NewLIR3(slt_op, t_reg.GetReg(), src2.GetReg(), src1.GetReg());
-    } else {
-      NewLIR3(slt_op, t_reg.GetReg(), src1.GetReg(), src2.GetReg());
-    }
-    branch = NewLIR1(br_op, t_reg.GetReg());
-    FreeTemp(t_reg);
-  }
-  branch->target = target;
-  return branch;
-}
-
-LIR* MipsMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) {
-  LIR* branch;
-  if (check_value != 0) {
-    // TUNING: handle s16 & kCondLt/Mi case using slti.
-    RegStorage t_reg = AllocTemp();
-    LoadConstant(t_reg, check_value);
-    branch = OpCmpBranch(cond, reg, t_reg, target);
-    FreeTemp(t_reg);
-    return branch;
-  }
-  MipsOpCode opc;
-  switch (cond) {
-    case kCondEq: opc = kMipsBeqz; break;
-    case kCondGe: opc = kMipsBgez; break;
-    case kCondGt: opc = kMipsBgtz; break;
-    case kCondLe: opc = kMipsBlez; break;
-    // case kCondMi:
-    case kCondLt: opc = kMipsBltz; break;
-    case kCondNe: opc = kMipsBnez; break;
-    default:
-      // TUNING: use slti when applicable.
-      RegStorage t_reg = AllocTemp();
-      LoadConstant(t_reg, check_value);
-      branch = OpCmpBranch(cond, reg, t_reg, target);
-      FreeTemp(t_reg);
-      return branch;
-  }
-  branch = NewLIR1(opc, reg.GetReg());
-  branch->target = target;
-  return branch;
-}
-
-LIR* MipsMir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
-  LIR* res;
-  MipsOpCode opcode;
-
-  if (!cu_->target64) {
-    // If src or dest is a pair, we'll be using low reg.
-    if (r_dest.IsPair()) {
-      r_dest = r_dest.GetLow();
-    }
-    if (r_src.IsPair()) {
-      r_src = r_src.GetLow();
-    }
-  } else {
-    DCHECK(!r_dest.IsPair() && !r_src.IsPair());
-  }
-
-  if (r_dest.IsFloat() || r_src.IsFloat())
-    return OpFpRegCopy(r_dest, r_src);
-  if (cu_->target64) {
-    // TODO: Check that r_src and r_dest are the same width (both 32 or both 64 bits) on Mips64.
-    if (r_dest.Is64Bit() || r_src.Is64Bit()) {
-      opcode = kMipsMove;
-    } else {
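-      // On Mips64, sll sign-extends its 32-bit result, so with the default
-      // shift amount of 0 it serves as a canonical 32-bit register copy.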
-      opcode = kMipsSll;
-    }
-  } else {
-    opcode = kMipsMove;
-  }
-  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
-  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
-    res->flags.is_nop = true;
-  }
-  return res;
-}
-
-void MipsMir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
-  if (r_dest != r_src) {
-    LIR *res = OpRegCopyNoInsert(r_dest, r_src);
-    AppendLIR(res);
-  }
-}
-
-void MipsMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
-  if (cu_->target64) {
-    OpRegCopy(r_dest, r_src);
-    return;
-  }
-  if (r_dest != r_src) {
-    bool dest_fp = r_dest.IsFloat();
-    bool src_fp = r_src.IsFloat();
-    if (dest_fp) {
-      if (src_fp) {
-        // Here if both src and dest are fp registers. OpRegCopy will choose the right copy
-        // (solo or pair).
-        OpRegCopy(r_dest, r_src);
-      } else {
-        // Here if dest is fp reg and src is core reg. Note that the operands
-        // are swapped for the mtc1 and mthc1 instructions.
-        if (fpuIs32Bit_) {
-          NewLIR2(kMipsMtc1, r_src.GetLowReg(), r_dest.GetLowReg());
-          NewLIR2(kMipsMtc1, r_src.GetHighReg(), r_dest.GetHighReg());
-        } else {
-          r_dest = Fp64ToSolo32(r_dest);
-          NewLIR2(kMipsMtc1, r_src.GetLowReg(), r_dest.GetReg());
-          NewLIR2(kMipsMthc1, r_src.GetHighReg(), r_dest.GetReg());
-        }
-      }
-    } else {
-      if (src_fp) {
-        // Here if dest is core reg and src is fp reg.
-        if (fpuIs32Bit_) {
-          NewLIR2(kMipsMfc1, r_dest.GetLowReg(), r_src.GetLowReg());
-          NewLIR2(kMipsMfc1, r_dest.GetHighReg(), r_src.GetHighReg());
-        } else {
-          r_src = Fp64ToSolo32(r_src);
-          NewLIR2(kMipsMfc1, r_dest.GetLowReg(), r_src.GetReg());
-          NewLIR2(kMipsMfhc1, r_dest.GetHighReg(), r_src.GetReg());
-        }
-      } else {
-        // Here if both src and dest are core registers.
-        // Handle overlap
-        if (r_src.GetHighReg() != r_dest.GetLowReg()) {
-          OpRegCopy(r_dest.GetLow(), r_src.GetLow());
-          OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
-        } else if (r_src.GetLowReg() != r_dest.GetHighReg()) {
-          OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
-          OpRegCopy(r_dest.GetLow(), r_src.GetLow());
-        } else {
-          RegStorage r_tmp = AllocTemp();
-          OpRegCopy(r_tmp, r_src.GetHigh());
-          OpRegCopy(r_dest.GetLow(), r_src.GetLow());
-          OpRegCopy(r_dest.GetHigh(), r_tmp);
-          FreeTemp(r_tmp);
-        }
-      }
-    }
-  }
-}
-
-void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
-                                   int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                                   RegisterClass dest_reg_class ATTRIBUTE_UNUSED) {
-  // Implement as a branch-over.
-  // TODO: Conditional move?
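-  // Emits: dest = true_val; if (cond) goto done; dest = false_val; done: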
-  LoadConstant(rs_dest, true_val);
-  LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, nullptr);
-  LoadConstant(rs_dest, false_val);
-  LIR* target_label = NewLIR0(kPseudoTargetLabel);
-  ne_branchover->target = target_label;
-}
-
-void MipsMir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "Need codegen for select";
-}
-
-void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb ATTRIBUTE_UNUSED,
-                                        MIR* mir ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
-}
-
-RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
-                                   bool is_div) {
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-
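-  // R6 has dedicated div/mod instructions; pre-R6 uses div, which leaves the
-  // quotient in LO and the remainder in HI, read back via mflo/mfhi.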
-  if (isaIsR6_) {
-    NewLIR3(is_div ? kMipsR6Div : kMipsR6Mod, rl_result.reg.GetReg(), reg1.GetReg(), reg2.GetReg());
-  } else {
-    NewLIR2(kMipsR2Div, reg1.GetReg(), reg2.GetReg());
-    NewLIR1(is_div ? kMipsR2Mflo : kMipsR2Mfhi, rl_result.reg.GetReg());
-  }
-  return rl_result;
-}
-
-RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
-  RegStorage t_reg = AllocTemp();
-  // lit is guaranteed to be a 16-bit constant.
-  if (IsUint<16>(lit)) {
-    NewLIR3(kMipsOri, t_reg.GetReg(), rZERO, lit);
-  } else {
-    // Addiu will sign extend the entire width (32 or 64) of the register.
-    NewLIR3(kMipsAddiu, t_reg.GetReg(), rZERO, lit);
-  }
-  RegLocation rl_result = GenDivRem(rl_dest, reg1, t_reg, is_div);
-  FreeTemp(t_reg);
-  return rl_result;
-}
-
-RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
-                                   RegLocation rl_src1 ATTRIBUTE_UNUSED,
-                                   RegLocation rl_src2 ATTRIBUTE_UNUSED,
-                                   bool is_div ATTRIBUTE_UNUSED,
-                                   int flags ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of GenDivRem for Mips";
-  UNREACHABLE();
-}
-
-RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED,
-                                      RegLocation rl_src1 ATTRIBUTE_UNUSED,
-                                      int lit ATTRIBUTE_UNUSED,
-                                      bool is_div ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of GenDivRemLit for Mips";
-  UNREACHABLE();
-}
-
-bool MipsMir2Lir::GenInlinedCas(CallInfo* info ATTRIBUTE_UNUSED,
-                                bool is_long ATTRIBUTE_UNUSED,
-                                bool is_object ATTRIBUTE_UNUSED) {
-  return false;
-}
-
-bool MipsMir2Lir::GenInlinedAbsFloat(CallInfo* info ATTRIBUTE_UNUSED) {
-  // TODO: add Mips implementation.
-  return false;
-}
-
-bool MipsMir2Lir::GenInlinedAbsDouble(CallInfo* info ATTRIBUTE_UNUSED) {
-  // TODO: add Mips implementation.
-  return false;
-}
-
-bool MipsMir2Lir::GenInlinedSqrt(CallInfo* info ATTRIBUTE_UNUSED) {
-  return false;
-}
-
-bool MipsMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
-  if (size != kSignedByte) {
-    // MIPS supports only aligned access. Defer unaligned access to JNI implementation.
-    return false;
-  }
-  RegLocation rl_src_address = info->args[0];       // Long address.
-  if (!cu_->target64) {
-    rl_src_address = NarrowRegLoc(rl_src_address);  // Ignore high half in info->args[1].
-  }
-  RegLocation rl_dest = InlineTarget(info);
-  RegLocation rl_address;
-  if (cu_->target64) {
-    rl_address = LoadValueWide(rl_src_address, kCoreReg);
-  } else {
-    rl_address = LoadValue(rl_src_address, kCoreReg);
-  }
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  DCHECK(size == kSignedByte);
-  LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
-  StoreValue(rl_dest, rl_result);
-  return true;
-}
-
-bool MipsMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
-  if (size != kSignedByte) {
-    // MIPS supports only aligned access. Defer unaligned access to JNI implementation.
-    return false;
-  }
-  RegLocation rl_src_address = info->args[0];       // Long address.
-  if (!cu_->target64) {
-    rl_src_address = NarrowRegLoc(rl_src_address);  // Ignore high half in info->args[1].
-  }
-  RegLocation rl_src_value = info->args[2];         // [size] value.
-  RegLocation rl_address;
-  if (cu_->target64) {
-    rl_address = LoadValueWide(rl_src_address, kCoreReg);
-  } else {
-    rl_address = LoadValue(rl_src_address, kCoreReg);
-  }
-  DCHECK(size == kSignedByte);
-  RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
-  StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
-  return true;
-}
-
-void MipsMir2Lir::OpPcRelLoad(RegStorage reg ATTRIBUTE_UNUSED, LIR* target ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips";
-  UNREACHABLE();
-}
-
-LIR* MipsMir2Lir::OpVldm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpVldm for Mips";
-  UNREACHABLE();
-}
-
-LIR* MipsMir2Lir::OpVstm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpVstm for Mips";
-  UNREACHABLE();
-}
-
-void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
-                                                RegLocation rl_result,
-                                                int lit ATTRIBUTE_UNUSED,
-                                                int first_bit,
-                                                int second_bit) {
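-  // Computes rl_result = rl_src * ((1 << first_bit) + (1 << second_bit)),
-  // i.e. a multiply by a literal with exactly two bits set, as
-  // ((x << (second_bit - first_bit)) + x) << first_bit.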
-  RegStorage t_reg = AllocTemp();
-  OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
-  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
-  FreeTemp(t_reg);
-  if (first_bit != 0) {
-    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
-  }
-}
-
-void MipsMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
-  if (cu_->target64) {
-    GenDivZeroCheck(reg);
-  } else {
-    DCHECK(reg.IsPair());   // TODO: support k64BitSolo.
-    RegStorage t_reg = AllocTemp();
-    OpRegRegReg(kOpOr, t_reg, reg.GetLow(), reg.GetHigh());
-    GenDivZeroCheck(t_reg);
-    FreeTemp(t_reg);
-  }
-}
-
-// Test suspend flag, return target of taken suspend branch.
-LIR* MipsMir2Lir::OpTestSuspend(LIR* target) {
-  OpRegImm(kOpSub, TargetPtrReg(kSuspend), 1);
-  return OpCmpImmBranch((target == nullptr) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
-}
-
-// Decrement register and branch on condition.
-LIR* MipsMir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
-  OpRegImm(kOpSub, reg, 1);
-  return OpCmpImmBranch(c_code, reg, 0, target);
-}
-
-bool MipsMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED,
-                                     bool is_div ATTRIBUTE_UNUSED,
-                                     RegLocation rl_src ATTRIBUTE_UNUSED,
-                                     RegLocation rl_dest ATTRIBUTE_UNUSED,
-                                     int lit ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of smallLiteralDivRem in Mips";
-  UNREACHABLE();
-}
-
-bool MipsMir2Lir::EasyMultiply(RegLocation rl_src ATTRIBUTE_UNUSED,
-                               RegLocation rl_dest ATTRIBUTE_UNUSED,
-                               int lit ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of easyMultiply in Mips";
-  UNREACHABLE();
-}
-
-LIR* MipsMir2Lir::OpIT(ConditionCode cond ATTRIBUTE_UNUSED, const char* guide ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpIT in Mips";
-  UNREACHABLE();
-}
-
-void MipsMir2Lir::OpEndIT(LIR* it ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpEndIT in Mips";
-}
-
-void MipsMir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  /*
-   *  [v1 v0] =  [a1 a0] + [a3 a2];
-   *  addu v0,a2,a0
-   *  addu t1,a3,a1
-   *  sltu v1,v0,a2
-   *  addu v1,v1,t1
-   */
-
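-  // The sltu computes the carry out of the low-word add: if the 32-bit sum
-  // is (unsigned) less than one of its addends, the addition wrapped, so 1
-  // carries into the high word.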
-  OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src2.reg.GetLow(), rl_src1.reg.GetLow());
-  RegStorage t_reg = AllocTemp();
-  OpRegRegReg(kOpAdd, t_reg, rl_src2.reg.GetHigh(), rl_src1.reg.GetHigh());
-  NewLIR3(kMipsSltu, rl_result.reg.GetHighReg(), rl_result.reg.GetLowReg(),
-          rl_src2.reg.GetLowReg());
-  OpRegRegReg(kOpAdd, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), t_reg);
-  FreeTemp(t_reg);
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void MipsMir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  /*
-   *  [v1 v0] =  [a1 a0] - [a3 a2];
-   *  sltu  t1,a0,a2
-   *  subu  v0,a0,a2
-   *  subu  v1,a1,a3
-   *  subu  v1,v1,t1
-   */
-
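-  // The sltu computes the borrow: if src1.lo is (unsigned) less than src2.lo,
-  // the low-word subtraction wraps, so 1 is subtracted from the high word.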
-  RegStorage t_reg = AllocTemp();
-  NewLIR3(kMipsSltu, t_reg.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
-  OpRegRegReg(kOpSub, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
-  OpRegRegReg(kOpSub, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
-  OpRegRegReg(kOpSub, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), t_reg);
-  FreeTemp(t_reg);
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void MipsMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                                 RegLocation rl_src2, int flags) {
-  if (cu_->target64) {
-    switch (opcode) {
-      case Instruction::NOT_LONG:
-        GenNotLong(rl_dest, rl_src2);
-        return;
-      case Instruction::ADD_LONG:
-      case Instruction::ADD_LONG_2ADDR:
-        GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
-        return;
-      case Instruction::SUB_LONG:
-      case Instruction::SUB_LONG_2ADDR:
-        GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
-        return;
-      case Instruction::MUL_LONG:
-      case Instruction::MUL_LONG_2ADDR:
-        GenMulLong(rl_dest, rl_src1, rl_src2);
-        return;
-      case Instruction::DIV_LONG:
-      case Instruction::DIV_LONG_2ADDR:
-        GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true, flags);
-        return;
-      case Instruction::REM_LONG:
-      case Instruction::REM_LONG_2ADDR:
-        GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false, flags);
-        return;
-      case Instruction::AND_LONG:
-      case Instruction::AND_LONG_2ADDR:
-        GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
-        return;
-      case Instruction::OR_LONG:
-      case Instruction::OR_LONG_2ADDR:
-        GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
-        return;
-      case Instruction::XOR_LONG:
-      case Instruction::XOR_LONG_2ADDR:
-        GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
-        return;
-      case Instruction::NEG_LONG:
-        GenNegLong(rl_dest, rl_src2);
-        return;
-
-      default:
-        LOG(FATAL) << "Invalid long arith op";
-        return;
-    }
-  } else {
-    switch (opcode) {
-      case Instruction::ADD_LONG:
-      case Instruction::ADD_LONG_2ADDR:
-        GenAddLong(rl_dest, rl_src1, rl_src2);
-        return;
-      case Instruction::SUB_LONG:
-      case Instruction::SUB_LONG_2ADDR:
-        GenSubLong(rl_dest, rl_src1, rl_src2);
-        return;
-      case Instruction::NEG_LONG:
-        GenNegLong(rl_dest, rl_src2);
-        return;
-      default:
-        break;
-    }
-    // Fallback for all other ops.
-    Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
-  }
-}
-
-void MipsMir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void MipsMir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
-  rl_src = LoadValueWide(rl_src, kCoreReg);
-  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  OpRegReg(kOpMvn, rl_result.reg, rl_src.reg);
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void MipsMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  NewLIR3(kMips64Dmul, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void MipsMir2Lir::GenDivRemLong(Instruction::Code opcode ATTRIBUTE_UNUSED,
-                                RegLocation rl_dest,
-                                RegLocation rl_src1,
-                                RegLocation rl_src2,
-                                bool is_div,
-                                int flags) {
-  // TODO: Implement easy div/rem?
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-  if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
-    GenDivZeroCheckWide(rl_src2.reg);
-  }
-  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  NewLIR3(is_div ? kMips64Ddiv : kMips64Dmod, rl_result.reg.GetReg(), rl_src1.reg.GetReg(),
-          rl_src2.reg.GetReg());
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
-  rl_src = LoadValueWide(rl_src, kCoreReg);
-  RegLocation rl_result;
-
-  if (cu_->target64) {
-    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-    OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    //  [v1 v0] =  -[a1 a0]
-    //  negu  v0,a0
-    //  negu  v1,a1
-    //  sltu  t1,r_zero,v0
-    //  subu  v1,v1,t1
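-    //  t1 is 1 whenever the negated low word is nonzero, i.e. whenever a
-    //  borrow must propagate into the high word.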
-    OpRegReg(kOpNeg, rl_result.reg.GetLow(), rl_src.reg.GetLow());
-    OpRegReg(kOpNeg, rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
-    RegStorage t_reg = AllocTemp();
-    NewLIR3(kMipsSltu, t_reg.GetReg(), rZERO, rl_result.reg.GetLowReg());
-    OpRegRegReg(kOpSub, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), t_reg);
-    FreeTemp(t_reg);
-    StoreValueWide(rl_dest, rl_result);
-  }
-}
-
-/*
- * Generate array load
- */
-void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
-                              RegLocation rl_index, RegLocation rl_dest, int scale) {
-  RegisterClass reg_class = RegClassBySize(size);
-  int len_offset = mirror::Array::LengthOffset().Int32Value();
-  int data_offset;
-  RegLocation rl_result;
-  rl_array = LoadValue(rl_array, kRefReg);
-  rl_index = LoadValue(rl_index, kCoreReg);
-
-  // FIXME: need to add support for rl_index.is_const.
-
-  if (size == k64 || size == kDouble) {
-    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
-  } else {
-    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
-  }
-
-  // Null object?
-  GenNullCheck(rl_array.reg, opt_flags);
-
-  RegStorage reg_ptr = (cu_->target64) ? AllocTempRef() : AllocTemp();
-  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
-  RegStorage reg_len;
-  if (needs_range_check) {
-    reg_len = AllocTemp();
-    // Get len.
-    Load32Disp(rl_array.reg, len_offset, reg_len);
-    MarkPossibleNullPointerException(opt_flags);
-  } else {
-    ForceImplicitNullCheck(rl_array.reg, opt_flags, false);
-  }
-  // reg_ptr -> array data.
-  OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
-  FreeTemp(rl_array.reg);
-  if ((size == k64) || (size == kDouble)) {
-    if (scale) {
-      RegStorage r_new_index = AllocTemp();
-      OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
-      OpRegReg(kOpAdd, reg_ptr, r_new_index);
-      FreeTemp(r_new_index);
-    } else {
-      OpRegReg(kOpAdd, reg_ptr, rl_index.reg);
-    }
-    FreeTemp(rl_index.reg);
-    rl_result = EvalLoc(rl_dest, reg_class, true);
-
-    if (needs_range_check) {
-      GenArrayBoundsCheck(rl_index.reg, reg_len);
-      FreeTemp(reg_len);
-    }
-    LoadBaseDisp(reg_ptr, 0, rl_result.reg, size, kNotVolatile);
-
-    FreeTemp(reg_ptr);
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    rl_result = EvalLoc(rl_dest, reg_class, true);
-
-    if (needs_range_check) {
-      GenArrayBoundsCheck(rl_index.reg, reg_len);
-      FreeTemp(reg_len);
-    }
-
-    if (cu_->target64) {
-      if (rl_result.ref) {
-        LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), As32BitReg(rl_result.reg), scale,
-                        kReference);
-      } else {
-        LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
-      }
-    } else {
-      LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
-    }
-
-    FreeTemp(reg_ptr);
-    StoreValue(rl_dest, rl_result);
-  }
-}
-
-/*
- * Generate array store
- *
- */
-void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
-                              RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
-  RegisterClass reg_class = RegClassBySize(size);
-  int len_offset = mirror::Array::LengthOffset().Int32Value();
-  int data_offset;
-
-  if (size == k64 || size == kDouble) {
-    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
-  } else {
-    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
-  }
-
-  rl_array = LoadValue(rl_array, kRefReg);
-  rl_index = LoadValue(rl_index, kCoreReg);
-
-  // FIXME: need to add support for rl_index.is_const.
-
-  RegStorage reg_ptr;
-  bool allocated_reg_ptr_temp = false;
-  if (IsTemp(rl_array.reg) && !card_mark) {
-    Clobber(rl_array.reg);
-    reg_ptr = rl_array.reg;
-  } else {
-    reg_ptr = AllocTemp();
-    OpRegCopy(reg_ptr, rl_array.reg);
-    allocated_reg_ptr_temp = true;
-  }
-
-  // Null object?
-  GenNullCheck(rl_array.reg, opt_flags);
-
-  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
-  RegStorage reg_len;
-  if (needs_range_check) {
-    reg_len = AllocTemp();
-    // NOTE: max live temps (4) here.
-    // Get len.
-    Load32Disp(rl_array.reg, len_offset, reg_len);
-    MarkPossibleNullPointerException(opt_flags);
-  } else {
-    ForceImplicitNullCheck(rl_array.reg, opt_flags, false);
-  }
-  // reg_ptr -> array data.
-  OpRegImm(kOpAdd, reg_ptr, data_offset);
-  // At this point, reg_ptr points to the array data; 2 temps are live.
-  if ((size == k64) || (size == kDouble)) {
-    // TUNING: specific wide routine that can handle fp regs.
-    if (scale) {
-      RegStorage r_new_index = AllocTemp();
-      OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
-      OpRegReg(kOpAdd, reg_ptr, r_new_index);
-      FreeTemp(r_new_index);
-    } else {
-      OpRegReg(kOpAdd, reg_ptr, rl_index.reg);
-    }
-    rl_src = LoadValueWide(rl_src, reg_class);
-
-    if (needs_range_check) {
-      GenArrayBoundsCheck(rl_index.reg, reg_len);
-      FreeTemp(reg_len);
-    }
-
-    StoreBaseDisp(reg_ptr, 0, rl_src.reg, size, kNotVolatile);
-  } else {
-    rl_src = LoadValue(rl_src, reg_class);
-    if (needs_range_check) {
-      GenArrayBoundsCheck(rl_index.reg, reg_len);
-      FreeTemp(reg_len);
-    }
-    StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
-  }
-  if (allocated_reg_ptr_temp) {
-    FreeTemp(reg_ptr);
-  }
-  if (card_mark) {
-    MarkGCCard(opt_flags, rl_src.reg, rl_array.reg);
-  }
-}
-
-void MipsMir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                                 RegLocation rl_shift) {
-  if (!cu_->target64) {
-    Mir2Lir::GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
-    return;
-  }
-  OpKind op = kOpBkpt;
-  switch (opcode) {
-    case Instruction::SHL_LONG:
-    case Instruction::SHL_LONG_2ADDR:
-      op = kOpLsl;
-      break;
-    case Instruction::SHR_LONG:
-    case Instruction::SHR_LONG_2ADDR:
-      op = kOpAsr;
-      break;
-    case Instruction::USHR_LONG:
-    case Instruction::USHR_LONG_2ADDR:
-      op = kOpLsr;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected case: " << opcode;
-  }
-  rl_shift = LoadValue(rl_shift, kCoreReg);
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
-                                    RegLocation rl_dest,
-                                    RegLocation rl_src1,
-                                    RegLocation rl_shift,
-                                    int flags ATTRIBUTE_UNUSED) {
-  if (!cu_->target64) {
-    // Default implementation is just to ignore the constant case.
-    GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
-    return;
-  }
-  OpKind op = kOpBkpt;
-  // Per spec, we only care about low 6 bits of shift amount.
-  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  if (shift_amount == 0) {
-    StoreValueWide(rl_dest, rl_src1);
-    return;
-  }
-
-  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  switch (opcode) {
-    case Instruction::SHL_LONG:
-    case Instruction::SHL_LONG_2ADDR:
-      op = kOpLsl;
-      break;
-    case Instruction::SHR_LONG:
-    case Instruction::SHR_LONG_2ADDR:
-      op = kOpAsr;
-      break;
-    case Instruction::USHR_LONG:
-    case Instruction::USHR_LONG_2ADDR:
-      op = kOpLsr;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected case";
-  }
-  OpRegRegImm(op, rl_result.reg, rl_src1.reg, shift_amount);
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void MipsMir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                                    RegLocation rl_src1, RegLocation rl_src2, int flags) {
-  // Default - bail to non-const handler.
-  GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
-}
-
-void MipsMir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
-  if (!cu_->target64) {
-    Mir2Lir::GenIntToLong(rl_dest, rl_src);
-    return;
-  }
-  rl_src = LoadValue(rl_src, kCoreReg);
-  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
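-  // On Mips64, sll with a shift amount of 0 sign-extends the low 32 bits of
-  // the source into the full 64-bit result register.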
-  NewLIR3(kMipsSll, rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0);
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void MipsMir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
-                                    RegLocation rl_src, RegisterClass reg_class) {
-  FlushAllRegs();   // Send everything to home location.
-  CallRuntimeHelperRegLocation(trampoline, rl_src, false);
-  if (rl_dest.wide) {
-    RegLocation rl_result = GetReturnWide(reg_class);
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    RegLocation rl_result = GetReturn(reg_class);
-    StoreValue(rl_dest, rl_result);
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
deleted file mode 100644
index 078ac0a..0000000
--- a/compiler/dex/quick/mips/mips_lir.h
+++ /dev/null
@@ -1,720 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_MIPS_MIPS_LIR_H_
-#define ART_COMPILER_DEX_QUICK_MIPS_MIPS_LIR_H_
-
-#include "dex/reg_location.h"
-#include "dex/reg_storage.h"
-
-namespace art {
-
-/*
- * Runtime register conventions.
- *
- *          mips32            | mips64
- * $0:      zero is always the value 0
- * $1:      at is scratch (normally used as temp reg by assembler)
- * $2,$3:   v0, v1 are scratch (normally hold subroutine return values)
- * $4-$7:   a0-a3 are scratch (normally hold subroutine arguments)
- * $8-$11:  t0-t3 are scratch | a4-a7 are scratch (normally hold subroutine arguments)
- * $12-$15: t4-t7 are scratch | t0-t3 are scratch
- * $16:     s0 (rSUSPEND) is reserved [holds suspend-check counter]
- * $17:     s1 (rSELF) is reserved [holds current &Thread]
- * $18-$23: s2-s7 are callee save (promotion target)
- * $24:     t8 is scratch
- * $25:     t9 is scratch (normally used for function calls)
- * $26,$27: k0, k1 are reserved for use by interrupt handlers
- * $28:     gp is reserved for global pointer
- * $29:     sp is reserved
- * $30:     s8 is callee save (promotion target)
- * $31:     ra is scratch (normally holds the return addr)
- *
- * Preserved across C calls: s0-s8
- * Trashed across C calls (mips32): at, v0-v1, a0-a3, t0-t9, gp, ra
- * Trashed across C calls (mips64): at, v0-v1, a0-a7, t0-t3, t8, t9, gp, ra
- *
- * Floating point registers (mips32)
- * NOTE: there are 32 fp registers (16 df pairs), but currently
- *       only 16 fp registers (8 df pairs) are supported.
- * f0-f15
- * df0-df7, where df0={f0,f1}, df1={f2,f3}, ... , df7={f14,f15}
- *
- * f0-f15 (df0-df7) trashed across C calls
- *
- * Floating point registers (mips64)
- * NOTE: there are 32 fp registers.
- * f0-f31
- *
- * For mips32 code use:
- *      a0-a3 to hold operands
- *      v0-v1 to hold results
- *      t0-t9 for temps
- *
- * For mips64 code use:
- *      a0-a7 to hold operands
- *      v0-v1 to hold results
- *      t0-t3, t8-t9 for temps
- *
- * All jump/branch instructions have a delay slot after them.
- *
- * Stack frame diagram (stack grows down, higher addresses at top):
- *
- * +------------------------+
- * | IN[ins-1]              |  {Note: resides in caller's frame}
- * |       .                |
- * | IN[0]                  |
- * | caller's Method*       |
- * +========================+  {Note: start of callee's frame}
- * | spill region           |  {variable sized - will include ra if non-leaf.}
- * +------------------------+
- * | ...filler word...      |  {Note: used as 2nd word of V[locals-1] if long}
- * +------------------------+
- * | V[locals-1]            |
- * | V[locals-2]            |
- * |      .                 |
- * |      .                 |
- * | V[1]                   |
- * | V[0]                   |
- * +------------------------+
- * |  0 to 3 words padding  |
- * +------------------------+
- * | OUT[outs-1]            |
- * | OUT[outs-2]            |
- * |       .                |
- * | OUT[0]                 |
- * | cur_method*            | <<== sp w/ 16-byte alignment
- * +========================+
- */
-
-
-#define LOWORD_OFFSET 0
-#define HIWORD_OFFSET 4
-
-#define rFARG0 rF12
-#define rs_rFARG0 rs_rF12
-#define rFARG1 rF13
-#define rs_rFARG1 rs_rF13
-#define rFARG2 rF14
-#define rs_rFARG2 rs_rF14
-#define rFARG3 rF15
-#define rs_rFARG3 rs_rF15
-
-enum MipsResourceEncodingPos {
-  kMipsGPReg0   = 0,
-  kMipsRegSP    = 29,
-  kMipsRegLR    = 31,
-  kMipsFPReg0   = 32,  // only 16 fp regs supported currently.
-  kMipsFPRegEnd   = 48,
-  kMipsRegHI    = kMipsFPRegEnd,
-  kMipsRegLO,
-  kMipsRegPC,
-  kMipsRegEnd   = 51,
-  // Mips64 related:
-  kMips64FPRegEnd = 64,
-  kMips64RegPC    = kMips64FPRegEnd,
-  kMips64RegEnd   = 65,
-};
-
-#define ENCODE_MIPS_REG_LIST(N)      (static_cast<uint64_t>(N))
-#define ENCODE_MIPS_REG_SP           (1ULL << kMipsRegSP)
-#define ENCODE_MIPS_REG_LR           (1ULL << kMipsRegLR)
-#define ENCODE_MIPS_REG_PC           (1ULL << kMipsRegPC)
-#define ENCODE_MIPS_REG_HI           (1ULL << kMipsRegHI)
-#define ENCODE_MIPS_REG_LO           (1ULL << kMipsRegLO)
-
-// Set FR_BIT to 0.
-// This bit determines how the CPU accesses FP registers: FR=0 selects the
-// 32-bit FPU mode, in which even/odd register pairs hold doubles.
-#define FR_BIT   0
-
-enum MipsNativeRegisterPool {  // private marker to avoid generate-operator-out.py from processing.
-  rZERO  = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  0,
-  rZEROd = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  0,
-  rAT    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  1,
-  rATd   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  1,
-  rV0    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  2,
-  rV0d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  2,
-  rV1    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  3,
-  rV1d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  3,
-  rA0    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  4,
-  rA0d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  4,
-  rA1    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  5,
-  rA1d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  5,
-  rA2    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  6,
-  rA2d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  6,
-  rA3    = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  7,
-  rA3d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  7,
-  rT0_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  8,
-  rA4    = rT0_32,
-  rA4d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  8,
-  rT1_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister |  9,
-  rA5    = rT1_32,
-  rA5d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister |  9,
-  rT2_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
-  rA6    = rT2_32,
-  rA6d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 10,
-  rT3_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
-  rA7    = rT3_32,
-  rA7d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 11,
-  rT4_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
-  rT0    = rT4_32,
-  rT0d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 12,
-  rT5_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
-  rT1    = rT5_32,
-  rT1d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 13,
-  rT6_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
-  rT2    = rT6_32,
-  rT2d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 14,
-  rT7_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
-  rT3    = rT7_32,
-  rT3d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 15,
-  rS0    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,
-  rS0d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 16,
-  rS1    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 17,
-  rS1d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 17,
-  rS2    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 18,
-  rS2d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 18,
-  rS3    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 19,
-  rS3d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 19,
-  rS4    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 20,
-  rS4d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 20,
-  rS5    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 21,
-  rS5d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 21,
-  rS6    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 22,
-  rS6d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 22,
-  rS7    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 23,
-  rS7d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 23,
-  rT8    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 24,
-  rT8d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 24,
-  rT9    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 25,
-  rT9d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 25,
-  rK0    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 26,
-  rK0d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 26,
-  rK1    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 27,
-  rK1d   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 27,
-  rGP    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 28,
-  rGPd   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 28,
-  rSP    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 29,
-  rSPd   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 29,
-  rFP    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 30,
-  rFPd   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 30,
-  rRA    = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 31,
-  rRAd   = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 31,
-
-  rF0  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  0,
-  rF1  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  1,
-  rF2  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  2,
-  rF3  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  3,
-  rF4  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  4,
-  rF5  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  5,
-  rF6  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  6,
-  rF7  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  7,
-  rF8  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  8,
-  rF9  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint |  9,
-  rF10 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 10,
-  rF11 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 11,
-  rF12 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 12,
-  rF13 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 13,
-  rF14 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 14,
-  rF15 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 15,
-
-  rF16 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 16,
-  rF17 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 17,
-  rF18 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 18,
-  rF19 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 19,
-  rF20 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 20,
-  rF21 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 21,
-  rF22 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 22,
-  rF23 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 23,
-  rF24 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 24,
-  rF25 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 25,
-  rF26 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 26,
-  rF27 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 27,
-  rF28 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 28,
-  rF29 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 29,
-  rF30 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 30,
-  rF31 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 31,
-
-  /*
-   * TODO: The shared resource mask doesn't have enough bit positions to describe all
-   * MIPS registers.  Expand it so that fp registers 16 through 31 (defined above)
-   * can also be used on mips32.
-   */
-  // Double precision registers where the FPU is in 32-bit mode.
-  rD0_fr0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  0,
-  rD1_fr0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  2,
-  rD2_fr0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  4,
-  rD3_fr0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  6,
-  rD4_fr0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  8,
-  rD5_fr0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
-  rD6_fr0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
-  rD7_fr0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
-#if 0  // TODO: expand resource mask to enable use of all MIPS fp registers.
-  rD8_fr0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
-  rD9_fr0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
-  rD10_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
-  rD11_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
-  rD12_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
-  rD13_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
-  rD14_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
-  rD15_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
-#endif
-  // Double precision registers where the FPU is in 64-bit mode.
-  rD0_fr1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  0,
-  rD1_fr1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  2,
-  rD2_fr1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  4,
-  rD3_fr1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  6,
-  rD4_fr1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  8,
-  rD5_fr1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
-  rD6_fr1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
-  rD7_fr1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
-#if 0  // TODO: expand resource mask to enable use of all MIPS fp registers.
-  rD8_fr1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
-  rD9_fr1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
-  rD10_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
-  rD11_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
-  rD12_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
-  rD13_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
-  rD14_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
-  rD15_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
-#endif
-
-  rD0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  0,
-  rD1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  1,
-  rD2  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  2,
-  rD3  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  3,
-  rD4  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  4,
-  rD5  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  5,
-  rD6  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  6,
-  rD7  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  7,
-  rD8  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  8,
-  rD9  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint |  9,
-  rD10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
-  rD11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
-  rD12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
-  rD13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
-  rD14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
-  rD15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
-  rD16 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
-  rD17 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 17,
-  rD18 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
-  rD19 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 19,
-  rD20 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
-  rD21 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 21,
-  rD22 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
-  rD23 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 23,
-  rD24 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
-  rD25 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 25,
-  rD26 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
-  rD27 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 27,
-  rD28 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
-  rD29 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 29,
-  rD30 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
-  rD31 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 31,
-};
-
-constexpr RegStorage rs_rZERO(RegStorage::kValid | rZERO);
-constexpr RegStorage rs_rAT(RegStorage::kValid | rAT);
-constexpr RegStorage rs_rV0(RegStorage::kValid | rV0);
-constexpr RegStorage rs_rV1(RegStorage::kValid | rV1);
-constexpr RegStorage rs_rA0(RegStorage::kValid | rA0);
-constexpr RegStorage rs_rA1(RegStorage::kValid | rA1);
-constexpr RegStorage rs_rA2(RegStorage::kValid | rA2);
-constexpr RegStorage rs_rA3(RegStorage::kValid | rA3);
-constexpr RegStorage rs_rT0_32(RegStorage::kValid | rT0_32);
-constexpr RegStorage rs_rA4 = rs_rT0_32;
-constexpr RegStorage rs_rT1_32(RegStorage::kValid | rT1_32);
-constexpr RegStorage rs_rA5 = rs_rT1_32;
-constexpr RegStorage rs_rT2_32(RegStorage::kValid | rT2_32);
-constexpr RegStorage rs_rA6 = rs_rT2_32;
-constexpr RegStorage rs_rT3_32(RegStorage::kValid | rT3_32);
-constexpr RegStorage rs_rA7 = rs_rT3_32;
-constexpr RegStorage rs_rT4_32(RegStorage::kValid | rT4_32);
-constexpr RegStorage rs_rT0 = rs_rT4_32;
-constexpr RegStorage rs_rT5_32(RegStorage::kValid | rT5_32);
-constexpr RegStorage rs_rT1 = rs_rT5_32;
-constexpr RegStorage rs_rT6_32(RegStorage::kValid | rT6_32);
-constexpr RegStorage rs_rT2 = rs_rT6_32;
-constexpr RegStorage rs_rT7_32(RegStorage::kValid | rT7_32);
-constexpr RegStorage rs_rT3 = rs_rT7_32;
-constexpr RegStorage rs_rS0(RegStorage::kValid | rS0);
-constexpr RegStorage rs_rS1(RegStorage::kValid | rS1);
-constexpr RegStorage rs_rS2(RegStorage::kValid | rS2);
-constexpr RegStorage rs_rS3(RegStorage::kValid | rS3);
-constexpr RegStorage rs_rS4(RegStorage::kValid | rS4);
-constexpr RegStorage rs_rS5(RegStorage::kValid | rS5);
-constexpr RegStorage rs_rS6(RegStorage::kValid | rS6);
-constexpr RegStorage rs_rS7(RegStorage::kValid | rS7);
-constexpr RegStorage rs_rT8(RegStorage::kValid | rT8);
-constexpr RegStorage rs_rT9(RegStorage::kValid | rT9);
-constexpr RegStorage rs_rK0(RegStorage::kValid | rK0);
-constexpr RegStorage rs_rK1(RegStorage::kValid | rK1);
-constexpr RegStorage rs_rGP(RegStorage::kValid | rGP);
-constexpr RegStorage rs_rSP(RegStorage::kValid | rSP);
-constexpr RegStorage rs_rFP(RegStorage::kValid | rFP);
-constexpr RegStorage rs_rRA(RegStorage::kValid | rRA);
-
-constexpr RegStorage rs_rZEROd(RegStorage::kValid | rZEROd);
-constexpr RegStorage rs_rATd(RegStorage::kValid | rATd);
-constexpr RegStorage rs_rV0d(RegStorage::kValid | rV0d);
-constexpr RegStorage rs_rV1d(RegStorage::kValid | rV1d);
-constexpr RegStorage rs_rA0d(RegStorage::kValid | rA0d);
-constexpr RegStorage rs_rA1d(RegStorage::kValid | rA1d);
-constexpr RegStorage rs_rA2d(RegStorage::kValid | rA2d);
-constexpr RegStorage rs_rA3d(RegStorage::kValid | rA3d);
-constexpr RegStorage rs_rA4d(RegStorage::kValid | rA4d);
-constexpr RegStorage rs_rA5d(RegStorage::kValid | rA5d);
-constexpr RegStorage rs_rA6d(RegStorage::kValid | rA6d);
-constexpr RegStorage rs_rA7d(RegStorage::kValid | rA7d);
-constexpr RegStorage rs_rT0d(RegStorage::kValid | rT0d);
-constexpr RegStorage rs_rT1d(RegStorage::kValid | rT1d);
-constexpr RegStorage rs_rT2d(RegStorage::kValid | rT2d);
-constexpr RegStorage rs_rT3d(RegStorage::kValid | rT3d);
-constexpr RegStorage rs_rS0d(RegStorage::kValid | rS0d);
-constexpr RegStorage rs_rS1d(RegStorage::kValid | rS1d);
-constexpr RegStorage rs_rS2d(RegStorage::kValid | rS2d);
-constexpr RegStorage rs_rS3d(RegStorage::kValid | rS3d);
-constexpr RegStorage rs_rS4d(RegStorage::kValid | rS4d);
-constexpr RegStorage rs_rS5d(RegStorage::kValid | rS5d);
-constexpr RegStorage rs_rS6d(RegStorage::kValid | rS6d);
-constexpr RegStorage rs_rS7d(RegStorage::kValid | rS7d);
-constexpr RegStorage rs_rT8d(RegStorage::kValid | rT8d);
-constexpr RegStorage rs_rT9d(RegStorage::kValid | rT9d);
-constexpr RegStorage rs_rK0d(RegStorage::kValid | rK0d);
-constexpr RegStorage rs_rK1d(RegStorage::kValid | rK1d);
-constexpr RegStorage rs_rGPd(RegStorage::kValid | rGPd);
-constexpr RegStorage rs_rSPd(RegStorage::kValid | rSPd);
-constexpr RegStorage rs_rFPd(RegStorage::kValid | rFPd);
-constexpr RegStorage rs_rRAd(RegStorage::kValid | rRAd);
-
-constexpr RegStorage rs_rF0(RegStorage::kValid | rF0);
-constexpr RegStorage rs_rF1(RegStorage::kValid | rF1);
-constexpr RegStorage rs_rF2(RegStorage::kValid | rF2);
-constexpr RegStorage rs_rF3(RegStorage::kValid | rF3);
-constexpr RegStorage rs_rF4(RegStorage::kValid | rF4);
-constexpr RegStorage rs_rF5(RegStorage::kValid | rF5);
-constexpr RegStorage rs_rF6(RegStorage::kValid | rF6);
-constexpr RegStorage rs_rF7(RegStorage::kValid | rF7);
-constexpr RegStorage rs_rF8(RegStorage::kValid | rF8);
-constexpr RegStorage rs_rF9(RegStorage::kValid | rF9);
-constexpr RegStorage rs_rF10(RegStorage::kValid | rF10);
-constexpr RegStorage rs_rF11(RegStorage::kValid | rF11);
-constexpr RegStorage rs_rF12(RegStorage::kValid | rF12);
-constexpr RegStorage rs_rF13(RegStorage::kValid | rF13);
-constexpr RegStorage rs_rF14(RegStorage::kValid | rF14);
-constexpr RegStorage rs_rF15(RegStorage::kValid | rF15);
-
-constexpr RegStorage rs_rF16(RegStorage::kValid | rF16);
-constexpr RegStorage rs_rF17(RegStorage::kValid | rF17);
-constexpr RegStorage rs_rF18(RegStorage::kValid | rF18);
-constexpr RegStorage rs_rF19(RegStorage::kValid | rF19);
-constexpr RegStorage rs_rF20(RegStorage::kValid | rF20);
-constexpr RegStorage rs_rF21(RegStorage::kValid | rF21);
-constexpr RegStorage rs_rF22(RegStorage::kValid | rF22);
-constexpr RegStorage rs_rF23(RegStorage::kValid | rF23);
-constexpr RegStorage rs_rF24(RegStorage::kValid | rF24);
-constexpr RegStorage rs_rF25(RegStorage::kValid | rF25);
-constexpr RegStorage rs_rF26(RegStorage::kValid | rF26);
-constexpr RegStorage rs_rF27(RegStorage::kValid | rF27);
-constexpr RegStorage rs_rF28(RegStorage::kValid | rF28);
-constexpr RegStorage rs_rF29(RegStorage::kValid | rF29);
-constexpr RegStorage rs_rF30(RegStorage::kValid | rF30);
-constexpr RegStorage rs_rF31(RegStorage::kValid | rF31);
-
-constexpr RegStorage rs_rD0_fr0(RegStorage::kValid | rD0_fr0);
-constexpr RegStorage rs_rD1_fr0(RegStorage::kValid | rD1_fr0);
-constexpr RegStorage rs_rD2_fr0(RegStorage::kValid | rD2_fr0);
-constexpr RegStorage rs_rD3_fr0(RegStorage::kValid | rD3_fr0);
-constexpr RegStorage rs_rD4_fr0(RegStorage::kValid | rD4_fr0);
-constexpr RegStorage rs_rD5_fr0(RegStorage::kValid | rD5_fr0);
-constexpr RegStorage rs_rD6_fr0(RegStorage::kValid | rD6_fr0);
-constexpr RegStorage rs_rD7_fr0(RegStorage::kValid | rD7_fr0);
-
-constexpr RegStorage rs_rD0_fr1(RegStorage::kValid | rD0_fr1);
-constexpr RegStorage rs_rD1_fr1(RegStorage::kValid | rD1_fr1);
-constexpr RegStorage rs_rD2_fr1(RegStorage::kValid | rD2_fr1);
-constexpr RegStorage rs_rD3_fr1(RegStorage::kValid | rD3_fr1);
-constexpr RegStorage rs_rD4_fr1(RegStorage::kValid | rD4_fr1);
-constexpr RegStorage rs_rD5_fr1(RegStorage::kValid | rD5_fr1);
-constexpr RegStorage rs_rD6_fr1(RegStorage::kValid | rD6_fr1);
-constexpr RegStorage rs_rD7_fr1(RegStorage::kValid | rD7_fr1);
-
-constexpr RegStorage rs_rD0(RegStorage::kValid | rD0);
-constexpr RegStorage rs_rD1(RegStorage::kValid | rD1);
-constexpr RegStorage rs_rD2(RegStorage::kValid | rD2);
-constexpr RegStorage rs_rD3(RegStorage::kValid | rD3);
-constexpr RegStorage rs_rD4(RegStorage::kValid | rD4);
-constexpr RegStorage rs_rD5(RegStorage::kValid | rD5);
-constexpr RegStorage rs_rD6(RegStorage::kValid | rD6);
-constexpr RegStorage rs_rD7(RegStorage::kValid | rD7);
-constexpr RegStorage rs_rD8(RegStorage::kValid | rD8);
-constexpr RegStorage rs_rD9(RegStorage::kValid | rD9);
-constexpr RegStorage rs_rD10(RegStorage::kValid | rD10);
-constexpr RegStorage rs_rD11(RegStorage::kValid | rD11);
-constexpr RegStorage rs_rD12(RegStorage::kValid | rD12);
-constexpr RegStorage rs_rD13(RegStorage::kValid | rD13);
-constexpr RegStorage rs_rD14(RegStorage::kValid | rD14);
-constexpr RegStorage rs_rD15(RegStorage::kValid | rD15);
-constexpr RegStorage rs_rD16(RegStorage::kValid | rD16);
-constexpr RegStorage rs_rD17(RegStorage::kValid | rD17);
-constexpr RegStorage rs_rD18(RegStorage::kValid | rD18);
-constexpr RegStorage rs_rD19(RegStorage::kValid | rD19);
-constexpr RegStorage rs_rD20(RegStorage::kValid | rD20);
-constexpr RegStorage rs_rD21(RegStorage::kValid | rD21);
-constexpr RegStorage rs_rD22(RegStorage::kValid | rD22);
-constexpr RegStorage rs_rD23(RegStorage::kValid | rD23);
-constexpr RegStorage rs_rD24(RegStorage::kValid | rD24);
-constexpr RegStorage rs_rD25(RegStorage::kValid | rD25);
-constexpr RegStorage rs_rD26(RegStorage::kValid | rD26);
-constexpr RegStorage rs_rD27(RegStorage::kValid | rD27);
-constexpr RegStorage rs_rD28(RegStorage::kValid | rD28);
-constexpr RegStorage rs_rD29(RegStorage::kValid | rD29);
-constexpr RegStorage rs_rD30(RegStorage::kValid | rD30);
-constexpr RegStorage rs_rD31(RegStorage::kValid | rD31);
-
-// RegisterLocation templates for return values (r_V0, or the r_V0/r_V1 pair).
-const RegLocation mips_loc_c_return
-    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
-     RegStorage(RegStorage::k32BitSolo, rV0), INVALID_SREG, INVALID_SREG};
-const RegLocation mips64_loc_c_return_ref
-    {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
-     RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
-const RegLocation mips_loc_c_return_wide
-    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
-     RegStorage(RegStorage::k64BitPair, rV0, rV1), INVALID_SREG, INVALID_SREG};
-const RegLocation mips64_loc_c_return_wide
-    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
-     RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
-const RegLocation mips_loc_c_return_float
-    {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1,
-     RegStorage(RegStorage::k32BitSolo, rF0), INVALID_SREG, INVALID_SREG};
-// FIXME: move MIPS to k64Bitsolo for doubles
-const RegLocation mips_loc_c_return_double_fr0
-    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
-     RegStorage(RegStorage::k64BitPair, rF0, rF1), INVALID_SREG, INVALID_SREG};
-const RegLocation mips_loc_c_return_double_fr1
-    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
-     RegStorage(RegStorage::k64BitSolo, rF0), INVALID_SREG, INVALID_SREG};
-const RegLocation mips64_loc_c_return_double
-    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
-     RegStorage(RegStorage::k64BitSolo, rD0), INVALID_SREG, INVALID_SREG};
-
-enum MipsShiftEncodings {
-  kMipsLsl = 0x0,
-  kMipsLsr = 0x1,
-  kMipsAsr = 0x2,
-  kMipsRor = 0x3
-};
-
-// MIPS sync kinds (Note: support for kinds other than kSYNC0 may not exist).
-#define kSYNC0        0x00
-#define kSYNC_WMB     0x04
-#define kSYNC_MB      0x01
-#define kSYNC_ACQUIRE 0x11
-#define kSYNC_RELEASE 0x12
-#define kSYNC_RMB     0x13
-
-// TODO: Use smaller hammer when appropriate for target CPU.
-#define kST kSYNC0
-#define kSY kSYNC0
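
Note: the stype values above map barrier strength onto the 5-bit stype field of the MIPS `sync` instruction; per the TODO, kST and kSY both fall back to the full barrier kSYNC0. A minimal sketch of how a lighter stype could be chosen per barrier kind, assuming the target CPU implements the optional kinds (hypothetical helper, not part of the removed code):

    // Hypothetical mapping from barrier kind to sync stype; falls back to the
    // full barrier (kSYNC0) when the lightweight kinds are not implemented.
    enum class BarrierKind { kAny, kStoreStore, kLoadAny, kAnyStore };

    int SyncStypeFor(BarrierKind kind, bool has_light_sync) {
      if (!has_light_sync) {
        return 0x00;  // kSYNC0: always architecturally valid.
      }
      switch (kind) {
        case BarrierKind::kStoreStore: return 0x04;  // kSYNC_WMB
        case BarrierKind::kLoadAny:    return 0x11;  // kSYNC_ACQUIRE
        case BarrierKind::kAnyStore:   return 0x12;  // kSYNC_RELEASE
        default:                       return 0x00;  // kSYNC0
      }
    }
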
-
-/*
- * The following enum defines the list of MIPS instructions supported by the
- * assembler. Their corresponding EncodingMap positions are defined in
- * assemble_mips.cc.
- */
-enum MipsOpCode {
-  kMipsFirst = 0,
-  // The following are common mips32r2, mips32r6 and mips64r6 instructions.
-  kMips32BitData = kMipsFirst,  // data [31..0].
-  kMipsAddiu,      // addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
-  kMipsAddu,       // add d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001].
-  kMipsAnd,        // and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100].
-  kMipsAndi,       // andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0].
-  kMipsB,          // b o   [0001000000000000] o[15..0].
-  kMipsBal,        // bal o [0000010000010001] o[15..0].
-  // NOTE: the code tests the range kMipsBeq through kMipsBne, so adding an instruction in this
-  // range may require updates.
-  kMipsBeq,        // beq s,t,o [000100] s[25..21] t[20..16] o[15..0].
-  kMipsBeqz,       // beqz s,o [000100] s[25..21] [00000] o[15..0].
-  kMipsBgez,       // bgez s,o [000001] s[25..21] [00001] o[15..0].
-  kMipsBgtz,       // bgtz s,o [000111] s[25..21] [00000] o[15..0].
-  kMipsBlez,       // blez s,o [000110] s[25..21] [00000] o[15..0].
-  kMipsBltz,       // bltz s,o [000001] s[25..21] [00000] o[15..0].
-  kMipsBnez,       // bnez s,o [000101] s[25..21] [00000] o[15..0].
-  kMipsBne,        // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
-  kMipsExt,        // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
-  kMipsFaddd,      // add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000].
-  kMipsFadds,      // add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000].
-  kMipsFsubd,      // sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001].
-  kMipsFsubs,      // sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001].
-  kMipsFdivd,      // div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011].
-  kMipsFdivs,      // div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011].
-  kMipsFmuld,      // mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010].
-  kMipsFmuls,      // mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010].
-  kMipsFcvtsd,     // cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000].
-  kMipsFcvtsw,     // cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000].
-  kMipsFcvtds,     // cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001].
-  kMipsFcvtdw,     // cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001].
-  kMipsFcvtwd,     // cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100].
-  kMipsFcvtws,     // cvt.w.s d,s [01000110000] [00000] s[15..11] d[10..6] [100100].
-  kMipsFmovd,      // mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110].
-  kMipsFmovs,      // mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110].
-  kMipsFnegd,      // neg.d d,s [01000110001] [00000] s[15..11] d[10..6] [000111].
-  kMipsFnegs,      // neg.s d,s [01000110000] [00000] s[15..11] d[10..6] [000111].
-  kMipsFldc1,      // ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0].
-  kMipsFlwc1,      // lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0].
-  kMipsFsdc1,      // sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0].
-  kMipsFswc1,      // swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0].
-  kMipsJal,        // jal t [000011] t[25..0].
-  kMipsJalr,       // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
-  kMipsJr,         // jr s [000000] s[25..21] [0000000000] hint[10..6] [001000].
-  kMipsLahi,       // lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi.
-  kMipsLalo,       // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] load addr lo.
-  kMipsLui,        // lui t,imm16 [00111100000] t[20..16] imm16[15..0].
-  kMipsLb,         // lb t,o(b) [100000] b[25..21] t[20..16] o[15..0].
-  kMipsLbu,        // lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0].
-  kMipsLh,         // lh t,o(b) [100001] b[25..21] t[20..16] o[15..0].
-  kMipsLhu,        // lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0].
-  kMipsLw,         // lw t,o(b) [100011] b[25..21] t[20..16] o[15..0].
-  kMipsMove,       // move d,s [000000] s[25..21] [00000] d[15..11] [00000100101].
-  kMipsMfc1,       // mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000].
-  kMipsMtc1,       // mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000].
-  kMipsMfhc1,      // mfhc1 t,s [01000100011] t[20..16] s[15..11] [00000000000].
-  kMipsMthc1,      // mthc1 t,s [01000100111] t[20..16] s[15..11] [00000000000].
-  kMipsNop,        // nop [00000000000000000000000000000000].
-  kMipsNor,        // nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111].
-  kMipsOr,         // or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101].
-  kMipsOri,        // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
-  kMipsPref,       // pref h,o(b) [101011] b[25..21] h[20..16] o[15..0].
-  kMipsSb,         // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
-  kMipsSeb,        // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
-  kMipsSeh,        // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
-  kMipsSh,         // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
-  kMipsSll,        // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
-  kMipsSllv,       // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
-  kMipsSlt,        // slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010].
-  kMipsSlti,       // slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0].
-  kMipsSltu,       // sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011].
-  kMipsSra,        // sra d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000011].
-  kMipsSrav,       // srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111].
-  kMipsSrl,        // srl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000010].
-  kMipsSrlv,       // srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110].
-  kMipsSubu,       // subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011].
-  kMipsSw,         // sw t,o(b) [101011] b[25..21] t[20..16] o[15..0].
-  kMipsSync,       // sync kind [000000] [0000000000000000] s[10..6] [001111].
-  kMipsXor,        // xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110].
-  kMipsXori,       // xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0].
-
-  // The following are mips32r2 instructions.
-  kMipsR2Div,      // div s,t [000000] s[25..21] t[20..16] [0000000000011010].
-  kMipsR2Mul,      // mul d,s,t [011100] s[25..21] t[20..16] d[15..11] [00000000010].
-  kMipsR2Mfhi,     // mfhi d [0000000000000000] d[15..11] [00000010000].
-  kMipsR2Mflo,     // mflo d [0000000000000000] d[15..11] [00000010010].
-  kMipsR2Movz,     // movz d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000001010].
-
-  // The following are mips32r6 and mips64r6 instructions.
-  kMipsR6Div,      // div d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011010].
-  kMipsR6Mod,      // mod d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011010].
-  kMipsR6Mul,      // mul d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011000].
-
-  // The following are mips64r6 instructions.
-  kMips64Daddiu,   // daddiu t,s,imm16 [011001] s[25..21] t[20..16] imm16[15..0].
-  kMips64Daddu,    // daddu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101101].
-  kMips64Dahi,     // dahi s,imm16 [000001] s[25..21] [00110] imm16[15..0].
-  kMips64Dati,     // dati s,imm16 [000001] s[25..21] [11110] imm16[15..0].
-  kMips64Daui,     // daui t,s,imm16 [011101] s[25..21] t[20..16] imm16[15..0].
-  kMips64Ddiv,     // ddiv  d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011110].
-  kMips64Dmod,     // dmod  d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011110].
-  kMips64Dmul,     // dmul  d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011100].
-  kMips64Dmfc1,    // dmfc1 t,s [01000100001] t[20..16] s[15..11] [00000000000].
-  kMips64Dmtc1,    // dmtc1 t,s [01000100101] t[20..16] s[15..11] [00000000000].
-  kMips64Drotr32,  // drotr32 d,t,a [00000000001] t[20..16] d[15..11] a[10..6] [111110].
-  kMips64Dsll,     // dsll    d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111000].
-  kMips64Dsll32,   // dsll32  d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111100].
-  kMips64Dsrl,     // dsrl    d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111010].
-  kMips64Dsrl32,   // dsrl32  d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111110].
-  kMips64Dsra,     // dsra    d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111011].
-  kMips64Dsra32,   // dsra32  d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111111].
-  kMips64Dsllv,    // dsllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010100].
-  kMips64Dsrlv,    // dsrlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010110].
-  kMips64Dsrav,    // dsrav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010111].
-  kMips64Dsubu,    // dsubu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101111].
-  kMips64Ld,       // ld  t,o(b) [110111] b[25..21] t[20..16] o[15..0].
-  kMips64Lwu,      // lwu t,o(b) [100111] b[25..21] t[20..16] o[15..0].
-  kMips64Sd,       // sd t,o(b) [111111] b[25..21] t[20..16] o[15..0].
-
-  // The following are pseudoinstructions.
-  kMipsDelta,      // Pseudo for ori t, s, <label>-<label>.
-  kMipsDeltaHi,    // Pseudo for lui t, high16(<label>-<label>).
-  kMipsDeltaLo,    // Pseudo for ori t, s, low16(<label>-<label>).
-  kMipsCurrPC,     // jal to .+8 to materialize pc.
-  kMipsUndefined,  // undefined [011001xxxxxxxxxxxxxxxx].
-  kMipsLast
-};
-std::ostream& operator<<(std::ostream& os, const MipsOpCode& rhs);
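
The NOTE inside the enum is load-bearing: callers test opcode ranges numerically, so kMipsBeq through kMipsBne must stay contiguous. A sketch of such a range test (hypothetical helper built on the enum above):

    // Relies on kMipsBeq..kMipsBne being declared contiguously in MipsOpCode.
    bool IsInBeqBneRange(MipsOpCode opcode) {
      return opcode >= kMipsBeq && opcode <= kMipsBne;
    }
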
-
-// Instruction assembly field_loc kind.
-enum MipsEncodingKind {
-  kFmtUnused,
-  kFmtBitBlt,    // Bit string using end/start.
-  kFmtDfp,       // Double FP reg.
-  kFmtSfp,       // Single FP reg.
-  kFmtBlt5_2,    // Same 5-bit field to 2 locations.
-};
-std::ostream& operator<<(std::ostream& os, const MipsEncodingKind& rhs);
-
-// Struct used to define the snippet positions for each MIPS opcode.
-struct MipsEncodingMap {
-  uint32_t skeleton;
-  struct {
-    MipsEncodingKind kind;
-    int end;   // end for kFmtBitBlt, 1-bit slice end for FP regs.
-    int start;  // start for kFmtBitBlt, 4-bit slice end for FP regs.
-  } field_loc[4];
-  MipsOpCode opcode;
-  uint64_t flags;
-  const char *name;
-  const char* fmt;
-  int size;   // Note: size is in bytes.
-};
-
-extern MipsEncodingMap EncodingMap[kMipsLast];
-
-#define IS_UIMM16(v) ((0 <= (v)) && ((v) <= 65535))
-#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32766))
-#define IS_SIMM16_2WORD(v) ((-32764 <= (v)) && ((v) <= 32763))  // 2 offsets must fit.
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_MIPS_MIPS_LIR_H_
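
The asymmetric bounds in IS_SIMM16 and IS_SIMM16_2WORD appear to account for splitting a wide (64-bit) access on MIPS32 into two word accesses at disp and disp + 4, both of which must encode as hardware signed 16-bit immediates. A standalone restatement under that assumption:

    #include <cstdint>

    // Standalone restatement of the removed macros, for illustration only.
    constexpr bool IsSimm16(int32_t v) { return -32768 <= v && v <= 32766; }
    constexpr bool IsSimm16TwoWord(int32_t v) { return -32764 <= v && v <= 32763; }

    // A wide access at `disp` becomes two word accesses at disp and disp + 4;
    // IsSimm16TwoWord(disp) guarantees both displacements fit hardware simm16.
    bool WideDispEncodable(int32_t disp) {
      return IsSimm16TwoWord(disp);
    }
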
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
deleted file mode 100644
index 09d37f8..0000000
--- a/compiler/dex/quick/mips/target_mips.cc
+++ /dev/null
@@ -1,976 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_mips.h"
-
-#include <inttypes.h>
-
-#include <string>
-
-#include "arch/mips/instruction_set_features_mips.h"
-#include "backend_mips.h"
-#include "base/logging.h"
-#include "dex/compiler_ir.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "driver/compiler_driver.h"
-#include "mips_lir.h"
-
-namespace art {
-
-static constexpr RegStorage core_regs_arr_32[] =
-    {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0_32, rs_rT1_32,
-     rs_rT2_32, rs_rT3_32, rs_rT4_32, rs_rT5_32, rs_rT6_32, rs_rT7_32, rs_rS0, rs_rS1, rs_rS2,
-     rs_rS3, rs_rS4, rs_rS5, rs_rS6, rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP,
-     rs_rRA};
-static constexpr RegStorage sp_regs_arr_32[] =
-    {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
-     rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
-static constexpr RegStorage dp_fr0_regs_arr_32[] =
-    {rs_rD0_fr0, rs_rD1_fr0, rs_rD2_fr0, rs_rD3_fr0, rs_rD4_fr0, rs_rD5_fr0, rs_rD6_fr0,
-     rs_rD7_fr0};
-static constexpr RegStorage dp_fr1_regs_arr_32[] =
-    {rs_rD0_fr1, rs_rD1_fr1, rs_rD2_fr1, rs_rD3_fr1, rs_rD4_fr1, rs_rD5_fr1, rs_rD6_fr1,
-     rs_rD7_fr1};
-static constexpr RegStorage reserved_regs_arr_32[] =
-    {rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
-static constexpr RegStorage core_temps_arr_32[] =
-    {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0_32, rs_rT1_32, rs_rT2_32, rs_rT3_32,
-     rs_rT4_32, rs_rT5_32, rs_rT6_32, rs_rT7_32, rs_rT8};
-static constexpr RegStorage sp_fr0_temps_arr_32[] =
-    {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
-     rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
-static constexpr RegStorage sp_fr1_temps_arr_32[] =
-    {rs_rF0, rs_rF2, rs_rF4, rs_rF6, rs_rF8, rs_rF10, rs_rF12, rs_rF14};
-static constexpr RegStorage dp_fr0_temps_arr_32[] =
-    {rs_rD0_fr0, rs_rD1_fr0, rs_rD2_fr0, rs_rD3_fr0, rs_rD4_fr0, rs_rD5_fr0, rs_rD6_fr0,
-     rs_rD7_fr0};
-static constexpr RegStorage dp_fr1_temps_arr_32[] =
-    {rs_rD0_fr1, rs_rD1_fr1, rs_rD2_fr1, rs_rD3_fr1, rs_rD4_fr1, rs_rD5_fr1, rs_rD6_fr1,
-     rs_rD7_fr1};
-
-static constexpr RegStorage core_regs_arr_64[] =
-    {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6,
-     rs_rA7, rs_rT0, rs_rT1, rs_rT2, rs_rT3, rs_rS0, rs_rS1, rs_rS2, rs_rS3, rs_rS4, rs_rS5, rs_rS6,
-     rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP, rs_rRA};
-static constexpr RegStorage core_regs_arr_64d[] =
-    {rs_rZEROd, rs_rATd, rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d,
-     rs_rA6d, rs_rA7d, rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rS0d, rs_rS1d, rs_rS2d, rs_rS3d,
-     rs_rS4d, rs_rS5d, rs_rS6d, rs_rS7d, rs_rT8d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd,
-     rs_rFPd, rs_rRAd};
-#if 0
-// TODO: f24-f31 must be saved before calls and restored after.
-static constexpr RegStorage sp_regs_arr_64[] =
-    {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
-     rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
-     rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
-     rs_rF31};
-static constexpr RegStorage dp_regs_arr_64[] =
-    {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
-     rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
-     rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
-     rs_rD31};
-#else
-static constexpr RegStorage sp_regs_arr_64[] =
-    {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
-     rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
-     rs_rF21, rs_rF22, rs_rF23};
-static constexpr RegStorage dp_regs_arr_64[] =
-    {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
-     rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
-     rs_rD21, rs_rD22, rs_rD23};
-#endif
-static constexpr RegStorage reserved_regs_arr_64[] =
-    {rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
-static constexpr RegStorage reserved_regs_arr_64d[] =
-    {rs_rZEROd, rs_rATd, rs_rS0d, rs_rS1d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd, rs_rRAd};
-static constexpr RegStorage core_temps_arr_64[] =
-    {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6, rs_rA7, rs_rT0, rs_rT1,
-     rs_rT2, rs_rT3, rs_rT8};
-static constexpr RegStorage core_temps_arr_64d[] =
-    {rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d, rs_rA6d, rs_rA7d,
-     rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rT8d};
-#if 0
-// TODO: f24-f31 must be saved before calls and restored after.
-static constexpr RegStorage sp_temps_arr_64[] =
-    {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
-     rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
-     rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
-     rs_rF31};
-static constexpr RegStorage dp_temps_arr_64[] =
-    {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
-     rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
-     rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
-     rs_rD31};
-#else
-static constexpr RegStorage sp_temps_arr_64[] =
-    {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
-     rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
-     rs_rF21, rs_rF22, rs_rF23};
-static constexpr RegStorage dp_temps_arr_64[] =
-    {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
-     rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
-     rs_rD21, rs_rD22, rs_rD23};
-#endif
-
-static constexpr ArrayRef<const RegStorage> empty_pool;
-static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
-static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
-static constexpr ArrayRef<const RegStorage> dp_fr0_regs_32(dp_fr0_regs_arr_32);
-static constexpr ArrayRef<const RegStorage> dp_fr1_regs_32(dp_fr1_regs_arr_32);
-static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
-static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
-static constexpr ArrayRef<const RegStorage> sp_fr0_temps_32(sp_fr0_temps_arr_32);
-static constexpr ArrayRef<const RegStorage> sp_fr1_temps_32(sp_fr1_temps_arr_32);
-static constexpr ArrayRef<const RegStorage> dp_fr0_temps_32(dp_fr0_temps_arr_32);
-static constexpr ArrayRef<const RegStorage> dp_fr1_temps_32(dp_fr1_temps_arr_32);
-
-static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
-static constexpr ArrayRef<const RegStorage> core_regs_64d(core_regs_arr_64d);
-static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
-static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
-static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
-static constexpr ArrayRef<const RegStorage> reserved_regs_64d(reserved_regs_arr_64d);
-static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
-static constexpr ArrayRef<const RegStorage> core_temps_64d(core_temps_arr_64d);
-static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
-static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);
-
-RegLocation MipsMir2Lir::LocCReturn() {
-  return mips_loc_c_return;
-}
-
-RegLocation MipsMir2Lir::LocCReturnRef() {
-  return cu_->target64 ? mips64_loc_c_return_ref : mips_loc_c_return;
-}
-
-RegLocation MipsMir2Lir::LocCReturnWide() {
-  return cu_->target64 ? mips64_loc_c_return_wide : mips_loc_c_return_wide;
-}
-
-RegLocation MipsMir2Lir::LocCReturnFloat() {
-  return mips_loc_c_return_float;
-}
-
-RegLocation MipsMir2Lir::LocCReturnDouble() {
-  if (cu_->target64) {
-    return mips64_loc_c_return_double;
-  } else if (fpuIs32Bit_) {
-    return mips_loc_c_return_double_fr0;
-  } else {
-    return mips_loc_c_return_double_fr1;
-  }
-}
-
-// Convert k64BitSolo into k64BitPair.
-RegStorage MipsMir2Lir::Solo64ToPair64(RegStorage reg) {
-  DCHECK(reg.IsDouble());
-  DCHECK_EQ(reg.GetRegNum() & 1, 0);
-  int reg_num = (reg.GetRegNum() & ~1) | RegStorage::kFloatingPoint;
-  return RegStorage(RegStorage::k64BitPair, reg_num, reg_num + 1);
-}
-
-// Convert 64bit FP (k64BitSolo or k64BitPair) into k32BitSolo.
-// This routine is only used to allow a 64bit FPU to access FP registers 32 bits at a time.
-RegStorage MipsMir2Lir::Fp64ToSolo32(RegStorage reg) {
-  DCHECK(!fpuIs32Bit_);
-  DCHECK(reg.IsDouble());
-  DCHECK(!reg.IsPair());
-  int reg_num = reg.GetRegNum() | RegStorage::kFloatingPoint;
-  return RegStorage(RegStorage::k32BitSolo, reg_num);
-}
-
-// Return a target-dependent special register.
-RegStorage MipsMir2Lir::TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
-  if (!cu_->target64 && wide_kind == kWide) {
-    DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 == reg) || (kFArg2 == reg) || (kRet0 == reg));
-    RegStorage ret_reg = RegStorage::MakeRegPair(TargetReg(reg),
-                                     TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
-    if (!fpuIs32Bit_ && ret_reg.IsFloat()) {
-      // convert 64BitPair to 64BitSolo for 64bit FPUs.
-      RegStorage low = ret_reg.GetLow();
-      ret_reg = RegStorage::FloatSolo64(low.GetRegNum());
-    }
-    return ret_reg;
-  } else if (cu_->target64 && (wide_kind == kWide || wide_kind == kRef)) {
-    return As64BitReg(TargetReg(reg));
-  } else {
-    return TargetReg(reg);
-  }
-}
-
-// Return a target-dependent special register.
-RegStorage MipsMir2Lir::TargetReg(SpecialTargetRegister reg) {
-  RegStorage res_reg;
-  switch (reg) {
-    case kSelf: res_reg = rs_rS1; break;
-    case kSuspend: res_reg =  rs_rS0; break;
-    case kLr: res_reg =  rs_rRA; break;
-    case kPc: res_reg = RegStorage::InvalidReg(); break;
-    case kSp: res_reg =  rs_rSP; break;
-    case kArg0: res_reg = rs_rA0; break;
-    case kArg1: res_reg = rs_rA1; break;
-    case kArg2: res_reg = rs_rA2; break;
-    case kArg3: res_reg = rs_rA3; break;
-    case kArg4: res_reg = cu_->target64 ? rs_rA4 : RegStorage::InvalidReg(); break;
-    case kArg5: res_reg = cu_->target64 ? rs_rA5 : RegStorage::InvalidReg(); break;
-    case kArg6: res_reg = cu_->target64 ? rs_rA6 : RegStorage::InvalidReg(); break;
-    case kArg7: res_reg = cu_->target64 ? rs_rA7 : RegStorage::InvalidReg(); break;
-    case kFArg0: res_reg = rs_rF12; break;
-    case kFArg1: res_reg = rs_rF13; break;
-    case kFArg2: res_reg = rs_rF14; break;
-    case kFArg3: res_reg = rs_rF15; break;
-    case kFArg4: res_reg = cu_->target64 ? rs_rF16 : RegStorage::InvalidReg(); break;
-    case kFArg5: res_reg = cu_->target64 ? rs_rF17 : RegStorage::InvalidReg(); break;
-    case kFArg6: res_reg = cu_->target64 ? rs_rF18 : RegStorage::InvalidReg(); break;
-    case kFArg7: res_reg = cu_->target64 ? rs_rF19 : RegStorage::InvalidReg(); break;
-    case kRet0: res_reg = rs_rV0; break;
-    case kRet1: res_reg = rs_rV1; break;
-    case kInvokeTgt: res_reg = rs_rT9; break;
-    case kHiddenArg: res_reg = cu_->target64 ? rs_rT0 : rs_rT0_32; break;
-    case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
-    case kCount: res_reg = RegStorage::InvalidReg(); break;
-    default: res_reg = RegStorage::InvalidReg();
-  }
-  return res_reg;
-}
-
-RegStorage MipsMir2Lir::InToRegStorageMipsMapper::GetNextReg(ShortyArg arg) {
-  const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3};
-  const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
-  const SpecialTargetRegister fpuArgMappingToPhysicalReg[] = {kFArg0, kFArg2};
-  const size_t fpuArgMappingToPhysicalRegSize = arraysize(fpuArgMappingToPhysicalReg);
-
-  RegStorage result = RegStorage::InvalidReg();
-  if (arg.IsFP()) {
-    if (cur_fpu_reg_ < fpuArgMappingToPhysicalRegSize) {
-      result = m2l_->TargetReg(fpuArgMappingToPhysicalReg[cur_fpu_reg_++],
-                               arg.IsWide() ? kWide : kNotWide);
-    }
-  } else {
-    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
-      if (arg.IsWide() && cur_core_reg_ == 0) {
-        // Don't use a1-a2 as a register pair, move to a2-a3 instead.
-        cur_core_reg_++;
-      }
-      result = m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++],
-                               arg.IsRef() ? kRef : kNotWide);
-      if (arg.IsWide() && cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
-        result = RegStorage::MakeRegPair(
-            result, m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], kNotWide));
-      }
-    }
-  }
-  return result;
-}
-
-RegStorage MipsMir2Lir::InToRegStorageMips64Mapper::GetNextReg(ShortyArg arg) {
-  const SpecialTargetRegister coreArgMappingToPhysicalReg[] =
-      {kArg1, kArg2, kArg3, kArg4, kArg5, kArg6, kArg7};
-  const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
-  const SpecialTargetRegister fpArgMappingToPhysicalReg[] =
-      {kFArg1, kFArg2, kFArg3, kFArg4, kFArg5, kFArg6, kFArg7};
-  const size_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
-
-  RegStorage result = RegStorage::InvalidReg();
-  if (arg.IsFP()) {
-    if (cur_arg_reg_ < fpArgMappingToPhysicalRegSize) {
-      DCHECK(!arg.IsRef());
-      result = m2l_->TargetReg(fpArgMappingToPhysicalReg[cur_arg_reg_++],
-                               arg.IsWide() ? kWide : kNotWide);
-    }
-  } else {
-    if (cur_arg_reg_ < coreArgMappingToPhysicalRegSize) {
-      DCHECK(!(arg.IsWide() && arg.IsRef()));
-      result = m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_arg_reg_++],
-                               arg.IsRef() ? kRef : (arg.IsWide() ? kWide : kNotWide));
-    }
-  }
-  return result;
-}
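
The 32-bit mapper's skip at cur_core_reg_ == 0 enforces the o32 convention that 64-bit arguments live in aligned register pairs: a wide argument may occupy a2/a3 but not a1/a2. A toy trace of that rule (not ART code):

    #include <cstdio>

    // Toy re-creation of the skip-to-aligned-pair rule for a leading wide arg.
    int main() {
      const char* regs[] = {"a1", "a2", "a3"};
      int cur = 0;
      bool first_arg_is_wide = true;
      if (first_arg_is_wide && cur == 0) {
        cur++;  // Don't pair a1/a2; move to the aligned a2/a3 pair.
      }
      printf("wide arg in %s/%s\n", regs[cur], regs[cur + 1]);
      return 0;
    }
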
-
-/*
- * Decode the register id.
- */
-ResourceMask MipsMir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
-  if (cu_->target64) {
-    return ResourceMask::Bit((reg.IsFloat() ? kMipsFPReg0 : 0) + reg.GetRegNum());
-  } else {
-    if (reg.IsDouble()) {
-      return ResourceMask::TwoBits((reg.GetRegNum() & ~1) + kMipsFPReg0);
-    } else if (reg.IsSingle()) {
-      return ResourceMask::Bit(reg.GetRegNum() + kMipsFPReg0);
-    } else {
-      return ResourceMask::Bit(reg.GetRegNum());
-    }
-  }
-}
-
-ResourceMask MipsMir2Lir::GetPCUseDefEncoding() const {
-  return cu_->target64 ? ResourceMask::Bit(kMips64RegPC) : ResourceMask::Bit(kMipsRegPC);
-}
-
-void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
-                                           ResourceMask* def_mask) {
-  DCHECK(!lir->flags.use_def_invalid);
-
-  // Mips-specific resource map setup here.
-  if (flags & REG_DEF_SP) {
-    def_mask->SetBit(kMipsRegSP);
-  }
-
-  if (flags & REG_USE_SP) {
-    use_mask->SetBit(kMipsRegSP);
-  }
-
-  if (flags & REG_DEF_LR) {
-    def_mask->SetBit(kMipsRegLR);
-  }
-
-  if (!cu_->target64) {
-    if (flags & REG_DEF_HI) {
-      def_mask->SetBit(kMipsRegHI);
-    }
-
-    if (flags & REG_DEF_LO) {
-      def_mask->SetBit(kMipsRegLO);
-    }
-
-    if (flags & REG_USE_HI) {
-      use_mask->SetBit(kMipsRegHI);
-    }
-
-    if (flags & REG_USE_LO) {
-      use_mask->SetBit(kMipsRegLO);
-    }
-  }
-}
-
-/* For dumping instructions */
-#define MIPS_REG_COUNT 32
-static const char *mips_reg_name[MIPS_REG_COUNT] = {
-  "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
-  "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
-  "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
-  "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
-};
-
-static const char *mips64_reg_name[MIPS_REG_COUNT] = {
-  "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
-  "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
-  "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
-  "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
-};
-
-/*
- * Interpret a format string and build a string no longer than size.
- * See the format key in assemble_mips.cc.
- */
-std::string MipsMir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
-  std::string buf;
-  int i;
-  const char *fmt_end = &fmt[strlen(fmt)];
-  char tbuf[256];
-  char nc;
-  while (fmt < fmt_end) {
-    int operand;
-    if (*fmt == '!') {
-      fmt++;
-      DCHECK_LT(fmt, fmt_end);
-      nc = *fmt++;
-      if (nc == '!') {
-        strcpy(tbuf, "!");
-      } else {
-        DCHECK_LT(fmt, fmt_end);
-        DCHECK_LT(static_cast<unsigned>(nc-'0'), 4u);
-        operand = lir->operands[nc-'0'];
-        switch (*fmt++) {
-          case 'b':
-            strcpy(tbuf, "0000");
-            for (i = 3; i >= 0; i--) {
-              tbuf[i] += operand & 1;
-              operand >>= 1;
-            }
-            break;
-          case 's':
-            snprintf(tbuf, arraysize(tbuf), "$f%d", RegStorage::RegNum(operand));
-            break;
-          case 'S':
-            DCHECK_EQ(RegStorage::RegNum(operand) & 1, 0);
-            snprintf(tbuf, arraysize(tbuf), "$f%d", RegStorage::RegNum(operand));
-            break;
-          case 'h':
-            snprintf(tbuf, arraysize(tbuf), "%04x", operand);
-            break;
-          case 'M':
-          case 'd':
-            snprintf(tbuf, arraysize(tbuf), "%d", operand);
-            break;
-          case 'D':
-            snprintf(tbuf, arraysize(tbuf), "%d", operand+1);
-            break;
-          case 'E':
-            snprintf(tbuf, arraysize(tbuf), "%d", operand*4);
-            break;
-          case 'F':
-            snprintf(tbuf, arraysize(tbuf), "%d", operand*2);
-            break;
-          case 't':
-            snprintf(tbuf, arraysize(tbuf), "0x%08" PRIxPTR " (L%p)",
-                     reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4 + (operand << 1),
-                     lir->target);
-            break;
-          case 'T':
-            snprintf(tbuf, arraysize(tbuf), "0x%08x", operand << 2);
-            break;
-          case 'u': {
-            int offset_1 = lir->operands[0];
-            int offset_2 = NEXT_LIR(lir)->operands[0];
-            uintptr_t target =
-                (((reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4) & ~3) +
-                    (offset_1 << 21 >> 9) + (offset_2 << 1)) & 0xfffffffc;
-            snprintf(tbuf, arraysize(tbuf), "%p", reinterpret_cast<void*>(target));
-            break;
-          }
-
-          /* Nothing to print for BLX_2 */
-          case 'v':
-            strcpy(tbuf, "see above");
-            break;
-          case 'r':
-            DCHECK(operand >= 0 && operand < MIPS_REG_COUNT);
-            if (cu_->target64) {
-              strcpy(tbuf, mips64_reg_name[operand]);
-            } else {
-              strcpy(tbuf, mips_reg_name[operand]);
-            }
-            break;
-          case 'N':
-            // Placeholder for delay slot handling
-            strcpy(tbuf, ";  nop");
-            break;
-          default:
-            strcpy(tbuf, "DecodeError");
-            break;
-        }
-        buf += tbuf;
-      }
-    } else {
-      buf += *fmt++;
-    }
-  }
-  return buf;
-}
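
BuildInsnString is a small printf-style interpreter: a '!' escape names an operand index and a format key (the full key set is in assemble_mips.cc). A reduced sketch of the expansion loop, supporting only hypothetical 'r' and 'd' keys and assuming a well-formed format string where operands are plain register numbers:

    #include <cstdio>
    #include <string>

    // Toy expansion of "!<index><key>" escapes; not the removed code.
    std::string Expand(const char* fmt, const int* operands,
                       const char* const* reg_names) {
      std::string out;
      char tmp[32];
      for (const char* p = fmt; *p != '\0'; ++p) {
        if (*p != '!') {
          out += *p;
          continue;
        }
        int operand = operands[*++p - '0'];  // Digit selects operands[n].
        switch (*++p) {
          case 'r': out += reg_names[operand]; break;  // Register name.
          case 'd': snprintf(tmp, sizeof(tmp), "%d", operand); out += tmp; break;
        }
      }
      return out;
    }
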
-
-// FIXME: the resource maps for MIPS need to be redone; fix this routine at that time.
-void MipsMir2Lir::DumpResourceMask(LIR *mips_lir, const ResourceMask& mask, const char *prefix) {
-  char buf[256];
-  buf[0] = 0;
-
-  if (mask.Equals(kEncodeAll)) {
-    strcpy(buf, "all");
-  } else {
-    char num[8];
-    int i;
-
-    for (i = 0; i < (cu_->target64 ? kMips64RegEnd : kMipsRegEnd); i++) {
-      if (mask.HasBit(i)) {
-        snprintf(num, arraysize(num), "%d ", i);
-        strcat(buf, num);
-      }
-    }
-
-    if (mask.HasBit(ResourceMask::kCCode)) {
-      strcat(buf, "cc ");
-    }
-    if (mask.HasBit(ResourceMask::kFPStatus)) {
-      strcat(buf, "fpcc ");
-    }
-    // Memory bits.
-    if (mips_lir && (mask.HasBit(ResourceMask::kDalvikReg))) {
-      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
-               DECODE_ALIAS_INFO_REG(mips_lir->flags.alias_info),
-               DECODE_ALIAS_INFO_WIDE(mips_lir->flags.alias_info) ? "(+1)" : "");
-    }
-    if (mask.HasBit(ResourceMask::kLiteral)) {
-      strcat(buf, "lit ");
-    }
-
-    if (mask.HasBit(ResourceMask::kHeapRef)) {
-      strcat(buf, "heap ");
-    }
-    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
-      strcat(buf, "noalias ");
-    }
-  }
-  if (buf[0]) {
-    LOG(INFO) << prefix << ": " <<  buf;
-  }
-}
-
-/*
- * TUNING: is this a true leaf?  We can't just use METHOD_IS_LEAF to decide, as some
- * instructions might call out to C/assembly helper functions.  Until that
- * machinery is in place, always spill lr.
- */
-
-void MipsMir2Lir::AdjustSpillMask() {
-  core_spill_mask_ |= (1 << rs_rRA.GetRegNum());
-  num_core_spills_++;
-}
-
-/* Clobber all regs that might be used by an external C call */
-void MipsMir2Lir::ClobberCallerSave() {
-  if (cu_->target64) {
-    Clobber(rs_rZEROd);
-    Clobber(rs_rATd);
-    Clobber(rs_rV0d);
-    Clobber(rs_rV1d);
-    Clobber(rs_rA0d);
-    Clobber(rs_rA1d);
-    Clobber(rs_rA2d);
-    Clobber(rs_rA3d);
-    Clobber(rs_rA4d);
-    Clobber(rs_rA5d);
-    Clobber(rs_rA6d);
-    Clobber(rs_rA7d);
-    Clobber(rs_rT0d);
-    Clobber(rs_rT1d);
-    Clobber(rs_rT2d);
-    Clobber(rs_rT3d);
-    Clobber(rs_rT8d);
-    Clobber(rs_rT9d);
-    Clobber(rs_rK0d);
-    Clobber(rs_rK1d);
-    Clobber(rs_rGPd);
-    Clobber(rs_rFPd);
-    Clobber(rs_rRAd);
-
-    Clobber(rs_rF0);
-    Clobber(rs_rF1);
-    Clobber(rs_rF2);
-    Clobber(rs_rF3);
-    Clobber(rs_rF4);
-    Clobber(rs_rF5);
-    Clobber(rs_rF6);
-    Clobber(rs_rF7);
-    Clobber(rs_rF8);
-    Clobber(rs_rF9);
-    Clobber(rs_rF10);
-    Clobber(rs_rF11);
-    Clobber(rs_rF12);
-    Clobber(rs_rF13);
-    Clobber(rs_rF14);
-    Clobber(rs_rF15);
-    Clobber(rs_rD0);
-    Clobber(rs_rD1);
-    Clobber(rs_rD2);
-    Clobber(rs_rD3);
-    Clobber(rs_rD4);
-    Clobber(rs_rD5);
-    Clobber(rs_rD6);
-    Clobber(rs_rD7);
-  } else {
-    Clobber(rs_rZERO);
-    Clobber(rs_rAT);
-    Clobber(rs_rV0);
-    Clobber(rs_rV1);
-    Clobber(rs_rA0);
-    Clobber(rs_rA1);
-    Clobber(rs_rA2);
-    Clobber(rs_rA3);
-    Clobber(rs_rT0_32);
-    Clobber(rs_rT1_32);
-    Clobber(rs_rT2_32);
-    Clobber(rs_rT3_32);
-    Clobber(rs_rT4_32);
-    Clobber(rs_rT5_32);
-    Clobber(rs_rT6_32);
-    Clobber(rs_rT7_32);
-    Clobber(rs_rT8);
-    Clobber(rs_rT9);
-    Clobber(rs_rK0);
-    Clobber(rs_rK1);
-    Clobber(rs_rGP);
-    Clobber(rs_rFP);
-    Clobber(rs_rRA);
-    Clobber(rs_rF0);
-    Clobber(rs_rF2);
-    Clobber(rs_rF4);
-    Clobber(rs_rF6);
-    Clobber(rs_rF8);
-    Clobber(rs_rF10);
-    Clobber(rs_rF12);
-    Clobber(rs_rF14);
-    if (fpuIs32Bit_) {
-      Clobber(rs_rF1);
-      Clobber(rs_rF3);
-      Clobber(rs_rF5);
-      Clobber(rs_rF7);
-      Clobber(rs_rF9);
-      Clobber(rs_rF11);
-      Clobber(rs_rF13);
-      Clobber(rs_rF15);
-      Clobber(rs_rD0_fr0);
-      Clobber(rs_rD1_fr0);
-      Clobber(rs_rD2_fr0);
-      Clobber(rs_rD3_fr0);
-      Clobber(rs_rD4_fr0);
-      Clobber(rs_rD5_fr0);
-      Clobber(rs_rD6_fr0);
-      Clobber(rs_rD7_fr0);
-    } else {
-      Clobber(rs_rD0_fr1);
-      Clobber(rs_rD1_fr1);
-      Clobber(rs_rD2_fr1);
-      Clobber(rs_rD3_fr1);
-      Clobber(rs_rD4_fr1);
-      Clobber(rs_rD5_fr1);
-      Clobber(rs_rD6_fr1);
-      Clobber(rs_rD7_fr1);
-    }
-  }
-}
-
-RegLocation MipsMir2Lir::GetReturnWideAlt() {
-  UNIMPLEMENTED(FATAL) << "No GetReturnWideAlt for MIPS";
-  RegLocation res = LocCReturnWide();
-  return res;
-}
-
-RegLocation MipsMir2Lir::GetReturnAlt() {
-  UNIMPLEMENTED(FATAL) << "No GetReturnAlt for MIPS";
-  RegLocation res = LocCReturn();
-  return res;
-}
-
-/* To be used when explicitly managing register use */
-void MipsMir2Lir::LockCallTemps() {
-  LockTemp(TargetReg(kArg0));
-  LockTemp(TargetReg(kArg1));
-  LockTemp(TargetReg(kArg2));
-  LockTemp(TargetReg(kArg3));
-  if (cu_->target64) {
-    LockTemp(TargetReg(kArg4));
-    LockTemp(TargetReg(kArg5));
-    LockTemp(TargetReg(kArg6));
-    LockTemp(TargetReg(kArg7));
-  } else {
-    if (fpuIs32Bit_) {
-      LockTemp(TargetReg(kFArg0));
-      LockTemp(TargetReg(kFArg1));
-      LockTemp(TargetReg(kFArg2));
-      LockTemp(TargetReg(kFArg3));
-      LockTemp(rs_rD6_fr0);
-      LockTemp(rs_rD7_fr0);
-    } else {
-      LockTemp(TargetReg(kFArg0));
-      LockTemp(TargetReg(kFArg2));
-      LockTemp(rs_rD6_fr1);
-      LockTemp(rs_rD7_fr1);
-    }
-  }
-}
-
-/* To be used when explicitly managing register use */
-void MipsMir2Lir::FreeCallTemps() {
-  FreeTemp(TargetReg(kArg0));
-  FreeTemp(TargetReg(kArg1));
-  FreeTemp(TargetReg(kArg2));
-  FreeTemp(TargetReg(kArg3));
-  if (cu_->target64) {
-    FreeTemp(TargetReg(kArg4));
-    FreeTemp(TargetReg(kArg5));
-    FreeTemp(TargetReg(kArg6));
-    FreeTemp(TargetReg(kArg7));
-  } else {
-    if (fpuIs32Bit_) {
-      FreeTemp(TargetReg(kFArg0));
-      FreeTemp(TargetReg(kFArg1));
-      FreeTemp(TargetReg(kFArg2));
-      FreeTemp(TargetReg(kFArg3));
-      FreeTemp(rs_rD6_fr0);
-      FreeTemp(rs_rD7_fr0);
-    } else {
-      FreeTemp(TargetReg(kFArg0));
-      FreeTemp(TargetReg(kFArg2));
-      FreeTemp(rs_rD6_fr1);
-      FreeTemp(rs_rD7_fr1);
-    }
-  }
-  FreeTemp(TargetReg(kHiddenArg));
-}
-
-bool MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind ATTRIBUTE_UNUSED) {
-  if (cu_->compiler_driver->GetInstructionSetFeatures()->IsSmp()) {
-    NewLIR1(kMipsSync, 0 /* Only stype currently supported */);
-    return true;
-  } else {
-    return false;
-  }
-}
-
-void MipsMir2Lir::CompilerInitializeRegAlloc() {
-  if (cu_->target64) {
-    reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64d, sp_regs_64,
-                                              dp_regs_64, reserved_regs_64, reserved_regs_64d,
-                                              core_temps_64, core_temps_64d, sp_temps_64,
-                                              dp_temps_64));
-
-    // Alias single precision floats to appropriate half of overlapping double.
-    for (RegisterInfo* info : reg_pool_->sp_regs_) {
-      int sp_reg_num = info->GetReg().GetRegNum();
-      int dp_reg_num = sp_reg_num;
-      RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
-      RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
-      // Double precision register's master storage should refer to itself.
-      DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
-      // Redirect single precision's master storage to master.
-      info->SetMaster(dp_reg_info);
-      // Singles should show a single 32-bit mask bit, at first referring to the low half.
-      DCHECK_EQ(info->StorageMask(), 0x1U);
-    }
-
-    // Alias each 32-bit core register to the corresponding 64-bit core register.
-    for (RegisterInfo* info : reg_pool_->core_regs_) {
-      int d_reg_num = info->GetReg().GetRegNum();
-      RegStorage d_reg = RegStorage::Solo64(d_reg_num);
-      RegisterInfo* d_reg_info = GetRegInfo(d_reg);
-      // 64bit D register's master storage should refer to itself.
-      DCHECK_EQ(d_reg_info, d_reg_info->Master());
-      // Redirect 32bit master storage to 64bit D.
-      info->SetMaster(d_reg_info);
-      // 32bit should show a single 32-bit mask bit, at first referring to the low half.
-      DCHECK_EQ(info->StorageMask(), 0x1U);
-    }
-  } else {
-    reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool,  // core64
-                                              sp_regs_32,
-                                              fpuIs32Bit_ ? dp_fr0_regs_32 : dp_fr1_regs_32,
-                                              reserved_regs_32, empty_pool,  // reserved64
-                                              core_temps_32, empty_pool,  // core64_temps
-                                              fpuIs32Bit_ ? sp_fr0_temps_32 : sp_fr1_temps_32,
-                                              fpuIs32Bit_ ? dp_fr0_temps_32 : dp_fr1_temps_32));
-
-    // Alias single precision floats to appropriate half of overlapping double.
-    for (RegisterInfo* info : reg_pool_->sp_regs_) {
-      int sp_reg_num = info->GetReg().GetRegNum();
-      int dp_reg_num = sp_reg_num & ~1;
-      if (fpuIs32Bit_ || (sp_reg_num == dp_reg_num)) {
-        RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
-        RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
-        // Double precision register's master storage should refer to itself.
-        DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
-        // Redirect single precision's master storage to master.
-        info->SetMaster(dp_reg_info);
-        // Singles should show a single 32-bit mask bit, at first referring to the low half.
-        DCHECK_EQ(info->StorageMask(), 0x1U);
-        if (sp_reg_num & 1) {
-          // For odd singles, change to use the high word of the backing double.
-          info->SetStorageMask(0x2);
-        }
-      }
-    }
-  }
-
-  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
-  // TODO: adjust when we roll to hard float calling convention.
-  reg_pool_->next_core_reg_ = 2;
-  reg_pool_->next_sp_reg_ = 2;
-  if (cu_->target64) {
-    reg_pool_->next_dp_reg_ = 1;
-  } else {
-    reg_pool_->next_dp_reg_ = 2;
-  }
-}
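
The register-pool setup above aliases each single-precision register to one half of its backing double through a shared master and a per-view storage mask, so clobbering either view invalidates the other. A toy model of that relationship (not ART code):

    // Each view of a physical register names a master plus the half it covers.
    struct ToyRegInfo {
      ToyRegInfo* master = this;    // Doubles are their own master.
      unsigned storage_mask = 0x1;  // 0x1 = low word, 0x2 = high word.
    };

    void AliasSingleToDouble(ToyRegInfo* single, ToyRegInfo* backing_double,
                             bool odd_single) {
      single->master = backing_double;
      single->storage_mask = odd_single ? 0x2 : 0x1;  // Odd singles: high word.
    }
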
-
-/*
- * In the Arm code it is typical to use the link register
- * to hold the target address.  However, for Mips we must
- * ensure that all branch instructions can be restarted if
- * there is a trap in the shadow.  Allocate a temp register.
- */
-RegStorage MipsMir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
-  // NOTE: native pointer.
-  if (cu_->target64) {
-    LoadWordDisp(TargetPtrReg(kSelf), GetThreadOffset<8>(trampoline).Int32Value(),
-                 TargetPtrReg(kInvokeTgt));
-  } else {
-    LoadWordDisp(TargetPtrReg(kSelf), GetThreadOffset<4>(trampoline).Int32Value(),
-                 TargetPtrReg(kInvokeTgt));
-  }
-  return TargetPtrReg(kInvokeTgt);
-}
-
-LIR* MipsMir2Lir::CheckSuspendUsingLoad() {
-  RegStorage tmp = AllocTemp();
-  // NOTE: native pointer.
-  if (cu_->target64) {
-    LoadWordDisp(TargetPtrReg(kSelf), Thread::ThreadSuspendTriggerOffset<8>().Int32Value(), tmp);
-  } else {
-    LoadWordDisp(TargetPtrReg(kSelf), Thread::ThreadSuspendTriggerOffset<4>().Int32Value(), tmp);
-  }
-  LIR *inst = LoadWordDisp(tmp, 0, tmp);
-  FreeTemp(tmp);
-  return inst;
-}
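
CheckSuspendUsingLoad leans on the implicit-check machinery: the runtime re-points the thread's suspend trigger at an unmapped page when a suspend is requested, so the second load faults and the SIGSEGV handler performs the suspend. The shape of the idea in plain C++ (illustrative only):

    #include <cstdint>

    struct ToyThread { volatile uintptr_t* suspend_trigger; };

    // The first load fetches the trigger pointer; the second dereferences it
    // and faults only if the runtime armed the trigger.
    inline void ImplicitSuspendCheck(ToyThread* self) {
      volatile uintptr_t* trigger = self->suspend_trigger;
      (void)*trigger;
    }
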
-
-LIR* MipsMir2Lir::GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest) {
-  DCHECK(!r_dest.IsFloat());  // See RegClassForFieldLoadStore().
-  if (!cu_->target64) {
-    DCHECK(r_dest.IsPair());
-  }
-  ClobberCallerSave();
-  LockCallTemps();  // Using fixed registers.
-  RegStorage reg_ptr = TargetReg(kArg0);
-  OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement);
-  RegStorage r_tgt = LoadHelper(kQuickA64Load);
-  ForceImplicitNullCheck(reg_ptr, 0, true);  // is_wide = true
-  LIR *ret = OpReg(kOpBlx, r_tgt);
-  RegStorage reg_ret;
-  if (cu_->target64) {
-    OpRegCopy(r_dest, TargetReg(kRet0));
-  } else {
-    reg_ret = RegStorage::MakeRegPair(TargetReg(kRet0), TargetReg(kRet1));
-    OpRegCopyWide(r_dest, reg_ret);
-  }
-  return ret;
-}
-
-LIR* MipsMir2Lir::GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src) {
-  DCHECK(!r_src.IsFloat());  // See RegClassForFieldLoadStore().
-  if (cu_->target64) {
-    DCHECK(!r_src.IsPair());
-  } else {
-    DCHECK(r_src.IsPair());
-  }
-  ClobberCallerSave();
-  LockCallTemps();  // Using fixed registers.
-  RegStorage temp_ptr = AllocTemp();
-  OpRegRegImm(kOpAdd, temp_ptr, r_base, displacement);
-  ForceImplicitNullCheck(temp_ptr, 0, true);  // is_wide = true
-  RegStorage temp_value = AllocTempWide();
-  OpRegCopyWide(temp_value, r_src);
-  if (cu_->target64) {
-    OpRegCopyWide(TargetReg(kArg0, kWide), temp_ptr);
-    OpRegCopyWide(TargetReg(kArg1, kWide), temp_value);
-  } else {
-    RegStorage reg_ptr = TargetReg(kArg0);
-    OpRegCopy(reg_ptr, temp_ptr);
-    RegStorage reg_value = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
-    OpRegCopyWide(reg_value, temp_value);
-  }
-  FreeTemp(temp_ptr);
-  FreeTemp(temp_value);
-  RegStorage r_tgt = LoadHelper(kQuickA64Store);
-  return OpReg(kOpBlx, r_tgt);
-}
-
-static dwarf::Reg DwarfCoreReg(int num) {
-  return dwarf::Reg::MipsCore(num);
-}
-
-void MipsMir2Lir::SpillCoreRegs() {
-  if (num_core_spills_ == 0) {
-    return;
-  }
-  uint32_t mask = core_spill_mask_;
-  int ptr_size = cu_->target64 ? 8 : 4;
-  int offset = num_core_spills_ * ptr_size;
-  const RegStorage rs_sp = TargetPtrReg(kSp);
-  OpRegImm(kOpSub, rs_sp, offset);
-  cfi_.AdjustCFAOffset(offset);
-  for (int reg = 0; mask; mask >>= 1, reg++) {
-    if (mask & 0x1) {
-      offset -= ptr_size;
-      StoreWordDisp(rs_sp, offset,
-                    cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg));
-      cfi_.RelOffset(DwarfCoreReg(reg), offset);
-    }
-  }
-}
-
-void MipsMir2Lir::UnSpillCoreRegs() {
-  if (num_core_spills_ == 0) {
-    return;
-  }
-  uint32_t mask = core_spill_mask_;
-  int offset  = frame_size_;
-  int ptr_size = cu_->target64 ? 8 : 4;
-  const RegStorage rs_sp = TargetPtrReg(kSp);
-  for (int reg = 0; mask; mask >>= 1, reg++) {
-    if (mask & 0x1) {
-      offset -= ptr_size;
-      LoadWordDisp(rs_sp, offset,
-                   cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg));
-      cfi_.Restore(DwarfCoreReg(reg));
-    }
-  }
-  OpRegImm(kOpAdd, rs_sp, frame_size_);
-  cfi_.AdjustCFAOffset(-frame_size_);
-}
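
Spill and unspill agree on the frame layout by walking core_spill_mask_ from bit 0 upward while offsets descend from the top of the spill area. A toy walk of the same loop (not ART code; the mask below is an arbitrary example):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t mask = (1u << 16) | (1u << 17) | (1u << 31);  // e.g. s0, s1, ra
      const int ptr_size = 4;                                // 32-bit target
      int offset = __builtin_popcount(mask) * ptr_size;
      for (int reg = 0; mask != 0; mask >>= 1, ++reg) {
        if (mask & 1u) {
          offset -= ptr_size;
          printf("r%d spilled at sp+%d\n", reg, offset);
        }
      }
      return 0;
    }
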
-
-bool MipsMir2Lir::IsUnconditionalBranch(LIR* lir) {
-  return (lir->opcode == kMipsB);
-}
-
-RegisterClass MipsMir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
-  if (UNLIKELY(is_volatile)) {
-    // On Mips, atomic 64-bit load/store requires a core register.
-    // Smaller aligned load/store is atomic for both core and fp registers.
-    if (size == k64 || size == kDouble) {
-      return kCoreReg;
-    }
-  }
-  // TODO: Verify that both core and fp registers are suitable for smaller sizes.
-  return RegClassBySize(size);
-}
-
-MipsMir2Lir::MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
-    : Mir2Lir(cu, mir_graph, arena), in_to_reg_storage_mips64_mapper_(this),
-    in_to_reg_storage_mips_mapper_(this),
-    isaIsR6_(cu_->target64 ? true : cu->compiler_driver->GetInstructionSetFeatures()
-                ->AsMipsInstructionSetFeatures()->IsR6()),
-    fpuIs32Bit_(cu_->target64 ? false : cu->compiler_driver->GetInstructionSetFeatures()
-                   ->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint()) {
-  for (int i = 0; i < kMipsLast; i++) {
-    DCHECK_EQ(MipsMir2Lir::EncodingMap[i].opcode, i)
-        << "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
-        << " is wrong: expecting " << i << ", seeing "
-        << static_cast<int>(MipsMir2Lir::EncodingMap[i].opcode);
-  }
-}
-
-Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
-                           ArenaAllocator* const arena) {
-  return new MipsMir2Lir(cu, mir_graph, arena);
-}
-
-uint64_t MipsMir2Lir::GetTargetInstFlags(int opcode) {
-  DCHECK(!IsPseudoLirOp(opcode));
-  return MipsMir2Lir::EncodingMap[opcode].flags;
-}
-
-const char* MipsMir2Lir::GetTargetInstName(int opcode) {
-  DCHECK(!IsPseudoLirOp(opcode));
-  return MipsMir2Lir::EncodingMap[opcode].name;
-}
-
-const char* MipsMir2Lir::GetTargetInstFmt(int opcode) {
-  DCHECK(!IsPseudoLirOp(opcode));
-  return MipsMir2Lir::EncodingMap[opcode].fmt;
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
deleted file mode 100644
index 4d6c058..0000000
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ /dev/null
@@ -1,1115 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_mips.h"
-
-#include "arch/mips/instruction_set_features_mips.h"
-#include "arch/mips/entrypoints_direct_mips.h"
-#include "base/logging.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "dex/reg_storage_eq.h"
-#include "dex/mir_graph.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
-#include "mips_lir.h"
-
-namespace art {
-
-static constexpr size_t kMips64DoublewordSize = 8;
-
-/* This file contains codegen for the Mips ISA */
-LIR* MipsMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
-  int opcode;
-  if (cu_->target64) {
-    DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
-    if (r_dest.Is64Bit()) {
-      if (r_dest.IsDouble()) {
-        if (r_src.IsDouble()) {
-          opcode = kMipsFmovd;
-        } else {
-          // Note the operands are swapped for the dmtc1 instr.
-          RegStorage t_opnd = r_src;
-          r_src = r_dest;
-          r_dest = t_opnd;
-          opcode = kMips64Dmtc1;
-        }
-      } else {
-        DCHECK(r_src.IsDouble());
-        opcode = kMips64Dmfc1;
-      }
-    } else {
-      if (r_dest.IsSingle()) {
-        if (r_src.IsSingle()) {
-          opcode = kMipsFmovs;
-        } else {
-          // Note the operands are swapped for the mtc1 instr.
-          RegStorage t_opnd = r_src;
-          r_src = r_dest;
-          r_dest = t_opnd;
-          opcode = kMipsMtc1;
-        }
-      } else {
-        DCHECK(r_src.IsSingle());
-        opcode = kMipsMfc1;
-      }
-    }
-  } else {
-    // Must be both DOUBLE or both not DOUBLE.
-    DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
-    if (r_dest.IsDouble()) {
-      opcode = kMipsFmovd;
-    } else {
-      if (r_dest.IsSingle()) {
-        if (r_src.IsSingle()) {
-          opcode = kMipsFmovs;
-        } else {
-          // Note the operands are swapped for the mtc1 instr.
-          RegStorage t_opnd = r_src;
-          r_src = r_dest;
-          r_dest = t_opnd;
-          opcode = kMipsMtc1;
-        }
-      } else {
-        DCHECK(r_src.IsSingle());
-        opcode = kMipsMfc1;
-      }
-    }
-  }
-  LIR* res;
-  if (cu_->target64) {
-    res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
-  } else {
-    res = RawLIR(current_dalvik_offset_, opcode, r_src.GetReg(), r_dest.GetReg());
-  }
-  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
-    res->flags.is_nop = true;
-  }
-  return res;
-}
-
-bool MipsMir2Lir::InexpensiveConstantInt(int32_t value) {
-  // For encodings, see LoadConstantNoClobber below.
-  return ((value == 0) || IsUint<16>(value) || IsInt<16>(value));
-}
-
-bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value ATTRIBUTE_UNUSED) {
-  return false;  // TUNING
-}
-
-bool MipsMir2Lir::InexpensiveConstantLong(int64_t value ATTRIBUTE_UNUSED) {
-  return false;  // TUNING
-}
-
-bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value ATTRIBUTE_UNUSED) {
-  return false;  // TUNING
-}
-
-/*
- * Load an immediate using a shortcut if possible; otherwise
- * grab it from the per-translation literal pool.  If the target is
- * a high register, build the constant into a low register and copy.
- *
- * No additional register clobbering operation is performed. Use this version when
- * 1) r_dest is freshly returned from AllocTemp, or
- * 2) the codegen is under fixed register usage.
- */
-LIR* MipsMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
-  LIR *res;
-
-  RegStorage r_dest_save = r_dest;
-  int is_fp_reg = r_dest.IsFloat();
-  if (is_fp_reg) {
-    DCHECK(r_dest.IsSingle());
-    r_dest = AllocTemp();
-  }
-
-  // See if the value can be constructed cheaply.
-  if (value == 0) {
-    res = NewLIR2(kMipsMove, r_dest.GetReg(), rZERO);
-  } else if (IsUint<16>(value)) {
-    // Use OR with (unsigned) immediate to encode 16b unsigned int.
-    res = NewLIR3(kMipsOri, r_dest.GetReg(), rZERO, value);
-  } else if (IsInt<16>(value)) {
-    // Use ADD with (signed) immediate to encode 16b signed int.
-    res = NewLIR3(kMipsAddiu, r_dest.GetReg(), rZERO, value);
-  } else {
-    res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
-    if (value & 0xffff)
-      NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
-  }
-
-  if (is_fp_reg) {
-    NewLIR2(kMipsMtc1, r_dest.GetReg(), r_dest_save.GetReg());
-    FreeTemp(r_dest);
-  }
-
-  return res;
-}
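
The selection order above picks the cheapest materialization: a move from $zero, then ori for unsigned 16-bit values, addiu for the remaining (negative) signed 16-bit values, else lui plus an optional ori for the low half. A hypothetical instruction-count oracle mirroring that logic:

    #include <cstdint>

    // Instruction count for materializing a 32-bit constant, per the order above.
    int MipsConstCost32(int32_t v) {
      if (v == 0) return 1;                              // move  rd, $zero
      if (static_cast<uint32_t>(v) <= 0xFFFF) return 1;  // ori   rd, $zero, v
      if (v >= -32768 && v < 0) return 1;                // addiu rd, $zero, v
      return (v & 0xFFFF) != 0 ? 2 : 1;                  // lui, plus ori if low half set
    }
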
-
-LIR* MipsMir2Lir::LoadConstantWideNoClobber(RegStorage r_dest, int64_t value) {
-  LIR* res = nullptr;
-  DCHECK(r_dest.Is64Bit());
-  RegStorage r_dest_save = r_dest;
-  int is_fp_reg = r_dest.IsFloat();
-  if (is_fp_reg) {
-    DCHECK(r_dest.IsDouble());
-    r_dest = AllocTemp();
-  }
-
-  int bit31 = (value & UINT64_C(0x80000000)) != 0;
-
-  // Loads with 1 instruction.
-  if (IsUint<16>(value)) {
-    res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, value);
-  } else if (IsInt<16>(value)) {
-    res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, value);
-  } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
-    res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
-  } else if (IsInt<32>(value)) {
-    // Loads with 2 instructions.
-    res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
-    NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
-  } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
-    res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, value);
-    NewLIR2(kMips64Dahi, r_dest.GetReg(), value >> 32);
-  } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
-    res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, value);
-    NewLIR2(kMips64Dati, r_dest.GetReg(), value >> 48);
-  } else if ((value & 0xFFFF) == 0 && (value >> 32) >= (-32768 - bit31) &&
-             (value >> 32) <= (32767 - bit31)) {
-    res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
-    NewLIR2(kMips64Dahi, r_dest.GetReg(), (value >> 32) + bit31);
-  } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
-    res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
-    NewLIR2(kMips64Dati, r_dest.GetReg(), (value >> 48) + bit31);
-  } else {
-    int64_t tmp = value;
-    int shift_cnt = 0;
-    while ((tmp & 1) == 0) {
-      tmp >>= 1;
-      shift_cnt++;
-    }
-
-    if (IsUint<16>(tmp)) {
-      res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, tmp);
-      NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
-              shift_cnt & 0x1F);
-    } else if (IsInt<16>(tmp)) {
-      res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
-      NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
-              shift_cnt & 0x1F);
-    } else if (IsInt<32>(tmp)) {
-      // Loads with 3 instructions.
-      res = NewLIR2(kMipsLui, r_dest.GetReg(), tmp >> 16);
-      NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), tmp);
-      NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
-              shift_cnt & 0x1F);
-    } else {
-      tmp = value >> 16;
-      shift_cnt = 16;
-      while ((tmp & 1) == 0) {
-        tmp >>= 1;
-        shift_cnt++;
-      }
-
-      if (IsUint<16>(tmp)) {
-        res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, tmp);
-        NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
-                shift_cnt & 0x1F);
-        NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
-      } else if (IsInt<16>(tmp)) {
-        res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
-        NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
-                shift_cnt & 0x1F);
-        NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
-      } else {
-        // Loads with 3-4 instructions.
-        uint64_t tmp2 = value;
-        if (((tmp2 >> 16) & 0xFFFF) != 0 || (tmp2 & 0xFFFFFFFF) == 0) {
-          res = NewLIR2(kMipsLui, r_dest.GetReg(), tmp2 >> 16);
-        }
-        if ((tmp2 & 0xFFFF) != 0) {
-          if (res != nullptr) {
-            NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), tmp2);
-          } else {
-            res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, tmp2);
-          }
-        if (bit31) {
-          tmp2 += UINT64_C(0x100000000);
-        }
-        if (((tmp2 >> 32) & 0xFFFF) != 0) {
-          NewLIR2(kMips64Dahi, r_dest.GetReg(), tmp2 >> 32);
-        }
-        if (tmp2 & UINT64_C(0x800000000000)) {
-          tmp2 += UINT64_C(0x1000000000000);
-        }
-        if ((tmp2 >> 48) != 0) {
-          NewLIR2(kMips64Dati, r_dest.GetReg(), tmp2 >> 48);
-        }
-      }
-    }
-  }
-
-  if (is_fp_reg) {
-    NewLIR2(kMips64Dmtc1, r_dest.GetReg(), r_dest_save.GetReg());
-    FreeTemp(r_dest);
-  }
-  return res;
-}
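-
-// Illustrative traces (not in the original source): for
-// value = 0x0000123400005678, bits 16-31 are zero and (value >> 32) fits in 16
-// bits, so the "ori + dahi" path applies:
-//   ori  r_dest, $zero, 0x5678
-//   dahi r_dest, 0x1234              # adds 0x1234 << 32
-// The bit31 adjustment compensates for lui's sign extension: for
-// value = 0x0000000180000000, "lui r_dest, 0x8000" sign-extends to
-// 0xFFFFFFFF80000000, so dahi must add (value >> 32) + 1 = 2 to cancel the
-// extended upper bits.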
-
-LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target) {
-  LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly */);
-  res->target = target;
-  return res;
-}
-
-LIR* MipsMir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
-  MipsOpCode opcode = kMipsNop;
-  switch (op) {
-    case kOpBlx:
-      opcode = kMipsJalr;
-      break;
-    case kOpBx:
-      return NewLIR2(kMipsJalr, rZERO, r_dest_src.GetReg());
-    default:
-      LOG(FATAL) << "Bad case in OpReg";
-      UNREACHABLE();
-  }
-  return NewLIR2(opcode, cu_->target64 ? rRAd : rRA, r_dest_src.GetReg());
-}
-
-LIR* MipsMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
-  if ((op == kOpAdd) || (op == kOpSub)) {
-    return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
-  } else {
-    LOG(FATAL) << "Bad case in OpRegImm";
-    UNREACHABLE();
-  }
-}
-
-LIR* MipsMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
-  MipsOpCode opcode = kMipsNop;
-  bool is64bit = cu_->target64 && (r_dest.Is64Bit() || r_src1.Is64Bit() || r_src2.Is64Bit());
-  switch (op) {
-    case kOpAdd:
-      opcode = is64bit ? kMips64Daddu : kMipsAddu;
-      break;
-    case kOpSub:
-      opcode = is64bit ? kMips64Dsubu : kMipsSubu;
-      break;
-    case kOpAnd:
-      opcode = kMipsAnd;
-      break;
-    case kOpMul:
-      opcode = isaIsR6_ ? kMipsR6Mul : kMipsR2Mul;
-      break;
-    case kOpOr:
-      opcode = kMipsOr;
-      break;
-    case kOpXor:
-      opcode = kMipsXor;
-      break;
-    case kOpLsl:
-      opcode = is64bit ? kMips64Dsllv : kMipsSllv;
-      break;
-    case kOpLsr:
-      opcode = is64bit ? kMips64Dsrlv : kMipsSrlv;
-      break;
-    case kOpAsr:
-      opcode = is64bit ? kMips64Dsrav : kMipsSrav;
-      break;
-    case kOpAdc:
-    case kOpSbc:
-      LOG(FATAL) << "No carry bit on MIPS";
-      break;
-    default:
-      LOG(FATAL) << "Bad case in OpRegRegReg";
-      break;
-  }
-  return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
-}
-
-LIR* MipsMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
-  LIR *res;
-  MipsOpCode opcode = kMipsNop;
-  bool short_form = true;
-  bool is64bit = cu_->target64 && (r_dest.Is64Bit() || r_src1.Is64Bit());
-
-  switch (op) {
-    case kOpAdd:
-      if (IS_SIMM16(value)) {
-        opcode = is64bit ? kMips64Daddiu : kMipsAddiu;
-      } else {
-        short_form = false;
-        opcode = is64bit ? kMips64Daddu : kMipsAddu;
-      }
-      break;
-    case kOpSub:
-      if (IS_SIMM16((-value))) {
-        value = -value;
-        opcode = is64bit ? kMips64Daddiu : kMipsAddiu;
-      } else {
-        short_form = false;
-        opcode = is64bit ? kMips64Dsubu : kMipsSubu;
-      }
-      break;
-    case kOpLsl:
-      if (is64bit) {
-        DCHECK(value >= 0 && value <= 63);
-        if (value >= 0 && value <= 31) {
-          opcode = kMips64Dsll;
-        } else {
-          opcode = kMips64Dsll32;
-          value = value - 32;
-        }
-      } else {
-        DCHECK(value >= 0 && value <= 31);
-        opcode = kMipsSll;
-      }
-      break;
-    case kOpLsr:
-      if (is64bit) {
-        DCHECK(value >= 0 && value <= 63);
-        if (value >= 0 && value <= 31) {
-          opcode = kMips64Dsrl;
-        } else {
-          opcode = kMips64Dsrl32;
-          value = value - 32;
-        }
-      } else {
-        DCHECK(value >= 0 && value <= 31);
-        opcode = kMipsSrl;
-      }
-      break;
-    case kOpAsr:
-      if (is64bit) {
-        DCHECK(value >= 0 && value <= 63);
-        if (value >= 0 && value <= 31) {
-          opcode = kMips64Dsra;
-        } else {
-          opcode = kMips64Dsra32;
-          value = value - 32;
-        }
-      } else {
-        DCHECK(value >= 0 && value <= 31);
-        opcode = kMipsSra;
-      }
-      break;
-    case kOpAnd:
-      if (IS_UIMM16((value))) {
-        opcode = kMipsAndi;
-      } else {
-        short_form = false;
-        opcode = kMipsAnd;
-      }
-      break;
-    case kOpOr:
-      if (IS_UIMM16((value))) {
-        opcode = kMipsOri;
-      } else {
-        short_form = false;
-        opcode = kMipsOr;
-      }
-      break;
-    case kOpXor:
-      if (IS_UIMM16((value))) {
-        opcode = kMipsXori;
-      } else {
-        short_form = false;
-        opcode = kMipsXor;
-      }
-      break;
-    case kOpMul:
-      short_form = false;
-      opcode = isaIsR6_ ? kMipsR6Mul : kMipsR2Mul;
-      break;
-    default:
-      LOG(FATAL) << "Bad case in OpRegRegImm";
-      break;
-  }
-
-  if (short_form) {
-    res = NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), value);
-  } else {
-    if (r_dest != r_src1) {
-      res = LoadConstant(r_dest, value);
-      NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_dest.GetReg());
-    } else {
-      RegStorage r_scratch;
-      if (is64bit) {
-        r_scratch = AllocTempWide();
-        res = LoadConstantWide(r_scratch, value);
-      } else {
-        r_scratch = AllocTemp();
-        res = LoadConstant(r_scratch, value);
-      }
-      NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
-    }
-  }
-  return res;
-}
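-
-// Example (illustrative): kOpAnd with an immediate such as 0x12345 does not
-// fit andi's unsigned 16 bits, so when r_dest != r_src1 the long form
-// materializes the constant first, roughly:
-//   <LoadConstant(r_dest, 0x12345)>  # lui + ori
-//   and  r_dest, r_src1, r_dest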
-
-LIR* MipsMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
-  MipsOpCode opcode = kMipsNop;
-  LIR *res;
-  switch (op) {
-    case kOpMov:
-      opcode = kMipsMove;
-      break;
-    case kOpMvn:
-      return NewLIR3(kMipsNor, r_dest_src1.GetReg(), r_src2.GetReg(), rZERO);
-    case kOpNeg:
-      if (cu_->target64 && r_dest_src1.Is64Bit()) {
-        return NewLIR3(kMips64Dsubu, r_dest_src1.GetReg(), rZEROd, r_src2.GetReg());
-      } else {
-        return NewLIR3(kMipsSubu, r_dest_src1.GetReg(), rZERO, r_src2.GetReg());
-      }
-    case kOpAdd:
-    case kOpAnd:
-    case kOpMul:
-    case kOpOr:
-    case kOpSub:
-    case kOpXor:
-      return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
-    case kOp2Byte:
-      if (cu_->target64) {
-        res = NewLIR2(kMipsSeb, r_dest_src1.GetReg(), r_src2.GetReg());
-      } else {
-        if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
-            ->IsMipsIsaRevGreaterThanEqual2()) {
-          res = NewLIR2(kMipsSeb, r_dest_src1.GetReg(), r_src2.GetReg());
-        } else {
-          res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 24);
-          OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 24);
-        }
-      }
-      return res;
-    case kOp2Short:
-      if (cu_->target64) {
-        res = NewLIR2(kMipsSeh, r_dest_src1.GetReg(), r_src2.GetReg());
-      } else {
-        if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
-            ->IsMipsIsaRevGreaterThanEqual2()) {
-          res = NewLIR2(kMipsSeh, r_dest_src1.GetReg(), r_src2.GetReg());
-        } else {
-          res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 16);
-          OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 16);
-        }
-      }
-      return res;
-    case kOp2Char:
-      return NewLIR3(kMipsAndi, r_dest_src1.GetReg(), r_src2.GetReg(), 0xFFFF);
-    default:
-      LOG(FATAL) << "Bad case in OpRegReg";
-      UNREACHABLE();
-  }
-  return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
-}
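-
-// Note: on MIPS32 prior to ISA revision 2 there is no seb/seh instruction, so
-// kOp2Byte falls back to the shift pair above, e.g.:
-//   sll r_dest, r_src, 24
-//   sra r_dest, r_dest, 24           # sign-extends the low byte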
-
-LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest ATTRIBUTE_UNUSED,
-                              RegStorage r_base ATTRIBUTE_UNUSED,
-                              int offset ATTRIBUTE_UNUSED,
-                              MoveType move_type ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL);
-  UNREACHABLE();
-}
-
-LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base ATTRIBUTE_UNUSED,
-                              int offset ATTRIBUTE_UNUSED,
-                              RegStorage r_src ATTRIBUTE_UNUSED,
-                              MoveType move_type ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL);
-  UNREACHABLE();
-}
-
-LIR* MipsMir2Lir::OpCondRegReg(OpKind op ATTRIBUTE_UNUSED,
-                               ConditionCode cc ATTRIBUTE_UNUSED,
-                               RegStorage r_dest ATTRIBUTE_UNUSED,
-                               RegStorage r_src ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS";
-  UNREACHABLE();
-}
-
-LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
-  LIR *res;
-  if (cu_->target64) {
-    res = LoadConstantWideNoClobber(r_dest, value);
-    return res;
-  }
-  if (fpuIs32Bit_ || !r_dest.IsFloat()) {
-    // 32bit FPU (pairs) or loading into GPR.
-    if (!r_dest.IsPair()) {
-      // Form 64-bit pair.
-      r_dest = Solo64ToPair64(r_dest);
-    }
-    res = LoadConstantNoClobber(r_dest.GetLow(), Low32Bits(value));
-    LoadConstantNoClobber(r_dest.GetHigh(), High32Bits(value));
-  } else {
-    // Here if we have a 64bit FPU and loading into FPR.
-    RegStorage r_temp = AllocTemp();
-    r_dest = Fp64ToSolo32(r_dest);
-    res = LoadConstantNoClobber(r_dest, Low32Bits(value));
-    LoadConstantNoClobber(r_temp, High32Bits(value));
-    NewLIR2(kMipsMthc1, r_temp.GetReg(), r_dest.GetReg());
-    FreeTemp(r_temp);
-  }
-  return res;
-}
-
-/* Load value from base + scaled index. */
-LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
-                                  int scale, OpSize size) {
-  LIR *first = nullptr;
-  LIR *res;
-  MipsOpCode opcode = kMipsNop;
-  bool is64bit = cu_->target64 && r_dest.Is64Bit();
-  RegStorage t_reg = is64bit ? AllocTempWide() : AllocTemp();
-
-  if (r_dest.IsFloat()) {
-    DCHECK(r_dest.IsSingle());
-    DCHECK((size == k32) || (size == kSingle) || (size == kReference));
-    size = kSingle;
-  } else {
-    if (size == kSingle) {
-      size = k32;
-    }
-  }
-
-  if (cu_->target64) {
-    if (!scale) {
-      if (is64bit) {
-        first = NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
-      } else {
-        first = NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
-      }
-    } else {
-      first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
-      NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
-    }
-  } else {
-    if (!scale) {
-      first = NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
-    } else {
-      first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
-      NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
-    }
-  }
-
-  switch (size) {
-    case k64:
-      if (cu_->target64) {
-        opcode = kMips64Ld;
-      } else {
-        LOG(FATAL) << "Bad case in LoadBaseIndexed";
-      }
-      break;
-    case kSingle:
-      opcode = kMipsFlwc1;
-      break;
-    case k32:
-    case kReference:
-      opcode = kMipsLw;
-      break;
-    case kUnsignedHalf:
-      opcode = kMipsLhu;
-      break;
-    case kSignedHalf:
-      opcode = kMipsLh;
-      break;
-    case kUnsignedByte:
-      opcode = kMipsLbu;
-      break;
-    case kSignedByte:
-      opcode = kMipsLb;
-      break;
-    default:
-      LOG(FATAL) << "Bad case in LoadBaseIndexed";
-  }
-
-  res = NewLIR3(opcode, r_dest.GetReg(), 0, t_reg.GetReg());
-  FreeTemp(t_reg);
-  return (first) ? first : res;
-}
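-
-// Example (illustrative): a k32 load with scale == 2 comes out roughly as
-//   sll  t, r_index, 2
-//   addu t, r_base, t
-//   lw   r_dest, 0(t)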
-
-// Store value at base + scaled index.
-LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
-                                   int scale, OpSize size) {
-  LIR *first = nullptr;
-  MipsOpCode opcode = kMipsNop;
-  RegStorage t_reg = AllocTemp();
-
-  if (r_src.IsFloat()) {
-    DCHECK(r_src.IsSingle());
-    DCHECK((size == k32) || (size == kSingle) || (size == kReference));
-    size = kSingle;
-  } else {
-    if (size == kSingle) {
-      size = k32;
-    }
-  }
-
-  MipsOpCode add_opcode = cu_->target64 ? kMips64Daddu : kMipsAddu;
-  if (!scale) {
-    first = NewLIR3(add_opcode, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
-  } else {
-    first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
-    NewLIR3(add_opcode, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
-  }
-
-  switch (size) {
-    case kSingle:
-      opcode = kMipsFswc1;
-      break;
-    case k32:
-    case kReference:
-      opcode = kMipsSw;
-      break;
-    case kUnsignedHalf:
-    case kSignedHalf:
-      opcode = kMipsSh;
-      break;
-    case kUnsignedByte:
-    case kSignedByte:
-      opcode = kMipsSb;
-      break;
-    default:
-      LOG(FATAL) << "Bad case in StoreBaseIndexed";
-  }
-  NewLIR3(opcode, r_src.GetReg(), 0, t_reg.GetReg());
-  return first;
-}
-
-// FIXME: don't split r_dest into 2 containers.
-LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
-                                   OpSize size) {
-  /*
-   * Load value from base + displacement.  Optionally perform null check
-   * on base (which must have an associated s_reg and MIR).  If not
-   * performing null check, incoming MIR can be null. IMPORTANT: this
-   * code must not allocate any new temps.  If a new register is needed
-   * and base and dest are the same, spill some other register to
-   * rlp and then restore.
-   */
-  LIR *res;
-  LIR *load = nullptr;
-  LIR *load2 = nullptr;
-  MipsOpCode opcode = kMipsNop;
-  bool short_form = IS_SIMM16(displacement);
-  bool is64bit = false;
-
-  switch (size) {
-    case k64:
-    case kDouble:
-      if (cu_->target64) {
-        r_dest = Check64BitReg(r_dest);
-        if (!r_dest.IsFloat()) {
-          opcode = kMips64Ld;
-        } else {
-          opcode = kMipsFldc1;
-        }
-        DCHECK_ALIGNED(displacement, 4);
-        break;
-      }
-      is64bit = true;
-      if (fpuIs32Bit_ && !r_dest.IsPair()) {
-        // Form 64-bit pair.
-        r_dest = Solo64ToPair64(r_dest);
-      }
-      short_form = IS_SIMM16_2WORD(displacement);
-      FALLTHROUGH_INTENDED;
-    case k32:
-    case kSingle:
-    case kReference:
-      opcode = kMipsLw;
-      if (r_dest.IsFloat()) {
-        opcode = kMipsFlwc1;
-        if (!is64bit) {
-          DCHECK(r_dest.IsSingle());
-        } else {
-          DCHECK(r_dest.IsDouble());
-        }
-      }
-      DCHECK_ALIGNED(displacement, 4);
-      break;
-    case kUnsignedHalf:
-      opcode = kMipsLhu;
-      DCHECK_ALIGNED(displacement, 2);
-      break;
-    case kSignedHalf:
-      opcode = kMipsLh;
-      DCHECK_ALIGNED(displacement, 2);
-      break;
-    case kUnsignedByte:
-      opcode = kMipsLbu;
-      break;
-    case kSignedByte:
-      opcode = kMipsLb;
-      break;
-    default:
-      LOG(FATAL) << "Bad case in LoadBaseIndexedBody";
-  }
-
-  if (cu_->target64) {
-    if (short_form) {
-      if (!IsAligned<kMips64DoublewordSize>(displacement) && opcode == kMips64Ld) {
-        RegStorage r_tmp = AllocTemp();
-        load = res = NewLIR3(kMips64Lwu, r_dest.GetReg(), displacement + LOWORD_OFFSET,
-                             r_base.GetReg());
-        load2 = NewLIR3(kMips64Lwu, r_tmp.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
-        NewLIR3(kMips64Dsll32, r_tmp.GetReg(), r_tmp.GetReg(), 0x0);
-        NewLIR3(kMipsOr, r_dest.GetReg(), r_dest.GetReg(), r_tmp.GetReg());
-        FreeTemp(r_tmp);
-      } else if (!IsAligned<kMips64DoublewordSize>(displacement) && opcode == kMipsFldc1) {
-        RegStorage r_tmp = AllocTemp();
-        r_dest = Fp64ToSolo32(r_dest);
-        load = res = NewLIR3(kMipsFlwc1, r_dest.GetReg(), displacement + LOWORD_OFFSET,
-                             r_base.GetReg());
-        load2 = NewLIR3(kMipsLw, r_tmp.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
-        NewLIR2(kMipsMthc1, r_tmp.GetReg(), r_dest.GetReg());
-        FreeTemp(r_tmp);
-      } else {
-        load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
-      }
-    } else {
-      RegStorage r_tmp = (r_base == r_dest) ? AllocTemp() : r_dest;
-      res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
-      load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
-      if (r_tmp != r_dest) {
-        FreeTemp(r_tmp);
-      }
-    }
-
-    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
-      DCHECK_EQ(r_base, TargetPtrReg(kSp));
-      AnnotateDalvikRegAccess(load, (displacement + LOWORD_OFFSET) >> 2,
-                              true /* is_load */, r_dest.Is64Bit() /* is64bit */);
-      if (load2 != nullptr) {
-        AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
-                                true /* is_load */, r_dest.Is64Bit() /* is64bit */);
-      }
-    }
-    return res;
-  }
-
-  if (short_form) {
-    if (!is64bit) {
-      load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
-    } else {
-      if (fpuIs32Bit_ || !r_dest.IsFloat()) {
-        DCHECK(r_dest.IsPair());
-        load = res = NewLIR3(opcode, r_dest.GetLowReg(), displacement + LOWORD_OFFSET,
-                             r_base.GetReg());
-        load2 = NewLIR3(opcode, r_dest.GetHighReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
-      } else {
-        // Here if 64bit fpu and r_dest is a 64bit fp register.
-        RegStorage r_tmp = AllocTemp();
-        // FIXME: why is r_dest a 64BitPair here???
-        r_dest = Fp64ToSolo32(r_dest);
-        load = res = NewLIR3(kMipsFlwc1, r_dest.GetReg(), displacement + LOWORD_OFFSET,
-                             r_base.GetReg());
-        load2 = NewLIR3(kMipsLw, r_tmp.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
-        NewLIR2(kMipsMthc1, r_tmp.GetReg(), r_dest.GetReg());
-        FreeTemp(r_tmp);
-      }
-    }
-  } else {
-    if (!is64bit) {
-      RegStorage r_tmp = (r_base == r_dest || r_dest.IsFloat()) ? AllocTemp() : r_dest;
-      res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
-      load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
-      if (r_tmp != r_dest) {
-        FreeTemp(r_tmp);
-      }
-    } else {
-      RegStorage r_tmp = AllocTemp();
-      res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
-      if (fpuIs32Bit_ || !r_dest.IsFloat()) {
-        DCHECK(r_dest.IsPair());
-        load = NewLIR3(opcode, r_dest.GetLowReg(), LOWORD_OFFSET, r_tmp.GetReg());
-        load2 = NewLIR3(opcode, r_dest.GetHighReg(), HIWORD_OFFSET, r_tmp.GetReg());
-      } else {
-        // Here if 64bit fpu and r_dest is a 64bit fp register
-        r_dest = Fp64ToSolo32(r_dest);
-        load = res = NewLIR3(kMipsFlwc1, r_dest.GetReg(), LOWORD_OFFSET, r_tmp.GetReg());
-        load2 = NewLIR3(kMipsLw, r_tmp.GetReg(), HIWORD_OFFSET, r_tmp.GetReg());
-        NewLIR2(kMipsMthc1, r_tmp.GetReg(), r_dest.GetReg());
-      }
-      FreeTemp(r_tmp);
-    }
-  }
-
-  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
-    DCHECK_EQ(r_base, TargetPtrReg(kSp));
-    AnnotateDalvikRegAccess(load, (displacement + (is64bit ? LOWORD_OFFSET : 0)) >> 2,
-                            true /* is_load */, is64bit /* is64bit */);
-    if (is64bit) {
-      AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
-                              true /* is_load */, is64bit /* is64bit */);
-    }
-  }
-  return res;
-}
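-
-// Illustration: an unaligned 64-bit GPR load on mips64 is split into two
-// 32-bit loads that are then recombined:
-//   lwu    r_dest, disp+LOWORD_OFFSET(r_base)
-//   lwu    r_tmp,  disp+HIWORD_OFFSET(r_base)
-//   dsll32 r_tmp,  r_tmp, 0
-//   or     r_dest, r_dest, r_tmp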
-
-void MipsMir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags, bool is_wide) {
-  if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-    if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
-      return;
-    }
-    // Force an implicit null check by performing a memory operation (load) from the given
-    // register with offset 0.  This will cause a signal if the register contains 0 (null).
-    LIR* load = Load32Disp(reg, LOWORD_OFFSET, rs_rZERO);
-    MarkSafepointPC(load);
-    if (is_wide) {
-      load = Load32Disp(reg, HIWORD_OFFSET, rs_rZERO);
-      MarkSafepointPC(load);
-    }
-  }
-}
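-
-// In effect this emits "lw $zero, 0(reg)": the loaded value is discarded, but
-// the access faults when reg holds null, and the runtime turns the resulting
-// signal into a NullPointerException at the marked safepoint.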
-
-LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
-                               VolatileKind is_volatile) {
-  if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))
-      && (!cu_->target64 || displacement & 0x7)) {
-    // TODO: use lld/scd instructions for Mips64.
-    // Do atomic 64-bit load.
-    return GenAtomic64Load(r_base, displacement, r_dest);
-  }
-
-  // TODO: base this on target.
-  if (size == kWord) {
-    size = cu_->target64 ? k64 : k32;
-  }
-  LIR* load;
-  load = LoadBaseDispBody(r_base, displacement, r_dest, size);
-
-  if (UNLIKELY(is_volatile == kVolatile)) {
-    GenMemBarrier(kLoadAny);
-  }
-
-  return load;
-}
-
-// FIXME: don't split r_dest into 2 containers.
-LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
-                                    OpSize size) {
-  LIR *res;
-  LIR *store = nullptr;
-  LIR *store2 = nullptr;
-  MipsOpCode opcode = kMipsNop;
-  bool short_form = IS_SIMM16(displacement);
-  bool is64bit = false;
-
-  switch (size) {
-    case k64:
-    case kDouble:
-      if (cu_->target64) {
-        r_src = Check64BitReg(r_src);
-        if (!r_src.IsFloat()) {
-          opcode = kMips64Sd;
-        } else {
-          opcode = kMipsFsdc1;
-        }
-        DCHECK_ALIGNED(displacement, 4);
-        break;
-      }
-      is64bit = true;
-      if (fpuIs32Bit_ && !r_src.IsPair()) {
-        // Form 64-bit pair.
-        r_src = Solo64ToPair64(r_src);
-      }
-      short_form = IS_SIMM16_2WORD(displacement);
-      FALLTHROUGH_INTENDED;
-    case k32:
-    case kSingle:
-    case kReference:
-      opcode = kMipsSw;
-      if (r_src.IsFloat()) {
-        opcode = kMipsFswc1;
-        if (!is64bit) {
-          DCHECK(r_src.IsSingle());
-        } else {
-          DCHECK(r_src.IsDouble());
-        }
-      }
-      DCHECK_ALIGNED(displacement, 4);
-      break;
-    case kUnsignedHalf:
-    case kSignedHalf:
-      opcode = kMipsSh;
-      DCHECK_ALIGNED(displacement, 2);
-      break;
-    case kUnsignedByte:
-    case kSignedByte:
-      opcode = kMipsSb;
-      break;
-    default:
-      LOG(FATAL) << "Bad case in StoreBaseDispBody";
-  }
-
-  if (cu_->target64) {
-    if (short_form) {
-      if (!IsAligned<kMips64DoublewordSize>(displacement) && opcode == kMips64Sd) {
-        RegStorage r_tmp = AllocTemp();
-        res = NewLIR2(kMipsMove, r_tmp.GetReg(), r_src.GetReg());
-        store = NewLIR3(kMipsSw, r_tmp.GetReg(), displacement + LOWORD_OFFSET, r_base.GetReg());
-        NewLIR3(kMips64Dsrl32, r_tmp.GetReg(), r_tmp.GetReg(), 0x0);
-        store2 = NewLIR3(kMipsSw, r_tmp.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
-        FreeTemp(r_tmp);
-      } else if (!IsAligned<kMips64DoublewordSize>(displacement) && opcode == kMipsFsdc1) {
-        RegStorage r_tmp = AllocTemp();
-        r_src = Fp64ToSolo32(r_src);
-        store = res = NewLIR3(kMipsFswc1, r_src.GetReg(), displacement + LOWORD_OFFSET,
-                              r_base.GetReg());
-        NewLIR2(kMipsMfhc1, r_tmp.GetReg(), r_src.GetReg());
-        store2 = NewLIR3(kMipsSw, r_tmp.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
-        FreeTemp(r_tmp);
-      } else {
-        store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
-      }
-    } else {
-      RegStorage r_scratch = AllocTemp();
-      res = OpRegRegImm(kOpAdd, r_scratch, r_base, displacement);
-      store = NewLIR3(opcode, r_src.GetReg(), 0, r_scratch.GetReg());
-      FreeTemp(r_scratch);
-    }
-
-    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
-      DCHECK_EQ(r_base, TargetPtrReg(kSp));
-      AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
-                              false /* is_load */, r_src.Is64Bit() /* is64bit */);
-      if (store2 != nullptr) {
-        AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
-                                false /* is_load */, r_src.Is64Bit() /* is64bit */);
-      }
-    }
-    return res;
-  }
-
-  if (short_form) {
-    if (!is64bit) {
-      store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
-    } else {
-      if (fpuIs32Bit_ || !r_src.IsFloat()) {
-        DCHECK(r_src.IsPair());
-        store = res = NewLIR3(opcode, r_src.GetLowReg(), displacement + LOWORD_OFFSET,
-                              r_base.GetReg());
-        store2 = NewLIR3(opcode, r_src.GetHighReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
-      } else {
-        // Here if 64bit fpu and r_src is a 64bit fp register
-        RegStorage r_tmp = AllocTemp();
-        r_src = Fp64ToSolo32(r_src);
-        store = res = NewLIR3(kMipsFswc1, r_src.GetReg(), displacement + LOWORD_OFFSET,
-                              r_base.GetReg());
-        NewLIR2(kMipsMfhc1, r_tmp.GetReg(), r_src.GetReg());
-        store2 = NewLIR3(kMipsSw, r_tmp.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
-        FreeTemp(r_tmp);
-      }
-    }
-  } else {
-    RegStorage r_scratch = AllocTemp();
-    res = OpRegRegImm(kOpAdd, r_scratch, r_base, displacement);
-    if (!is64bit) {
-      store = NewLIR3(opcode, r_src.GetReg(), 0, r_scratch.GetReg());
-    } else {
-      if (fpuIs32Bit_ || !r_src.IsFloat()) {
-        DCHECK(r_src.IsPair());
-        store = NewLIR3(opcode, r_src.GetLowReg(), LOWORD_OFFSET, r_scratch.GetReg());
-        store2 = NewLIR3(opcode, r_src.GetHighReg(), HIWORD_OFFSET, r_scratch.GetReg());
-      } else {
-        // Here if 64bit fpu and r_src is a 64bit fp register
-        RegStorage r_tmp = AllocTemp();
-        r_src = Fp64ToSolo32(r_src);
-        store = NewLIR3(kMipsFswc1, r_src.GetReg(), LOWORD_OFFSET, r_scratch.GetReg());
-        NewLIR2(kMipsMfhc1, r_tmp.GetReg(), r_src.GetReg());
-        store2 = NewLIR3(kMipsSw, r_tmp.GetReg(), HIWORD_OFFSET, r_scratch.GetReg());
-        FreeTemp(r_tmp);
-      }
-    }
-    FreeTemp(r_scratch);
-  }
-
-  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
-    DCHECK_EQ(r_base, TargetPtrReg(kSp));
-    AnnotateDalvikRegAccess(store, (displacement + (is64bit ? LOWORD_OFFSET : 0)) >> 2,
-                            false /* is_load */, is64bit /* is64bit */);
-    if (is64bit) {
-      AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
-                              false /* is_load */, is64bit /* is64bit */);
-    }
-  }
-
-  return res;
-}
-
-LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
-                                VolatileKind is_volatile) {
-  if (is_volatile == kVolatile) {
-    // Ensure that prior accesses become visible to other threads first.
-    GenMemBarrier(kAnyStore);
-  }
-
-  LIR* store;
-  if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble) &&
-      (!cu_->target64 || displacement & 0x7))) {
-    // TODO: use lld/scd instructions for Mips64.
-    // Do atomic 64-bit store.
-    store = GenAtomic64Store(r_base, displacement, r_src);
-  } else {
-    // TODO: base this on target.
-    if (size == kWord) {
-      size = cu_->target64 ? k64 : k32;
-    }
-    store = StoreBaseDispBody(r_base, displacement, r_src, size);
-  }
-
-  if (UNLIKELY(is_volatile == kVolatile)) {
-    // Preserve order with respect to any subsequent volatile loads.
-    // We need StoreLoad, but that generally requires the most expensive barrier.
-    GenMemBarrier(kAnyAny);
-  }
-
-  return store;
-}
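-
-// Ordering sketch (illustrative): a volatile, aligned 64-bit store thus
-// expands to roughly
-//   sync                             # kAnyStore: make prior accesses visible
-//   sd/sw ...                        # the store itself
-//   sync                             # kAnyAny: approximates StoreLoad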
-
-LIR* MipsMir2Lir::OpMem(OpKind op ATTRIBUTE_UNUSED,
-                        RegStorage r_base ATTRIBUTE_UNUSED,
-                        int disp ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpMem for MIPS";
-  UNREACHABLE();
-}
-
-LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc ATTRIBUTE_UNUSED, LIR* target ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
-  UNREACHABLE();
-}
-
-LIR* MipsMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
-  if (!cu_->target64 && IsDirectEntrypoint(trampoline)) {
-    // Reserve argument space on stack (for $a0-$a3) for
-    // entrypoints that directly reference native implementations.
-    // This is not safe in general, as it violates the frame size
-    // of the Quick method, but it is used here only for calling
-    // native functions, outside of the runtime.
-    OpRegImm(kOpSub, rs_rSP, 16);
-    LIR* retVal = OpReg(op, r_tgt);
-    OpRegImm(kOpAdd, rs_rSP, 16);
-    return retVal;
-  }
-
-  return OpReg(op, r_tgt);
-}
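-
-// Background note: the MIPS o32 calling convention requires the caller to
-// reserve 16 bytes of home space for the argument registers $a0-$a3, which is
-// why the stack pointer is dropped by 16 around the direct native call above.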
-
-RegStorage MipsMir2Lir::AllocPtrSizeTemp(bool required) {
-  return cu_->target64 ? AllocTempWide(required) : AllocTemp(required);
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
deleted file mode 100644
index f96816c..0000000
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_MIR_TO_LIR_INL_H_
-#define ART_COMPILER_DEX_QUICK_MIR_TO_LIR_INL_H_
-
-#include "mir_to_lir.h"
-
-#include "base/logging.h"
-#include "dex/compiler_ir.h"
-#include "gc_root.h"
-#include "utils.h"
-
-namespace art {
-
-/* Mark a temp register as dead.  Does not affect allocation state. */
-inline void Mir2Lir::ClobberBody(RegisterInfo* p) {
-  DCHECK(p->IsTemp());
-  if (p->SReg() != INVALID_SREG) {
-    DCHECK(!(p->IsLive() && p->IsDirty())) << "Live & dirty temp in clobber";
-    p->MarkDead();
-    if (p->IsWide()) {
-      p->SetIsWide(false);
-      if (p->GetReg().NotExactlyEquals(p->Partner())) {
-        // Register pair - deal with the other half.
-        p = GetRegInfo(p->Partner());
-        p->SetIsWide(false);
-        p->MarkDead();
-      }
-    }
-  }
-}
-
-inline LIR* Mir2Lir::RawLIR(DexOffset dalvik_offset, int opcode, int op0,
-                            int op1, int op2, int op3, int op4, LIR* target) {
-  LIR* insn = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
-  insn->dalvik_offset = dalvik_offset;
-  insn->opcode = opcode;
-  insn->operands[0] = op0;
-  insn->operands[1] = op1;
-  insn->operands[2] = op2;
-  insn->operands[3] = op3;
-  insn->operands[4] = op4;
-  insn->target = target;
-  SetupResourceMasks(insn);
-  if ((opcode == kPseudoTargetLabel) || (opcode == kPseudoSafepointPC) ||
-      (opcode == kPseudoExportedPC)) {
-    // Always make labels scheduling barriers
-    DCHECK(!insn->flags.use_def_invalid);
-    insn->u.m.use_mask = insn->u.m.def_mask = &kEncodeAll;
-  }
-  return insn;
-}
-
-/*
- * The following are building blocks to construct low-level IRs with 0 - 5
- * operands.
- */
-inline LIR* Mir2Lir::NewLIR0(int opcode) {
-  DCHECK(IsPseudoLirOp(opcode) || (GetTargetInstFlags(opcode) & NO_OPERAND))
-      << GetTargetInstName(opcode) << " " << opcode << " "
-      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
-      << current_dalvik_offset_;
-  LIR* insn = RawLIR(current_dalvik_offset_, opcode);
-  AppendLIR(insn);
-  return insn;
-}
-
-inline LIR* Mir2Lir::NewLIR1(int opcode, int dest) {
-  DCHECK(IsPseudoLirOp(opcode) || (GetTargetInstFlags(opcode) & IS_UNARY_OP))
-      << GetTargetInstName(opcode) << " " << opcode << " "
-      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
-      << current_dalvik_offset_;
-  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest);
-  AppendLIR(insn);
-  return insn;
-}
-
-inline LIR* Mir2Lir::NewLIR2(int opcode, int dest, int src1) {
-  DCHECK(IsPseudoLirOp(opcode) || (GetTargetInstFlags(opcode) & IS_BINARY_OP))
-      << GetTargetInstName(opcode) << " " << opcode << " "
-      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
-      << current_dalvik_offset_;
-  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1);
-  AppendLIR(insn);
-  return insn;
-}
-
-inline LIR* Mir2Lir::NewLIR2NoDest(int opcode, int src, int info) {
-  DCHECK(IsPseudoLirOp(opcode) || (GetTargetInstFlags(opcode) & IS_BINARY_OP))
-      << GetTargetInstName(opcode) << " " << opcode << " "
-      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
-      << current_dalvik_offset_;
-  LIR* insn = RawLIR(current_dalvik_offset_, opcode, src, info);
-  AppendLIR(insn);
-  return insn;
-}
-
-inline LIR* Mir2Lir::NewLIR3(int opcode, int dest, int src1, int src2) {
-  DCHECK(IsPseudoLirOp(opcode) || (GetTargetInstFlags(opcode) & IS_TERTIARY_OP))
-      << GetTargetInstName(opcode) << " " << opcode << " "
-      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
-      << current_dalvik_offset_;
-  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2);
-  AppendLIR(insn);
-  return insn;
-}
-
-inline LIR* Mir2Lir::NewLIR4(int opcode, int dest, int src1, int src2, int info) {
-  DCHECK(IsPseudoLirOp(opcode) || (GetTargetInstFlags(opcode) & IS_QUAD_OP))
-      << GetTargetInstName(opcode) << " " << opcode << " "
-      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
-      << current_dalvik_offset_;
-  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2, info);
-  AppendLIR(insn);
-  return insn;
-}
-
-inline LIR* Mir2Lir::NewLIR5(int opcode, int dest, int src1, int src2, int info1,
-                             int info2) {
-  DCHECK(IsPseudoLirOp(opcode) || (GetTargetInstFlags(opcode) & IS_QUIN_OP))
-      << GetTargetInstName(opcode) << " " << opcode << " "
-      << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
-      << current_dalvik_offset_;
-  LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest, src1, src2, info1, info2);
-  AppendLIR(insn);
-  return insn;
-}
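-
-// Usage sketch (illustrative): a three-operand MIPS add would be appended to
-// the current block as
-//   NewLIR3(kMipsAddu, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
-// with the DCHECK verifying that kMipsAddu is flagged IS_TERTIARY_OP.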
-
-/*
- * Mark the corresponding bit(s).
- */
-inline void Mir2Lir::SetupRegMask(ResourceMask* mask, int reg) {
-  DCHECK_EQ((reg & ~RegStorage::kRegValMask), 0);
-  DCHECK_LT(static_cast<size_t>(reg), reginfo_map_.size());
-  DCHECK(reginfo_map_[reg] != nullptr) << "No info for 0x" << reg;
-  *mask = mask->Union(reginfo_map_[reg]->DefUseMask());
-}
-
-/*
- * Clear the corresponding bit(s).
- */
-inline void Mir2Lir::ClearRegMask(ResourceMask* mask, int reg) {
-  DCHECK_EQ((reg & ~RegStorage::kRegValMask), 0);
-  DCHECK_LT(static_cast<size_t>(reg), reginfo_map_.size());
-  DCHECK(reginfo_map_[reg] != nullptr) << "No info for 0x" << reg;
-  *mask = mask->ClearBits(reginfo_map_[reg]->DefUseMask());
-}
-
-/*
- * Set up the proper fields in the resource mask
- */
-inline void Mir2Lir::SetupResourceMasks(LIR* lir) {
-  int opcode = lir->opcode;
-
-  if (IsPseudoLirOp(opcode)) {
-    lir->u.m.use_mask = lir->u.m.def_mask = &kEncodeNone;
-    if (opcode != kPseudoBarrier) {
-      lir->flags.fixup = kFixupLabel;
-    }
-    return;
-  }
-
-  uint64_t flags = GetTargetInstFlags(opcode);
-
-  if (flags & NEEDS_FIXUP) {
-    // Note: target-specific setup may specialize the fixup kind.
-    lir->flags.fixup = kFixupLabel;
-  }
-
-  /* Get the starting size of the instruction's template. */
-  lir->flags.size = GetInsnSize(lir);
-  estimated_native_code_size_ += lir->flags.size;
-
-  /* Set up the mask for resources. */
-  ResourceMask use_mask;
-  ResourceMask def_mask;
-
-  if (flags & (IS_LOAD | IS_STORE)) {
-    /* Set memory reference type (defaults to heap, overridden by ScopedMemRefType). */
-    if (flags & IS_LOAD) {
-      use_mask.SetBit(mem_ref_type_);
-    } else {
-      /* Currently only loads can be marked as kMustNotAlias. */
-      DCHECK(mem_ref_type_ != ResourceMask::kMustNotAlias);
-    }
-    if (flags & IS_STORE) {
-      /* Literals cannot be written to. */
-      DCHECK(mem_ref_type_ != ResourceMask::kLiteral);
-      def_mask.SetBit(mem_ref_type_);
-    }
-  }
-
-  /*
-   * Conservatively assume the branch here will call out to a function that in
-   * turn will trash everything.
-   */
-  if (flags & IS_BRANCH) {
-    lir->u.m.def_mask = lir->u.m.use_mask = &kEncodeAll;
-    return;
-  }
-
-  if (flags & REG_DEF0) {
-    SetupRegMask(&def_mask, lir->operands[0]);
-  }
-
-  if (flags & REG_DEF1) {
-    SetupRegMask(&def_mask, lir->operands[1]);
-  }
-
-  if (flags & REG_DEF2) {
-    SetupRegMask(&def_mask, lir->operands[2]);
-  }
-
-  if (flags & REG_USE0) {
-    SetupRegMask(&use_mask, lir->operands[0]);
-  }
-
-  if (flags & REG_USE1) {
-    SetupRegMask(&use_mask, lir->operands[1]);
-  }
-
-  if (flags & REG_USE2) {
-    SetupRegMask(&use_mask, lir->operands[2]);
-  }
-
-  if (flags & REG_USE3) {
-    SetupRegMask(&use_mask, lir->operands[3]);
-  }
-
-  if (flags & REG_USE4) {
-    SetupRegMask(&use_mask, lir->operands[4]);
-  }
-
-  if (flags & SETS_CCODES) {
-    def_mask.SetBit(ResourceMask::kCCode);
-  }
-
-  if (flags & USES_CCODES) {
-    use_mask.SetBit(ResourceMask::kCCode);
-  }
-
-  // Handle target-specific actions
-  SetupTargetResourceMasks(lir, flags, &use_mask, &def_mask);
-
-  lir->u.m.use_mask = mask_cache_.GetMask(use_mask);
-  lir->u.m.def_mask = mask_cache_.GetMask(def_mask);
-}
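-
-// Worked example (assuming typical MIPS encoder flags): a heap load
-// "lw rd, off(rb)" flagged IS_LOAD | REG_DEF0 | REG_USE2 ends up with the
-// heap-memory bit plus rb's bits in the use mask and rd's bits in the def
-// mask, while any branch simply gets kEncodeAll on both sides.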
-
-inline art::Mir2Lir::RegisterInfo* Mir2Lir::GetRegInfo(RegStorage reg) {
-  RegisterInfo* res = reg.IsPair() ? reginfo_map_[reg.GetLowReg()] : reginfo_map_[reg.GetReg()];
-  DCHECK(res != nullptr);
-  return res;
-}
-
-inline void Mir2Lir::CheckRegLocation(RegLocation rl) const {
-  if (kFailOnSizeError || kReportSizeError) {
-    CheckRegLocationImpl(rl, kFailOnSizeError, kReportSizeError);
-  }
-}
-
-inline void Mir2Lir::CheckRegStorage(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp)
-    const {
-  if (kFailOnSizeError || kReportSizeError) {
-    CheckRegStorageImpl(rs, wide, ref, fp, kFailOnSizeError, kReportSizeError);
-  }
-}
-
-inline size_t Mir2Lir::GetCacheOffset(uint32_t index) {
-  return sizeof(GcRoot<mirror::Object>) * index;
-}
-
-inline size_t Mir2Lir::GetCachePointerOffset(uint32_t index, size_t pointer_size) {
-  return pointer_size * index;
-}
-
-inline Mir2Lir::ShortyIterator::ShortyIterator(const char* shorty, bool is_static)
-    : cur_(shorty + 1), pending_this_(!is_static), initialized_(false) {
-  DCHECK(shorty != nullptr);
-  DCHECK_NE(*shorty, 0);
-}
-
-inline bool Mir2Lir::ShortyIterator::Next() {
-  if (!initialized_) {
-    initialized_ = true;
-  } else if (pending_this_) {
-    pending_this_ = false;
-  } else if (*cur_ != 0) {
-    cur_++;
-  }
-
-  return *cur_ != 0 || pending_this_;
-}
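-
-// Example (illustrative): for an instance method with shorty "VIL" (void
-// return, int and reference arguments), successive Next() calls yield the
-// implicit "this", then 'I', then 'L', and finally return false.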
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_MIR_TO_LIR_INL_H_
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
deleted file mode 100644
index 8da3863..0000000
--- a/compiler/dex/quick/mir_to_lir.cc
+++ /dev/null
@@ -1,1460 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "mir_to_lir-inl.h"
-
-#include "dex/dataflow_iterator-inl.h"
-#include "dex/quick/dex_file_method_inliner.h"
-#include "driver/compiler_driver.h"
-#include "primitive.h"
-#include "thread-inl.h"
-
-namespace art {
-
-class Mir2Lir::SpecialSuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
- public:
-  SpecialSuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont)
-      : LIRSlowPath(m2l, branch, cont),
-        num_used_args_(0u) {
-  }
-
-  void PreserveArg(int in_position) {
-    // Avoid duplicates.
-    for (size_t i = 0; i != num_used_args_; ++i) {
-      if (used_args_[i] == in_position) {
-        return;
-      }
-    }
-    DCHECK_LT(num_used_args_, kMaxArgsToPreserve);
-    used_args_[num_used_args_] = in_position;
-    ++num_used_args_;
-  }
-
-  void Compile() OVERRIDE {
-    m2l_->ResetRegPool();
-    m2l_->ResetDefTracking();
-    GenerateTargetLabel(kPseudoSuspendTarget);
-
-    m2l_->LockCallTemps();
-
-    // Generate frame.
-    m2l_->GenSpecialEntryForSuspend();
-
-    // Spill all args.
-    for (size_t i = 0, end = m2l_->in_to_reg_storage_mapping_.GetEndMappedIn(); i < end;
-        i += m2l_->in_to_reg_storage_mapping_.GetShorty(i).IsWide() ? 2u : 1u) {
-      m2l_->SpillArg(i);
-    }
-
-    m2l_->FreeCallTemps();
-
-    // Do the actual suspend call to runtime.
-    m2l_->CallRuntimeHelper(kQuickTestSuspend, true);
-
-    m2l_->LockCallTemps();
-
-    // Unspill used regs. (Don't unspill unused args.)
-    for (size_t i = 0; i != num_used_args_; ++i) {
-      m2l_->UnspillArg(used_args_[i]);
-    }
-
-    // Pop the frame.
-    m2l_->GenSpecialExitForSuspend();
-
-    // Branch to the continue label.
-    DCHECK(cont_ != nullptr);
-    m2l_->OpUnconditionalBranch(cont_);
-
-    m2l_->FreeCallTemps();
-  }
-
- private:
-  static constexpr size_t kMaxArgsToPreserve = 2u;
-  size_t num_used_args_;
-  int used_args_[kMaxArgsToPreserve];
-};
-
-RegisterClass Mir2Lir::ShortyToRegClass(char shorty_type) {
-  RegisterClass res;
-  switch (shorty_type) {
-    case 'L':
-      res = kRefReg;
-      break;
-    case 'F':
-      // Expected fallthrough.
-    case 'D':
-      res = kFPReg;
-      break;
-    default:
-      res = kCoreReg;
-  }
-  return res;
-}
-
-void Mir2Lir::LockArg(size_t in_position) {
-  RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);
-
-  if (reg_arg.Valid()) {
-    LockTemp(reg_arg);
-  }
-}
-
-RegStorage Mir2Lir::LoadArg(size_t in_position, RegisterClass reg_class, bool wide) {
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-  int offset = StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
-
-  if (cu_->instruction_set == kX86) {
-    /*
-     * A call on x86 moves the stack pointer in order to push the return address.
-     * Thus, we add another 4 bytes to locate the caller's out VR (the callee's in).
-     */
-    offset += sizeof(uint32_t);
-  }
-
-  if (cu_->instruction_set == kX86_64) {
-    /*
-     * A call on x86-64 moves the stack pointer in order to push the return address.
-     * Thus, we add another 8 bytes to locate the caller's out VR (the callee's in).
-     */
-    offset += sizeof(uint64_t);
-  }
-
-  RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);
-
-  // TODO: REVISIT: This adds a spill of the low part while we could just copy it.
-  if (reg_arg.Valid() && wide && (reg_arg.GetWideKind() == kNotWide)) {
-    // We only have the low half of the wide value in a register, so flush it to memory.
-    StoreBaseDisp(TargetPtrReg(kSp), offset, reg_arg, k32, kNotVolatile);
-    reg_arg = RegStorage::InvalidReg();
-  }
-
-  if (!reg_arg.Valid()) {
-    reg_arg = wide ?  AllocTypedTempWide(false, reg_class) : AllocTypedTemp(false, reg_class);
-    LoadBaseDisp(TargetPtrReg(kSp), offset, reg_arg, wide ? k64 : k32, kNotVolatile);
-  } else {
-    // Check if we need to copy the arg to a different reg_class.
-    if (!RegClassMatches(reg_class, reg_arg)) {
-      if (wide) {
-        RegStorage new_reg = AllocTypedTempWide(false, reg_class);
-        OpRegCopyWide(new_reg, reg_arg);
-        reg_arg = new_reg;
-      } else {
-        RegStorage new_reg = AllocTypedTemp(false, reg_class);
-        OpRegCopy(new_reg, reg_arg);
-        reg_arg = new_reg;
-      }
-    }
-  }
-  return reg_arg;
-}
-
-void Mir2Lir::LoadArgDirect(size_t in_position, RegLocation rl_dest) {
-  DCHECK_EQ(rl_dest.location, kLocPhysReg);
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-  int offset = StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
-  if (cu_->instruction_set == kX86) {
-    /*
-     * A call on x86 moves the stack pointer in order to push the return address.
-     * Thus, we add another 4 bytes to locate the caller's out VR (the callee's in).
-     */
-    offset += sizeof(uint32_t);
-  }
-
-  if (cu_->instruction_set == kX86_64) {
-    /*
-     * A call on x86-64 moves the stack pointer in order to push the return address.
-     * Thus, we add another 8 bytes to locate the caller's out VR (the callee's in).
-     */
-    offset += sizeof(uint64_t);
-  }
-
-  RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);
-
-  // TODO: REVISIT: This adds a spill of the low part while we could just copy it.
-  if (reg_arg.Valid() && rl_dest.wide && (reg_arg.GetWideKind() == kNotWide)) {
-    // We only have the low half of the wide value in a register, so flush it to memory.
-    StoreBaseDisp(TargetPtrReg(kSp), offset, reg_arg, k32, kNotVolatile);
-    reg_arg = RegStorage::InvalidReg();
-  }
-
-  if (!reg_arg.Valid()) {
-    OpSize op_size = rl_dest.wide ? k64 : (rl_dest.ref ? kReference : k32);
-    LoadBaseDisp(TargetPtrReg(kSp), offset, rl_dest.reg, op_size, kNotVolatile);
-  } else {
-    if (rl_dest.wide) {
-      OpRegCopyWide(rl_dest.reg, reg_arg);
-    } else {
-      OpRegCopy(rl_dest.reg, reg_arg);
-    }
-  }
-}
-
-void Mir2Lir::SpillArg(size_t in_position) {
-  RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);
-
-  if (reg_arg.Valid()) {
-    int offset = frame_size_ + StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
-    ShortyArg arg = in_to_reg_storage_mapping_.GetShorty(in_position);
-    OpSize size = arg.IsRef() ? kReference :
-        (arg.IsWide() && reg_arg.GetWideKind() == kWide) ? k64 : k32;
-    StoreBaseDisp(TargetPtrReg(kSp), offset, reg_arg, size, kNotVolatile);
-  }
-}
-
-void Mir2Lir::UnspillArg(size_t in_position) {
-  RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);
-
-  if (reg_arg.Valid()) {
-    int offset = frame_size_ + StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
-    ShortyArg arg = in_to_reg_storage_mapping_.GetShorty(in_position);
-    OpSize size = arg.IsRef() ? kReference :
-        (arg.IsWide() && reg_arg.GetWideKind() == kWide) ? k64 : k32;
-    LoadBaseDisp(TargetPtrReg(kSp), offset, reg_arg, size, kNotVolatile);
-  }
-}
-
-Mir2Lir::SpecialSuspendCheckSlowPath* Mir2Lir::GenSpecialSuspendTest() {
-  LockCallTemps();
-  LIR* branch = OpTestSuspend(nullptr);
-  FreeCallTemps();
-  LIR* cont = NewLIR0(kPseudoTargetLabel);
-  SpecialSuspendCheckSlowPath* slow_path =
-      new (arena_) SpecialSuspendCheckSlowPath(this, branch, cont);
-  AddSlowPath(slow_path);
-  return slow_path;
-}
-
-bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
-  // FastInstance() already checked by DexFileMethodInliner.
-  const InlineIGetIPutData& data = special.d.ifield_data;
-  if (data.method_is_static != 0u || data.object_arg != 0u) {
-    // The object is not "this" and has to be null-checked.
-    return false;
-  }
-
-  OpSize size;
-  switch (data.op_variant) {
-    case InlineMethodAnalyser::IGetVariant(Instruction::IGET):
-      size = in_to_reg_storage_mapping_.GetShorty(data.src_arg).IsFP() ? kSingle : k32;
-      break;
-    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE):
-      size = in_to_reg_storage_mapping_.GetShorty(data.src_arg).IsFP() ? kDouble : k64;
-      break;
-    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT):
-      size = kReference;
-      break;
-    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT):
-      size = kSignedHalf;
-      break;
-    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_CHAR):
-      size = kUnsignedHalf;
-      break;
-    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_BYTE):
-      size = kSignedByte;
-      break;
-    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_BOOLEAN):
-      size = kUnsignedByte;
-      break;
-    default:
-      LOG(FATAL) << "Unknown variant: " << data.op_variant;
-      UNREACHABLE();
-  }
-
-  // Point of no return - no aborts after this
-  if (!kLeafOptimization) {
-    auto* slow_path = GenSpecialSuspendTest();
-    slow_path->PreserveArg(data.object_arg);
-  }
-  LockArg(data.object_arg);
-  GenPrintLabel(mir);
-  RegStorage reg_obj = LoadArg(data.object_arg, kRefReg);
-  RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile);
-  RegisterClass ret_reg_class = ShortyToRegClass(cu_->shorty[0]);
-  RegLocation rl_dest = IsWide(size) ? GetReturnWide(ret_reg_class) : GetReturn(ret_reg_class);
-  RegStorage r_result = rl_dest.reg;
-  if (!RegClassMatches(reg_class, r_result)) {
-    r_result = IsWide(size) ? AllocTypedTempWide(rl_dest.fp, reg_class)
-                            : AllocTypedTemp(rl_dest.fp, reg_class);
-  }
-  if (IsRef(size)) {
-    LoadRefDisp(reg_obj, data.field_offset, r_result, data.is_volatile ? kVolatile : kNotVolatile);
-  } else {
-    LoadBaseDisp(reg_obj, data.field_offset, r_result, size, data.is_volatile ? kVolatile :
-        kNotVolatile);
-  }
-  if (r_result.NotExactlyEquals(rl_dest.reg)) {
-    if (IsWide(size)) {
-      OpRegCopyWide(rl_dest.reg, r_result);
-    } else {
-      OpRegCopy(rl_dest.reg, r_result);
-    }
-  }
-  return true;
-}
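-
-// Sketch (assuming the leaf optimization applies): a trivial getter like
-// "int getX() { return x; }" reduces to a label, one field load into the
-// return register, and the special exit sequence, with no frame built at all.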
-
-bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) {
-  // FastInstance() already checked by DexFileMethodInliner.
-  const InlineIGetIPutData& data = special.d.ifield_data;
-  if (data.method_is_static != 0u || data.object_arg != 0u) {
-    // The object is not "this" and has to be null-checked.
-    return false;
-  }
-  if (data.return_arg_plus1 != 0u) {
-    // The setter returns a method argument which we don't support here.
-    return false;
-  }
-
-  OpSize size;
-  switch (data.op_variant) {
-    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT):
-      size = in_to_reg_storage_mapping_.GetShorty(data.src_arg).IsFP() ? kSingle : k32;
-      break;
-    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE):
-      size = in_to_reg_storage_mapping_.GetShorty(data.src_arg).IsFP() ? kDouble : k64;
-      break;
-    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT):
-      size = kReference;
-      break;
-    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT):
-      size = kSignedHalf;
-      break;
-    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_CHAR):
-      size = kUnsignedHalf;
-      break;
-    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BYTE):
-      size = kSignedByte;
-      break;
-    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BOOLEAN):
-      size = kUnsignedByte;
-      break;
-    default:
-      LOG(FATAL) << "Unknown variant: " << data.op_variant;
-      UNREACHABLE();
-  }
-
-  // Point of no return - no aborts after this
-  if (!kLeafOptimization) {
-    auto* slow_path = GenSpecialSuspendTest();
-    slow_path->PreserveArg(data.object_arg);
-    slow_path->PreserveArg(data.src_arg);
-  }
-  LockArg(data.object_arg);
-  LockArg(data.src_arg);
-  GenPrintLabel(mir);
-  RegStorage reg_obj = LoadArg(data.object_arg, kRefReg);
-  RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile);
-  RegStorage reg_src = LoadArg(data.src_arg, reg_class, IsWide(size));
-  if (IsRef(size)) {
-    StoreRefDisp(reg_obj, data.field_offset, reg_src, data.is_volatile ? kVolatile : kNotVolatile);
-  } else {
-    StoreBaseDisp(reg_obj, data.field_offset, reg_src, size, data.is_volatile ? kVolatile :
-        kNotVolatile);
-  }
-  if (IsRef(size)) {
-    MarkGCCard(0, reg_src, reg_obj);
-  }
-  return true;
-}
-
-bool Mir2Lir::GenSpecialIdentity(MIR* mir, const InlineMethod& special) {
-  const InlineReturnArgData& data = special.d.return_data;
-  bool wide = (data.is_wide != 0u);
-
-  // Point of no return - no aborts after this
-  if (!kLeafOptimization) {
-    auto* slow_path = GenSpecialSuspendTest();
-    slow_path->PreserveArg(data.arg);
-  }
-  LockArg(data.arg);
-  GenPrintLabel(mir);
-  RegisterClass reg_class = ShortyToRegClass(cu_->shorty[0]);
-  RegLocation rl_dest = wide ? GetReturnWide(reg_class) : GetReturn(reg_class);
-  LoadArgDirect(data.arg, rl_dest);
-  return true;
-}
-
-/*
- * Special-case code generation for simple non-throwing leaf methods.
- */
-bool Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
-  DCHECK(special.flags & kInlineSpecial);
-  current_dalvik_offset_ = mir->offset;
-  DCHECK(current_mir_ == nullptr);  // Safepoints attributed to prologue.
-  MIR* return_mir = nullptr;
-  bool successful = false;
-  EnsureInitializedArgMappingToPhysicalReg();
-
-  switch (special.opcode) {
-    case kInlineOpNop:
-      successful = true;
-      DCHECK_EQ(mir->dalvikInsn.opcode, Instruction::RETURN_VOID);
-      if (!kLeafOptimization) {
-        GenSpecialSuspendTest();
-      }
-      return_mir = mir;
-      break;
-    case kInlineOpNonWideConst: {
-      successful = true;
-      if (!kLeafOptimization) {
-        GenSpecialSuspendTest();
-      }
-      RegLocation rl_dest = GetReturn(ShortyToRegClass(cu_->shorty[0]));
-      GenPrintLabel(mir);
-      LoadConstant(rl_dest.reg, static_cast<int>(special.d.data));
-      return_mir = bb->GetNextUnconditionalMir(mir_graph_, mir);
-      break;
-    }
-    case kInlineOpReturnArg:
-      successful = GenSpecialIdentity(mir, special);
-      return_mir = mir;
-      break;
-    case kInlineOpIGet:
-      successful = GenSpecialIGet(mir, special);
-      return_mir = bb->GetNextUnconditionalMir(mir_graph_, mir);
-      break;
-    case kInlineOpIPut:
-      successful = GenSpecialIPut(mir, special);
-      return_mir = bb->GetNextUnconditionalMir(mir_graph_, mir);
-      break;
-    default:
-      break;
-  }
-
-  if (successful) {
-    if (kIsDebugBuild) {
-      // Clear unreachable catch entries.
-      mir_graph_->catches_.clear();
-    }
-
-    // Handle verbosity for return MIR.
-    if (return_mir != nullptr) {
-      current_dalvik_offset_ = return_mir->offset;
-      // Not handling special identity case because it already generated code as part
-      // of the return. The label should have been added before any code was generated.
-      if (special.opcode != kInlineOpReturnArg) {
-        GenPrintLabel(return_mir);
-      }
-    }
-    GenSpecialExitSequence();
-
-    if (!kLeafOptimization) {
-      HandleSlowPaths();
-    } else {
-      core_spill_mask_ = 0;
-      num_core_spills_ = 0;
-      fp_spill_mask_ = 0;
-      num_fp_spills_ = 0;
-      frame_size_ = 0;
-      core_vmap_table_.clear();
-      fp_vmap_table_.clear();
-    }
-  }
-
-  return successful;
-}
-
-/*
- * Target-independent code generation.  Use only high-level
- * load/store utilities here, or target-dependent genXX() handlers
- * when necessary.
- */
-void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list) {
-  RegLocation rl_src[3];
-  RegLocation rl_dest = mir_graph_->GetBadLoc();
-  RegLocation rl_result = mir_graph_->GetBadLoc();
-  const Instruction::Code opcode = mir->dalvikInsn.opcode;
-  const int opt_flags = mir->optimization_flags;
-  const uint32_t vB = mir->dalvikInsn.vB;
-  const uint32_t vC = mir->dalvikInsn.vC;
-  DCHECK(CheckCorePoolSanity()) << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " @ 0x"
-                                << std::hex << current_dalvik_offset_;
-
-  // Prep Src and Dest locations.
-  int next_sreg = 0;
-  int next_loc = 0;
-  uint64_t attrs = MIRGraph::GetDataFlowAttributes(opcode);
-  rl_src[0] = rl_src[1] = rl_src[2] = mir_graph_->GetBadLoc();
-  if (attrs & DF_UA) {
-    if (attrs & DF_A_WIDE) {
-      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
-      next_sreg += 2;
-    } else {
-      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
-      next_sreg++;
-    }
-  }
-  if (attrs & DF_UB) {
-    if (attrs & DF_B_WIDE) {
-      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
-      next_sreg += 2;
-    } else {
-      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
-      next_sreg++;
-    }
-  }
-  if (attrs & DF_UC) {
-    if (attrs & DF_C_WIDE) {
-      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
-    } else {
-      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
-    }
-  }
-  if (attrs & DF_DA) {
-    if (attrs & DF_A_WIDE) {
-      rl_dest = mir_graph_->GetDestWide(mir);
-    } else {
-      rl_dest = mir_graph_->GetDest(mir);
-    }
-  }
-  switch (opcode) {
-    case Instruction::NOP:
-      break;
-
-    case Instruction::MOVE_EXCEPTION:
-      GenMoveException(rl_dest);
-      break;
-
-    case Instruction::RETURN_VOID_NO_BARRIER:
-    case Instruction::RETURN_VOID:
-      if (((cu_->access_flags & kAccConstructor) != 0) &&
-          cu_->compiler_driver->RequiresConstructorBarrier(Thread::Current(), cu_->dex_file,
-                                                          cu_->class_def_idx)) {
-        GenMemBarrier(kStoreStore);
-      }
-      if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
-        GenSuspendTest(opt_flags);
-      }
-      break;
-
-    case Instruction::RETURN_OBJECT:
-      DCHECK(rl_src[0].ref);
-      FALLTHROUGH_INTENDED;
-    case Instruction::RETURN:
-      if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
-        GenSuspendTest(opt_flags);
-      }
-      StoreValue(GetReturn(ShortyToRegClass(cu_->shorty[0])), rl_src[0]);
-      break;
-
-    case Instruction::RETURN_WIDE:
-      if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
-        GenSuspendTest(opt_flags);
-      }
-      StoreValueWide(GetReturnWide(ShortyToRegClass(cu_->shorty[0])), rl_src[0]);
-      break;
-
-    case Instruction::MOVE_RESULT:
-    case Instruction::MOVE_RESULT_WIDE:
-    case Instruction::MOVE_RESULT_OBJECT:
-      // Already processed with invoke or filled-new-array.
-      break;
-
-    case Instruction::MOVE:
-    case Instruction::MOVE_OBJECT:
-    case Instruction::MOVE_16:
-    case Instruction::MOVE_OBJECT_16:
-    case Instruction::MOVE_FROM16:
-    case Instruction::MOVE_OBJECT_FROM16:
-      StoreValue(rl_dest, rl_src[0]);
-      break;
-
-    case Instruction::MOVE_WIDE:
-    case Instruction::MOVE_WIDE_16:
-    case Instruction::MOVE_WIDE_FROM16:
-      StoreValueWide(rl_dest, rl_src[0]);
-      break;
-
-    case Instruction::CONST:
-    case Instruction::CONST_4:
-    case Instruction::CONST_16:
-      GenConst(rl_dest, vB);
-      break;
-
-    case Instruction::CONST_HIGH16:
-      GenConst(rl_dest, vB << 16);
-      break;
-
-    case Instruction::CONST_WIDE_16:
-    case Instruction::CONST_WIDE_32:
-      GenConstWide(rl_dest, static_cast<int64_t>(static_cast<int32_t>(vB)));
-      break;
-
-    case Instruction::CONST_WIDE:
-      GenConstWide(rl_dest, mir->dalvikInsn.vB_wide);
-      break;
-
-    case Instruction::CONST_WIDE_HIGH16:
-      rl_result = EvalLoc(rl_dest, kAnyReg, true);
-      LoadConstantWide(rl_result.reg, static_cast<int64_t>(vB) << 48);
-      StoreValueWide(rl_dest, rl_result);
-      break;
-
-    case Instruction::MONITOR_ENTER:
-      GenMonitorEnter(opt_flags, rl_src[0]);
-      break;
-
-    case Instruction::MONITOR_EXIT:
-      GenMonitorExit(opt_flags, rl_src[0]);
-      break;
-
-    case Instruction::CHECK_CAST: {
-      GenCheckCast(opt_flags, mir->offset, vB, rl_src[0]);
-      break;
-    }
-    case Instruction::INSTANCE_OF:
-      GenInstanceof(vC, rl_dest, rl_src[0]);
-      break;
-
-    case Instruction::NEW_INSTANCE:
-      GenNewInstance(vB, rl_dest);
-      break;
-
-    case Instruction::THROW:
-      GenThrow(rl_src[0]);
-      break;
-
-    case Instruction::ARRAY_LENGTH: {
-      const int len_offset = mirror::Array::LengthOffset().Int32Value();
-      rl_src[0] = LoadValue(rl_src[0], kRefReg);
-      GenNullCheck(rl_src[0].reg, opt_flags);
-      rl_result = EvalLoc(rl_dest, kCoreReg, true);
-      Load32Disp(rl_src[0].reg, len_offset, rl_result.reg);
-      MarkPossibleNullPointerException(opt_flags);
-      StoreValue(rl_dest, rl_result);
-      break;
-    }
-    case Instruction::CONST_STRING:
-    case Instruction::CONST_STRING_JUMBO:
-      GenConstString(vB, rl_dest);
-      break;
-
-    case Instruction::CONST_CLASS:
-      GenConstClass(vB, rl_dest);
-      break;
-
-    case Instruction::FILL_ARRAY_DATA:
-      GenFillArrayData(mir, vB, rl_src[0]);
-      break;
-
-    case Instruction::FILLED_NEW_ARRAY:
-      GenFilledNewArray(mir_graph_->NewMemCallInfo(bb, mir, kStatic,
-                        false /* not range */));
-      break;
-
-    case Instruction::FILLED_NEW_ARRAY_RANGE:
-      GenFilledNewArray(mir_graph_->NewMemCallInfo(bb, mir, kStatic,
-                        true /* range */));
-      break;
-
-    case Instruction::NEW_ARRAY:
-      GenNewArray(vC, rl_dest, rl_src[0]);
-      break;
-
-    case Instruction::GOTO:
-    case Instruction::GOTO_16:
-    case Instruction::GOTO_32:
-      if (mir_graph_->IsBackEdge(bb, bb->taken)) {
-        GenSuspendTestAndBranch(opt_flags, &label_list[bb->taken]);
-      } else {
-        OpUnconditionalBranch(&label_list[bb->taken]);
-      }
-      break;
-
-    case Instruction::PACKED_SWITCH:
-      GenPackedSwitch(mir, vB, rl_src[0]);
-      break;
-
-    case Instruction::SPARSE_SWITCH:
-      GenSparseSwitch(mir, vB, rl_src[0]);
-      break;
-
-    case Instruction::CMPL_FLOAT:
-    case Instruction::CMPG_FLOAT:
-    case Instruction::CMPL_DOUBLE:
-    case Instruction::CMPG_DOUBLE:
-      GenCmpFP(opcode, rl_dest, rl_src[0], rl_src[1]);
-      break;
-
-    case Instruction::CMP_LONG:
-      GenCmpLong(rl_dest, rl_src[0], rl_src[1]);
-      break;
-
-    case Instruction::IF_EQ:
-    case Instruction::IF_NE:
-    case Instruction::IF_LT:
-    case Instruction::IF_GE:
-    case Instruction::IF_GT:
-    case Instruction::IF_LE: {
-      if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
-        GenSuspendTest(opt_flags);
-      }
-      LIR* taken = &label_list[bb->taken];
-      GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken);
-      break;
-    }
-    case Instruction::IF_EQZ:
-    case Instruction::IF_NEZ:
-    case Instruction::IF_LTZ:
-    case Instruction::IF_GEZ:
-    case Instruction::IF_GTZ:
-    case Instruction::IF_LEZ: {
-      if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
-        GenSuspendTest(opt_flags);
-      }
-      LIR* taken = &label_list[bb->taken];
-      GenCompareZeroAndBranch(opcode, rl_src[0], taken);
-      break;
-    }
-
-    case Instruction::AGET_WIDE:
-      GenArrayGet(opt_flags, rl_dest.fp ? kDouble : k64, rl_src[0], rl_src[1], rl_dest, 3);
-      break;
-    case Instruction::AGET_OBJECT:
-      GenArrayGet(opt_flags, kReference, rl_src[0], rl_src[1], rl_dest, 2);
-      break;
-    case Instruction::AGET:
-      GenArrayGet(opt_flags, rl_dest.fp ? kSingle : k32, rl_src[0], rl_src[1], rl_dest, 2);
-      break;
-    case Instruction::AGET_BOOLEAN:
-      GenArrayGet(opt_flags, kUnsignedByte, rl_src[0], rl_src[1], rl_dest, 0);
-      break;
-    case Instruction::AGET_BYTE:
-      GenArrayGet(opt_flags, kSignedByte, rl_src[0], rl_src[1], rl_dest, 0);
-      break;
-    case Instruction::AGET_CHAR:
-      GenArrayGet(opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
-      break;
-    case Instruction::AGET_SHORT:
-      GenArrayGet(opt_flags, kSignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
-      break;
-    case Instruction::APUT_WIDE:
-      GenArrayPut(opt_flags, rl_src[0].fp ? kDouble : k64, rl_src[1], rl_src[2], rl_src[0], 3, false);
-      break;
-    case Instruction::APUT:
-      GenArrayPut(opt_flags, rl_src[0].fp ? kSingle : k32, rl_src[1], rl_src[2], rl_src[0], 2, false);
-      break;
-    case Instruction::APUT_OBJECT: {
-      bool is_null = mir_graph_->IsConstantNullRef(rl_src[0]);
-      bool is_safe = is_null;  // Always safe to store null.
-      if (!is_safe) {
-        // Check safety from verifier type information.
-        const DexCompilationUnit* unit = mir_graph_->GetCurrentDexCompilationUnit();
-        is_safe = cu_->compiler_driver->IsSafeCast(unit, mir->offset);
-      }
-      if (is_null || is_safe) {
-        // Store of constant null doesn't require an assignability test and can be generated inline
-        // without fixed register usage or a card mark.
-        GenArrayPut(opt_flags, kReference, rl_src[1], rl_src[2], rl_src[0], 2, !is_null);
-      } else {
-        GenArrayObjPut(opt_flags, rl_src[1], rl_src[2], rl_src[0]);
-      }
-      break;
-    }
-    case Instruction::APUT_SHORT:
-    case Instruction::APUT_CHAR:
-      GenArrayPut(opt_flags, kUnsignedHalf, rl_src[1], rl_src[2], rl_src[0], 1, false);
-      break;
-    case Instruction::APUT_BYTE:
-    case Instruction::APUT_BOOLEAN:
-      GenArrayPut(opt_flags, kUnsignedByte, rl_src[1], rl_src[2], rl_src[0], 0, false);
-      break;
-
-    case Instruction::IGET_OBJECT_QUICK:
-    case Instruction::IGET_OBJECT:
-      GenIGet(mir, opt_flags, kReference, Primitive::kPrimNot, rl_dest, rl_src[0]);
-      break;
-
-    case Instruction::IGET_WIDE_QUICK:
-    case Instruction::IGET_WIDE:
-      // kPrimLong and kPrimDouble share the same entrypoints.
-      if (rl_dest.fp) {
-        GenIGet(mir, opt_flags, kDouble, Primitive::kPrimDouble, rl_dest, rl_src[0]);
-      } else {
-        GenIGet(mir, opt_flags, k64, Primitive::kPrimLong, rl_dest, rl_src[0]);
-      }
-      break;
-
-    case Instruction::IGET_QUICK:
-    case Instruction::IGET:
-      if (rl_dest.fp) {
-        GenIGet(mir, opt_flags, kSingle, Primitive::kPrimFloat, rl_dest, rl_src[0]);
-      } else {
-        GenIGet(mir, opt_flags, k32, Primitive::kPrimInt, rl_dest, rl_src[0]);
-      }
-      break;
-
-    case Instruction::IGET_CHAR_QUICK:
-    case Instruction::IGET_CHAR:
-      GenIGet(mir, opt_flags, kUnsignedHalf, Primitive::kPrimChar, rl_dest, rl_src[0]);
-      break;
-
-    case Instruction::IGET_SHORT_QUICK:
-    case Instruction::IGET_SHORT:
-      GenIGet(mir, opt_flags, kSignedHalf, Primitive::kPrimShort, rl_dest, rl_src[0]);
-      break;
-
-    case Instruction::IGET_BOOLEAN_QUICK:
-    case Instruction::IGET_BOOLEAN:
-      GenIGet(mir, opt_flags, kUnsignedByte, Primitive::kPrimBoolean, rl_dest, rl_src[0]);
-      break;
-
-    case Instruction::IGET_BYTE_QUICK:
-    case Instruction::IGET_BYTE:
-      GenIGet(mir, opt_flags, kSignedByte, Primitive::kPrimByte, rl_dest, rl_src[0]);
-      break;
-
-    case Instruction::IPUT_WIDE_QUICK:
-    case Instruction::IPUT_WIDE:
-      GenIPut(mir, opt_flags, rl_src[0].fp ? kDouble : k64, rl_src[0], rl_src[1]);
-      break;
-
-    case Instruction::IPUT_OBJECT_QUICK:
-    case Instruction::IPUT_OBJECT:
-      GenIPut(mir, opt_flags, kReference, rl_src[0], rl_src[1]);
-      break;
-
-    case Instruction::IPUT_QUICK:
-    case Instruction::IPUT:
-      GenIPut(mir, opt_flags, rl_src[0].fp ? kSingle : k32, rl_src[0], rl_src[1]);
-      break;
-
-    case Instruction::IPUT_BYTE_QUICK:
-    case Instruction::IPUT_BOOLEAN_QUICK:
-    case Instruction::IPUT_BYTE:
-    case Instruction::IPUT_BOOLEAN:
-      GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1]);
-      break;
-
-    case Instruction::IPUT_CHAR_QUICK:
-    case Instruction::IPUT_CHAR:
-      GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1]);
-      break;
-
-    case Instruction::IPUT_SHORT_QUICK:
-    case Instruction::IPUT_SHORT:
-      GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1]);
-      break;
-
-    case Instruction::SGET_OBJECT:
-      GenSget(mir, rl_dest, kReference, Primitive::kPrimNot);
-      break;
-
-    case Instruction::SGET:
-      GenSget(mir, rl_dest, rl_dest.fp ? kSingle : k32, Primitive::kPrimInt);
-      break;
-
-    case Instruction::SGET_CHAR:
-      GenSget(mir, rl_dest, kUnsignedHalf, Primitive::kPrimChar);
-      break;
-
-    case Instruction::SGET_SHORT:
-      GenSget(mir, rl_dest, kSignedHalf, Primitive::kPrimShort);
-      break;
-
-    case Instruction::SGET_BOOLEAN:
-      GenSget(mir, rl_dest, kUnsignedByte, Primitive::kPrimBoolean);
-      break;
-
-    case Instruction::SGET_BYTE:
-      GenSget(mir, rl_dest, kSignedByte, Primitive::kPrimByte);
-      break;
-
-    case Instruction::SGET_WIDE:
-      // kPrimLong and kPrimDouble share the same entrypoints.
-      GenSget(mir, rl_dest, rl_dest.fp ? kDouble : k64, Primitive::kPrimDouble);
-      break;
-
-    case Instruction::SPUT_OBJECT:
-      GenSput(mir, rl_src[0], kReference);
-      break;
-
-    case Instruction::SPUT:
-      GenSput(mir, rl_src[0], rl_src[0].fp ? kSingle : k32);
-      break;
-
-    case Instruction::SPUT_BYTE:
-    case Instruction::SPUT_BOOLEAN:
-      GenSput(mir, rl_src[0], kUnsignedByte);
-      break;
-
-    case Instruction::SPUT_CHAR:
-      GenSput(mir, rl_src[0], kUnsignedHalf);
-      break;
-
-    case Instruction::SPUT_SHORT:
-      GenSput(mir, rl_src[0], kSignedHalf);
-      break;
-
-    case Instruction::SPUT_WIDE:
-      GenSput(mir, rl_src[0], rl_src[0].fp ? kDouble : k64);
-      break;
-
-    case Instruction::INVOKE_STATIC_RANGE:
-      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, true));
-      break;
-    case Instruction::INVOKE_STATIC:
-      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, false));
-      break;
-
-    case Instruction::INVOKE_DIRECT:
-      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, false));
-      break;
-    case Instruction::INVOKE_DIRECT_RANGE:
-      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, true));
-      break;
-
-    case Instruction::INVOKE_VIRTUAL_QUICK:
-    case Instruction::INVOKE_VIRTUAL:
-      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, false));
-      break;
-
-    case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
-    case Instruction::INVOKE_VIRTUAL_RANGE:
-      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, true));
-      break;
-
-    case Instruction::INVOKE_SUPER:
-      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, false));
-      break;
-    case Instruction::INVOKE_SUPER_RANGE:
-      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, true));
-      break;
-
-    case Instruction::INVOKE_INTERFACE:
-      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, false));
-      break;
-    case Instruction::INVOKE_INTERFACE_RANGE:
-      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, true));
-      break;
-
-    case Instruction::NEG_INT:
-    case Instruction::NOT_INT:
-      GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[0], opt_flags);
-      break;
-
-    case Instruction::NEG_LONG:
-    case Instruction::NOT_LONG:
-      GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[0], opt_flags);
-      break;
-
-    case Instruction::NEG_FLOAT:
-      GenArithOpFloat(opcode, rl_dest, rl_src[0], rl_src[0]);
-      break;
-
-    case Instruction::NEG_DOUBLE:
-      GenArithOpDouble(opcode, rl_dest, rl_src[0], rl_src[0]);
-      break;
-
-    case Instruction::INT_TO_LONG:
-      GenIntToLong(rl_dest, rl_src[0]);
-      break;
-
-    case Instruction::LONG_TO_INT:
-      GenLongToInt(rl_dest, rl_src[0]);
-      break;
-
-    case Instruction::INT_TO_BYTE:
-    case Instruction::INT_TO_SHORT:
-    case Instruction::INT_TO_CHAR:
-      GenIntNarrowing(opcode, rl_dest, rl_src[0]);
-      break;
-
-    case Instruction::INT_TO_FLOAT:
-    case Instruction::INT_TO_DOUBLE:
-    case Instruction::LONG_TO_FLOAT:
-    case Instruction::LONG_TO_DOUBLE:
-    case Instruction::FLOAT_TO_INT:
-    case Instruction::FLOAT_TO_LONG:
-    case Instruction::FLOAT_TO_DOUBLE:
-    case Instruction::DOUBLE_TO_INT:
-    case Instruction::DOUBLE_TO_LONG:
-    case Instruction::DOUBLE_TO_FLOAT:
-      GenConversion(opcode, rl_dest, rl_src[0]);
-      break;
-
-    case Instruction::ADD_INT:
-    case Instruction::ADD_INT_2ADDR:
-    case Instruction::MUL_INT:
-    case Instruction::MUL_INT_2ADDR:
-    case Instruction::AND_INT:
-    case Instruction::AND_INT_2ADDR:
-    case Instruction::OR_INT:
-    case Instruction::OR_INT_2ADDR:
-    case Instruction::XOR_INT:
-    case Instruction::XOR_INT_2ADDR:
-      if (rl_src[0].is_const &&
-          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[0]), opcode)) {
-        GenArithOpIntLit(opcode, rl_dest, rl_src[1],
-                         mir_graph_->ConstantValue(rl_src[0].orig_sreg));
-      } else if (rl_src[1].is_const &&
-                 InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]), opcode)) {
-        GenArithOpIntLit(opcode, rl_dest, rl_src[0],
-                         mir_graph_->ConstantValue(rl_src[1].orig_sreg));
-      } else {
-        GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1], opt_flags);
-      }
-      break;
-
-    case Instruction::SUB_INT:
-    case Instruction::SUB_INT_2ADDR:
-    case Instruction::DIV_INT:
-    case Instruction::DIV_INT_2ADDR:
-    case Instruction::REM_INT:
-    case Instruction::REM_INT_2ADDR:
-    case Instruction::SHL_INT:
-    case Instruction::SHL_INT_2ADDR:
-    case Instruction::SHR_INT:
-    case Instruction::SHR_INT_2ADDR:
-    case Instruction::USHR_INT:
-    case Instruction::USHR_INT_2ADDR:
-      if (rl_src[1].is_const &&
-          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]), opcode)) {
-        GenArithOpIntLit(opcode, rl_dest, rl_src[0], mir_graph_->ConstantValue(rl_src[1]));
-      } else {
-        GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1], opt_flags);
-      }
-      break;
-
-    case Instruction::ADD_LONG:
-    case Instruction::SUB_LONG:
-    case Instruction::AND_LONG:
-    case Instruction::OR_LONG:
-    case Instruction::XOR_LONG:
-    case Instruction::ADD_LONG_2ADDR:
-    case Instruction::SUB_LONG_2ADDR:
-    case Instruction::AND_LONG_2ADDR:
-    case Instruction::OR_LONG_2ADDR:
-    case Instruction::XOR_LONG_2ADDR:
-      if (rl_src[0].is_const || rl_src[1].is_const) {
-        GenArithImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1], opt_flags);
-        break;
-      }
-      FALLTHROUGH_INTENDED;
-    case Instruction::MUL_LONG:
-    case Instruction::DIV_LONG:
-    case Instruction::REM_LONG:
-    case Instruction::MUL_LONG_2ADDR:
-    case Instruction::DIV_LONG_2ADDR:
-    case Instruction::REM_LONG_2ADDR:
-      GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[1], opt_flags);
-      break;
-
-    case Instruction::SHL_LONG:
-    case Instruction::SHR_LONG:
-    case Instruction::USHR_LONG:
-    case Instruction::SHL_LONG_2ADDR:
-    case Instruction::SHR_LONG_2ADDR:
-    case Instruction::USHR_LONG_2ADDR:
-      if (rl_src[1].is_const) {
-        GenShiftImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1], opt_flags);
-      } else {
-        GenShiftOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
-      }
-      break;
-
-    case Instruction::DIV_FLOAT:
-    case Instruction::DIV_FLOAT_2ADDR:
-      if (HandleEasyFloatingPointDiv(rl_dest, rl_src[0], rl_src[1])) {
-        break;
-      }
-      FALLTHROUGH_INTENDED;
-    case Instruction::ADD_FLOAT:
-    case Instruction::SUB_FLOAT:
-    case Instruction::MUL_FLOAT:
-    case Instruction::REM_FLOAT:
-    case Instruction::ADD_FLOAT_2ADDR:
-    case Instruction::SUB_FLOAT_2ADDR:
-    case Instruction::MUL_FLOAT_2ADDR:
-    case Instruction::REM_FLOAT_2ADDR:
-      GenArithOpFloat(opcode, rl_dest, rl_src[0], rl_src[1]);
-      break;
-
-    case Instruction::DIV_DOUBLE:
-    case Instruction::DIV_DOUBLE_2ADDR:
-      if (HandleEasyFloatingPointDiv(rl_dest, rl_src[0], rl_src[1])) {
-        break;
-      }
-      FALLTHROUGH_INTENDED;
-    case Instruction::ADD_DOUBLE:
-    case Instruction::SUB_DOUBLE:
-    case Instruction::MUL_DOUBLE:
-    case Instruction::REM_DOUBLE:
-    case Instruction::ADD_DOUBLE_2ADDR:
-    case Instruction::SUB_DOUBLE_2ADDR:
-    case Instruction::MUL_DOUBLE_2ADDR:
-    case Instruction::REM_DOUBLE_2ADDR:
-      GenArithOpDouble(opcode, rl_dest, rl_src[0], rl_src[1]);
-      break;
-
-    case Instruction::RSUB_INT:
-    case Instruction::ADD_INT_LIT16:
-    case Instruction::MUL_INT_LIT16:
-    case Instruction::DIV_INT_LIT16:
-    case Instruction::REM_INT_LIT16:
-    case Instruction::AND_INT_LIT16:
-    case Instruction::OR_INT_LIT16:
-    case Instruction::XOR_INT_LIT16:
-    case Instruction::ADD_INT_LIT8:
-    case Instruction::RSUB_INT_LIT8:
-    case Instruction::MUL_INT_LIT8:
-    case Instruction::DIV_INT_LIT8:
-    case Instruction::REM_INT_LIT8:
-    case Instruction::AND_INT_LIT8:
-    case Instruction::OR_INT_LIT8:
-    case Instruction::XOR_INT_LIT8:
-    case Instruction::SHL_INT_LIT8:
-    case Instruction::SHR_INT_LIT8:
-    case Instruction::USHR_INT_LIT8:
-      GenArithOpIntLit(opcode, rl_dest, rl_src[0], vC);
-      break;
-
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
-  }
-  DCHECK(CheckCorePoolSanity());
-}  // NOLINT(readability/fn_size)
-
-// Process extended MIR instructions
-void Mir2Lir::HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
-  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
-    case kMirOpCopy: {
-      RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
-      RegLocation rl_dest = mir_graph_->GetDest(mir);
-      StoreValue(rl_dest, rl_src);
-      break;
-    }
-    case kMirOpFusedCmplFloat:
-      if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
-        GenSuspendTest(mir->optimization_flags);
-      }
-      GenFusedFPCmpBranch(bb, mir, false /*gt bias*/, false /*double*/);
-      break;
-    case kMirOpFusedCmpgFloat:
-      if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
-        GenSuspendTest(mir->optimization_flags);
-      }
-      GenFusedFPCmpBranch(bb, mir, true /*gt bias*/, false /*double*/);
-      break;
-    case kMirOpFusedCmplDouble:
-      if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
-        GenSuspendTest(mir->optimization_flags);
-      }
-      GenFusedFPCmpBranch(bb, mir, false /*gt bias*/, true /*double*/);
-      break;
-    case kMirOpFusedCmpgDouble:
-      if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
-        GenSuspendTest(mir->optimization_flags);
-      }
-      GenFusedFPCmpBranch(bb, mir, true /*gt bias*/, true /*double*/);
-      break;
-    case kMirOpFusedCmpLong:
-      if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
-        GenSuspendTest(mir->optimization_flags);
-      }
-      GenFusedLongCmpBranch(bb, mir);
-      break;
-    case kMirOpSelect:
-      GenSelect(bb, mir);
-      break;
-    case kMirOpNullCheck: {
-      RegLocation rl_obj = mir_graph_->GetSrc(mir, 0);
-      rl_obj = LoadValue(rl_obj, kRefReg);
-      // An explicit check is generated because this opcode is not expected to trip the
-      // implicit null checks (tripping them would require an actual invalid access
-      // through the null object).
-      GenExplicitNullCheck(rl_obj.reg, mir->optimization_flags);
-      break;
-    }
-    case kMirOpPhi:
-    case kMirOpNop:
-    case kMirOpRangeCheck:
-    case kMirOpDivZeroCheck:
-    case kMirOpCheck:
-      // Ignore these known opcodes
-      break;
-    default:
-      // Give the backends a chance to handle unknown extended MIR opcodes.
-      GenMachineSpecificExtendedMethodMIR(bb, mir);
-      break;
-  }
-}
-
-void Mir2Lir::GenPrintLabel(MIR* mir) {
-  // Mark the beginning of a Dalvik instruction for line tracking.
-  if (cu_->verbose) {
-    char* inst_str = mir_graph_->GetDalvikDisassembly(mir);
-    MarkBoundary(mir->offset, inst_str);
-  }
-}
-
-// Handle the content in each basic block.
-bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
-  if (bb->block_type == kDead) return false;
-  current_dalvik_offset_ = bb->start_offset;
-  MIR* mir;
-  int block_id = bb->id;
-
-  block_label_list_[block_id].operands[0] = bb->start_offset;
-
-  // Insert the block label.
-  block_label_list_[block_id].opcode = kPseudoNormalBlockLabel;
-  block_label_list_[block_id].flags.fixup = kFixupLabel;
-  AppendLIR(&block_label_list_[block_id]);
-
-  LIR* head_lir = nullptr;
-
-  // If this is a catch block, export the start address.
-  if (bb->catch_entry) {
-    head_lir = NewLIR0(kPseudoExportedPC);
-  }
-
-  // Free temp registers and reset redundant store tracking.
-  ClobberAllTemps();
-
-  if (bb->block_type == kEntryBlock) {
-    ResetRegPool();
-    int start_vreg = mir_graph_->GetFirstInVR();
-    AppendLIR(NewLIR0(kPseudoPrologueBegin));
-    DCHECK_EQ(cu_->target64, Is64BitInstructionSet(cu_->instruction_set));
-    if (cu_->target64) {
-      DCHECK(mir_graph_->GetMethodLoc().wide);
-    }
-    GenEntrySequence(&mir_graph_->reg_location_[start_vreg], mir_graph_->GetMethodLoc());
-    AppendLIR(NewLIR0(kPseudoPrologueEnd));
-    DCHECK_EQ(cfi_.GetCurrentCFAOffset(), frame_size_);
-  } else if (bb->block_type == kExitBlock) {
-    ResetRegPool();
-    DCHECK_EQ(cfi_.GetCurrentCFAOffset(), frame_size_);
-    AppendLIR(NewLIR0(kPseudoEpilogueBegin));
-    GenExitSequence();
-    AppendLIR(NewLIR0(kPseudoEpilogueEnd));
-    DCHECK_EQ(cfi_.GetCurrentCFAOffset(), frame_size_);
-  }
-
-  for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-    ResetRegPool();
-    if (cu_->disable_opt & (1 << kTrackLiveTemps)) {
-      ClobberAllTemps();
-      // Reset temp allocation to minimize differences when A/B testing.
-      reg_pool_->ResetNextTemp();
-    }
-
-    if (cu_->disable_opt & (1 << kSuppressLoads)) {
-      ResetDefTracking();
-    }
-
-    // Reset temp tracking sanity check.
-    if (kIsDebugBuild) {
-      live_sreg_ = INVALID_SREG;
-    }
-
-    current_dalvik_offset_ = mir->offset;
-    current_mir_ = mir;
-    int opcode = mir->dalvikInsn.opcode;
-
-    GenPrintLabel(mir);
-
-    // Remember the first LIR for this block.
-    if (head_lir == nullptr) {
-      head_lir = &block_label_list_[bb->id];
-      // Set the first label as a scheduling barrier.
-      DCHECK(!head_lir->flags.use_def_invalid);
-      head_lir->u.m.def_mask = &kEncodeAll;
-    }
-
-    if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
-      HandleExtendedMethodMIR(bb, mir);
-      continue;
-    }
-
-    CompileDalvikInstruction(mir, bb, block_label_list_);
-  }
-
-  if (head_lir) {
-    // Eliminate redundant loads/stores and delay stores into later slots.
-    ApplyLocalOptimizations(head_lir, last_lir_insn_);
-  }
-  return false;
-}
-
-bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
-  cu_->NewTimingSplit("SpecialMIR2LIR");
-  // Find the first DalvikByteCode block.
-  DCHECK_EQ(mir_graph_->GetNumReachableBlocks(), mir_graph_->GetDfsOrder().size());
-  BasicBlock* bb = nullptr;
-  for (BasicBlockId dfs_id : mir_graph_->GetDfsOrder()) {
-    BasicBlock* candidate = mir_graph_->GetBasicBlock(dfs_id);
-    if (candidate->block_type == kDalvikByteCode) {
-      bb = candidate;
-      break;
-    }
-  }
-  if (bb == nullptr) {
-    return false;
-  }
-  DCHECK_EQ(bb->start_offset, 0);
-  DCHECK(bb->first_mir_insn != nullptr);
-
-  // Get the first instruction.
-  MIR* mir = bb->first_mir_insn;
-
-  // Free temp registers and reset redundant store tracking.
-  ResetRegPool();
-  ResetDefTracking();
-  ClobberAllTemps();
-
-  return GenSpecialCase(bb, mir, special);
-}
-
-void Mir2Lir::MethodMIR2LIR() {
-  cu_->NewTimingSplit("MIR2LIR");
-
-  // Hold the labels of each block.
-  block_label_list_ = arena_->AllocArray<LIR>(mir_graph_->GetNumBlocks(), kArenaAllocLIR);
-
-  PreOrderDfsIterator iter(mir_graph_);
-  BasicBlock* curr_bb = iter.Next();
-  BasicBlock* next_bb = iter.Next();
-  while (curr_bb != nullptr) {
-    MethodBlockCodeGen(curr_bb);
-    // If the fall_through block is no longer laid out consecutively, drop in a branch.
-    BasicBlock* curr_bb_fall_through = mir_graph_->GetBasicBlock(curr_bb->fall_through);
-    if ((curr_bb_fall_through != nullptr) && (curr_bb_fall_through != next_bb)) {
-      OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through]);
-    }
-    curr_bb = next_bb;
-    do {
-      next_bb = iter.Next();
-    } while ((next_bb != nullptr) && (next_bb->block_type == kDead));
-  }
-  HandleSlowPaths();
-}
-
-//
-// LIR Slow Path
-//
-
-LIR* Mir2Lir::LIRSlowPath::GenerateTargetLabel(int opcode) {
-  m2l_->SetCurrentDexPc(current_dex_pc_);
-  m2l_->current_mir_ = current_mir_;
-  LIR* target = m2l_->NewLIR0(opcode);
-  fromfast_->target = target;
-  return target;
-}
-
-void Mir2Lir::CheckRegStorageImpl(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp,
-                                  bool fail, bool report)
-    const {
-  if (rs.Valid()) {
-    if (ref == RefCheck::kCheckRef) {
-      if (cu_->target64 && !rs.Is64Bit()) {
-        if (fail) {
-          CHECK(false) << "Reg storage not 64b for ref.";
-        } else if (report) {
-          LOG(WARNING) << "Reg storage not 64b for ref.";
-        }
-      }
-    }
-    if (wide == WidenessCheck::kCheckWide) {
-      if (!rs.Is64Bit()) {
-        if (fail) {
-          CHECK(false) << "Reg storage not 64b for wide.";
-        } else if (report) {
-          LOG(WARNING) << "Reg storage not 64b for wide.";
-        }
-      }
-    }
-    // A tighter check would be nice, but for now soft-float will not check float at all.
-    if (fp == FPCheck::kCheckFP && cu_->instruction_set != kArm) {
-      if (!rs.IsFloat()) {
-        if (fail) {
-          CHECK(false) << "Reg storage not float for fp.";
-        } else if (report) {
-          LOG(WARNING) << "Reg storage not float for fp.";
-        }
-      }
-    } else if (fp == FPCheck::kCheckNotFP) {
-      if (rs.IsFloat()) {
-        if (fail) {
-          CHECK(false) << "Reg storage float for not-fp.";
-        } else if (report) {
-          LOG(WARNING) << "Reg storage float for not-fp.";
-        }
-      }
-    }
-  }
-}
-
-void Mir2Lir::CheckRegLocationImpl(RegLocation rl, bool fail, bool report) const {
-  // Regrettably can't use the fp part of rl, as that is not really indicative of where a value
-  // will be stored.
-  CheckRegStorageImpl(rl.reg, rl.wide ? WidenessCheck::kCheckWide : WidenessCheck::kCheckNotWide,
-      rl.ref ? RefCheck::kCheckRef : RefCheck::kCheckNotRef, FPCheck::kIgnoreFP, fail, report);
-}
-
-size_t Mir2Lir::GetInstructionOffset(LIR* lir ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "Unsupported GetInstructionOffset()";
-  UNREACHABLE();
-}
-
-void Mir2Lir::InToRegStorageMapping::Initialize(ShortyIterator* shorty,
-                                                InToRegStorageMapper* mapper) {
-  DCHECK(mapper != nullptr);
-  DCHECK(shorty != nullptr);
-  DCHECK(!IsInitialized());
-  DCHECK_EQ(end_mapped_in_, 0u);
-  DCHECK(!has_arguments_on_stack_);
-  while (shorty->Next()) {
-    ShortyArg arg = shorty->GetArg();
-    RegStorage reg = mapper->GetNextReg(arg);
-    mapping_.emplace_back(arg, reg);
-    if (arg.IsWide()) {
-      mapping_.emplace_back(ShortyArg(kInvalidShorty), RegStorage::InvalidReg());
-    }
-    if (reg.Valid()) {
-      end_mapped_in_ = mapping_.size();
-      // If the VR is wide but wasn't mapped as wide, then account for it.
-      if (arg.IsWide() && !reg.Is64Bit()) {
-        --end_mapped_in_;
-      }
-    } else {
-      has_arguments_on_stack_ = true;
-    }
-  }
-  initialized_ = true;
-}
-
-RegStorage Mir2Lir::InToRegStorageMapping::GetReg(size_t in_position) {
-  DCHECK(IsInitialized());
-  DCHECK_LT(in_position, mapping_.size());
-  DCHECK_NE(mapping_[in_position].first.GetType(), kInvalidShorty);
-  return mapping_[in_position].second;
-}
-
-Mir2Lir::ShortyArg Mir2Lir::InToRegStorageMapping::GetShorty(size_t in_position) {
-  DCHECK(IsInitialized());
-  DCHECK_LT(static_cast<size_t>(in_position), mapping_.size());
-  DCHECK_NE(mapping_[in_position].first.GetType(), kInvalidShorty);
-  return mapping_[in_position].first;
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
deleted file mode 100644
index a0db1e8..0000000
--- a/compiler/dex/quick/mir_to_lir.h
+++ /dev/null
@@ -1,1933 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
-#define ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
-
-#include "base/arena_allocator.h"
-#include "base/arena_containers.h"
-#include "base/arena_object.h"
-#include "compiled_method.h"
-#include "dex/compiler_enums.h"
-#include "dex/dex_flags.h"
-#include "dex/dex_types.h"
-#include "dex/reg_location.h"
-#include "dex/reg_storage.h"
-#include "dex/quick/resource_mask.h"
-#include "entrypoints/quick/quick_entrypoints_enum.h"
-#include "invoke_type.h"
-#include "lazy_debug_frame_opcode_writer.h"
-#include "leb128.h"
-#include "primitive.h"
-#include "safe_map.h"
-#include "utils/array_ref.h"
-#include "utils/dex_cache_arrays_layout.h"
-#include "utils/stack_checks.h"
-
-namespace art {
-
-// Set to 1 to measure cost of suspend check.
-#define NO_SUSPEND 0
-
-#define IS_BINARY_OP         (1ULL << kIsBinaryOp)
-#define IS_BRANCH            (1ULL << kIsBranch)
-#define IS_IT                (1ULL << kIsIT)
-#define IS_MOVE              (1ULL << kIsMoveOp)
-#define IS_LOAD              (1ULL << kMemLoad)
-#define IS_QUAD_OP           (1ULL << kIsQuadOp)
-#define IS_QUIN_OP           (1ULL << kIsQuinOp)
-#define IS_SEXTUPLE_OP       (1ULL << kIsSextupleOp)
-#define IS_STORE             (1ULL << kMemStore)
-#define IS_TERTIARY_OP       (1ULL << kIsTertiaryOp)
-#define IS_UNARY_OP          (1ULL << kIsUnaryOp)
-#define IS_VOLATILE          (1ULL << kMemVolatile)
-#define NEEDS_FIXUP          (1ULL << kPCRelFixup)
-#define NO_OPERAND           (1ULL << kNoOperand)
-#define REG_DEF0             (1ULL << kRegDef0)
-#define REG_DEF1             (1ULL << kRegDef1)
-#define REG_DEF2             (1ULL << kRegDef2)
-#define REG_DEFA             (1ULL << kRegDefA)
-#define REG_DEFD             (1ULL << kRegDefD)
-#define REG_DEF_FPCS_LIST0   (1ULL << kRegDefFPCSList0)
-#define REG_DEF_FPCS_LIST2   (1ULL << kRegDefFPCSList2)
-#define REG_DEF_LIST0        (1ULL << kRegDefList0)
-#define REG_DEF_LIST1        (1ULL << kRegDefList1)
-#define REG_DEF_LR           (1ULL << kRegDefLR)
-#define REG_DEF_SP           (1ULL << kRegDefSP)
-#define REG_USE0             (1ULL << kRegUse0)
-#define REG_USE1             (1ULL << kRegUse1)
-#define REG_USE2             (1ULL << kRegUse2)
-#define REG_USE3             (1ULL << kRegUse3)
-#define REG_USE4             (1ULL << kRegUse4)
-#define REG_USEA             (1ULL << kRegUseA)
-#define REG_USEC             (1ULL << kRegUseC)
-#define REG_USED             (1ULL << kRegUseD)
-#define REG_USEB             (1ULL << kRegUseB)
-#define REG_USE_FPCS_LIST0   (1ULL << kRegUseFPCSList0)
-#define REG_USE_FPCS_LIST2   (1ULL << kRegUseFPCSList2)
-#define REG_USE_LIST0        (1ULL << kRegUseList0)
-#define REG_USE_LIST1        (1ULL << kRegUseList1)
-#define REG_USE_LR           (1ULL << kRegUseLR)
-#define REG_USE_PC           (1ULL << kRegUsePC)
-#define REG_USE_SP           (1ULL << kRegUseSP)
-#define SETS_CCODES          (1ULL << kSetsCCodes)
-#define USES_CCODES          (1ULL << kUsesCCodes)
-#define USE_FP_STACK         (1ULL << kUseFpStack)
-#define REG_USE_LO           (1ULL << kUseLo)
-#define REG_USE_HI           (1ULL << kUseHi)
-#define REG_DEF_LO           (1ULL << kDefLo)
-#define REG_DEF_HI           (1ULL << kDefHi)
-#define SCALED_OFFSET_X0     (1ULL << kMemScaledx0)
-#define SCALED_OFFSET_X2     (1ULL << kMemScaledx2)
-#define SCALED_OFFSET_X4     (1ULL << kMemScaledx4)
-
-// Special load/stores
-#define IS_LOADX             (IS_LOAD | IS_VOLATILE)
-#define IS_LOAD_OFF          (IS_LOAD | SCALED_OFFSET_X0)
-#define IS_LOAD_OFF2         (IS_LOAD | SCALED_OFFSET_X2)
-#define IS_LOAD_OFF4         (IS_LOAD | SCALED_OFFSET_X4)
-
-#define IS_STOREX            (IS_STORE | IS_VOLATILE)
-#define IS_STORE_OFF         (IS_STORE | SCALED_OFFSET_X0)
-#define IS_STORE_OFF2        (IS_STORE | SCALED_OFFSET_X2)
-#define IS_STORE_OFF4        (IS_STORE | SCALED_OFFSET_X4)
-
-// Common combo register usage patterns.
-#define REG_DEF01            (REG_DEF0 | REG_DEF1)
-#define REG_DEF012           (REG_DEF0 | REG_DEF1 | REG_DEF2)
-#define REG_DEF01_USE2       (REG_DEF0 | REG_DEF1 | REG_USE2)
-#define REG_DEF0_USE01       (REG_DEF0 | REG_USE01)
-#define REG_DEF0_USE0        (REG_DEF0 | REG_USE0)
-#define REG_DEF0_USE12       (REG_DEF0 | REG_USE12)
-#define REG_DEF0_USE123      (REG_DEF0 | REG_USE123)
-#define REG_DEF0_USE1        (REG_DEF0 | REG_USE1)
-#define REG_DEF0_USE2        (REG_DEF0 | REG_USE2)
-#define REG_DEFAD_USEAD      (REG_DEFAD_USEA | REG_USED)
-#define REG_DEFAD_USEA       (REG_DEFA_USEA | REG_DEFD)
-#define REG_DEFA_USEA        (REG_DEFA | REG_USEA)
-#define REG_USE012           (REG_USE01 | REG_USE2)
-#define REG_USE014           (REG_USE01 | REG_USE4)
-#define REG_USE01            (REG_USE0 | REG_USE1)
-#define REG_USE02            (REG_USE0 | REG_USE2)
-#define REG_USE12            (REG_USE1 | REG_USE2)
-#define REG_USE23            (REG_USE2 | REG_USE3)
-#define REG_USE123           (REG_USE1 | REG_USE2 | REG_USE3)
-
-/*
- * Assembly is an iterative process, and usually terminates within
- * two or three passes.  This should be high enough to handle bizarre
- * cases, but detect an infinite loop bug.
- */
-#define MAX_ASSEMBLER_RETRIES 50
-
-class BasicBlock;
-class BitVector;
-struct CallInfo;
-struct CompilationUnit;
-struct CompilerTemp;
-struct InlineMethod;
-class MIR;
-struct LIR;
-struct RegisterInfo;
-class DexFileMethodInliner;
-class MIRGraph;
-class MirMethodLoweringInfo;
-class MirSFieldLoweringInfo;
-
-typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int,
-                            const MethodReference& target_method,
-                            uint32_t method_idx, uintptr_t direct_code,
-                            uintptr_t direct_method, InvokeType type);
-
-typedef ArenaVector<uint8_t> CodeBuffer;
-typedef uint32_t CodeOffset;           // Native code offset in bytes.
-
-struct UseDefMasks {
-  const ResourceMask* use_mask;        // Resource mask for use.
-  const ResourceMask* def_mask;        // Resource mask for def.
-};
-
-struct AssemblyInfo {
-  LIR* pcrel_next;           // Chain of LIR nodes needing pc relative fixups.
-};
-
-struct LIR {
-  CodeOffset offset;             // Offset of this instruction.
-  NarrowDexOffset dalvik_offset;   // Offset of Dalvik opcode in code units (16-bit words).
-  int16_t opcode;
-  LIR* next;
-  LIR* prev;
-  LIR* target;
-  struct {
-    unsigned int alias_info:17;  // For Dalvik register disambiguation.
-    bool is_nop:1;               // LIR is optimized away.
-    unsigned int size:4;         // Note: size of encoded instruction is in bytes.
-    bool use_def_invalid:1;      // If true, masks should not be used.
-    unsigned int generation:1;   // Used to track visitation state during fixup pass.
-    unsigned int fixup:8;        // Fixup kind.
-  } flags;
-  union {
-    UseDefMasks m;               // Use & Def masks used during optimization.
-    AssemblyInfo a;              // Instruction info used during assembly phase.
-  } u;
-  int32_t operands[5];           // [0..4] = [dest, src1, src2, extra, extra2].
-};
-
-// Utility macros to traverse the LIR list.
-#define NEXT_LIR(lir) (lir->next)
-#define PREV_LIR(lir) (lir->prev)
-
-// Defines for alias_info (tracks Dalvik register references).
-#define DECODE_ALIAS_INFO_REG(X)        (X & 0xffff)
-#define DECODE_ALIAS_INFO_WIDE_FLAG     (0x10000)
-#define DECODE_ALIAS_INFO_WIDE(X)       ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
-#define ENCODE_ALIAS_INFO(REG, ISWIDE)  (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0))
-
-#define ENCODE_REG_PAIR(low_reg, high_reg) ((low_reg & 0xff) | ((high_reg & 0xff) << 8))
-#define DECODE_REG_PAIR(both_regs, low_reg, high_reg) \
-  do { \
-    low_reg = both_regs & 0xff; \
-    high_reg = (both_regs >> 8) & 0xff; \
-  } while (false)
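-
-// A round-trip sketch (an illustration, not from the original file) of the alias-info
-// and reg-pair macros above; the concrete register numbers are arbitrary examples.
-static inline void AliasInfoMacroExample() {
-  int info = ENCODE_ALIAS_INFO(5, true);       // Dalvik v5, wide.
-  DCHECK_EQ(DECODE_ALIAS_INFO_REG(info), 5);   // Register number recovered.
-  DCHECK_EQ(DECODE_ALIAS_INFO_WIDE(info), 1);  // Wideness flag recovered.
-  int low, high;
-  DECODE_REG_PAIR(ENCODE_REG_PAIR(2, 3), low, high);
-  DCHECK_EQ(low, 2);   // Low half of the pair.
-  DCHECK_EQ(high, 3);  // High half of the pair.
-}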
-
-// Mask to denote sreg as the start of a 64-bit item.  Must not interfere with low 16 bits.
-#define STARTING_WIDE_SREG 0x10000
-
-class Mir2Lir {
-  public:
-    static constexpr bool kFailOnSizeError = true && kIsDebugBuild;
-    static constexpr bool kReportSizeError = true && kIsDebugBuild;
-
-    // TODO: If necessary, this could be made target-dependent.
-    static constexpr uint16_t kSmallSwitchThreshold = 5;
-
-    /*
-     * Auxiliary information describing the location of data embedded in the Dalvik
-     * byte code stream.
-     */
-    struct EmbeddedData {
-      CodeOffset offset;          // Code offset of data block.
-      const uint16_t* table;      // Original dex data.
-      DexOffset vaddr;            // Dalvik offset of parent opcode.
-    };
-
-    struct FillArrayData : EmbeddedData {
-      int32_t size;
-    };
-
-    struct SwitchTable : EmbeddedData {
-      LIR* anchor;                // Reference instruction for relative offsets.
-      MIR* switch_mir;            // The switch mir.
-    };
-
-    /* Static register use counts */
-    struct RefCounts {
-      int count;
-      int s_reg;
-    };
-
-    /*
-     * Data structure tracking the mapping between a Dalvik value (32 or 64 bits)
-     * and native register storage.  The primary purpose is to reuse previously
-     * loaded values, if possible, and otherwise to keep the value in register
-     * storage as long as possible.
-     *
-     * NOTE 1: wide_value refers to the width of the Dalvik value contained in
-     * this register (or pair).  For example, a 64-bit register containing a 32-bit
-     * Dalvik value would have wide_value==false even though the storage container itself
-     * is wide.  Similarly, a 32-bit register containing half of a 64-bit Dalvik value
-     * would have wide_value==true (and additionally would have its partner field set to the
-     * other half, whose wide_value field would also be true).
-     *
-     * NOTE 2: In the case of a register pair, you can determine which of the partners
-     * is the low half by looking at the s_reg names.  The high s_reg will equal low_sreg + 1.
-     *
-     * NOTE 3: In the case of a 64-bit register holding a Dalvik wide value, wide_value
-     * will be true and partner==self.  s_reg refers to the low-order word of the Dalvik
-     * value, and the s_reg of the high word is implied (s_reg + 1).
-     *
-     * NOTE 4: The reg and is_temp fields should always be correct.  If is_temp is false no
-     * other fields have meaning. [perhaps not true, wide should work for promoted regs?]
-     * If is_temp==true and live==false, no other fields have
-     * meaning.  If is_temp==true and live==true, wide_value, partner, dirty, s_reg, def_start
-     * and def_end describe the relationship between the temp register/register pair and
-     * the Dalvik value[s] described by s_reg/s_reg+1.
-     *
-     * The fields used_storage, master_storage and storage_mask are used to track allocation
-     * in light of potential aliasing.  For example, consider Arm's d2, which overlaps s4 & s5.
-     * d2's storage mask would be 0x00000003, the two low-order bits denoting 64 bits of
-     * storage use.  For s4, it would be 0x00000001; for s5, 0x00000002.  These values should not
-     * change once initialized.  The "used_storage" field tracks current allocation status.
-     * Although each record contains this field, only the field from the largest member of
-     * an aliased group is used.  In our case, it would be d2's.  The master_storage pointer
-     * of d2, s4 and s5 would all point to d2's used_storage field.  Each bit in used_storage
-     * represents 32 bits of storage.  d2's used_storage would be initialized to 0xfffffffc.
-     * Then, if we wanted to determine whether s4 could be allocated, we would "and"
-     * s4's storage_mask with s4's *master_storage.  If the result is zero, s4 is free; to
-     * allocate it: *master_storage |= storage_mask.  To free it: *master_storage &= ~storage_mask.
-     *
-     * For an X86 vector register example, storage_mask would be:
-     *    0x00000001 for 32-bit view of xmm1
-     *    0x00000003 for 64-bit view of xmm1
-     *    0x0000000f for 128-bit view of xmm1
-     *    0x000000ff for 256-bit view of ymm1   // future expansion, if needed
-     *    0x0000ffff for 512-bit view of ymm1   // future expansion, if needed
-     *    0xffffffff for 1024-bit view of ymm1  // future expansion, if needed
-     *
-     * The "liveness" of a register is handled in a similar way.  The liveness_ storage is
-     * held in the widest member of an aliased set.  Note, though, that for a temp register to be
-     * reused as live, it must both be marked live and the associated SReg() must match the
-     * desired s_reg.  This gets a little complicated when dealing with aliased registers.  All
-     * members of an aliased set will share the same liveness flags, but each will individually
-     * maintain s_reg_.  In this way we can know that at least one member of an
-     * aliased set is live, but will only fully match on the appropriate alias view.  For example,
-     * if Arm d1 is live as a double and has s_reg_ set to Dalvik v8 (which also implies v9
-     * because it is wide), its aliases s2 and s3 will show as live, but will have
-     * s_reg_ == INVALID_SREG.  An attempt to later AllocLiveReg() of v9 with a single-precision
-     * view will fail because although s3's liveness bit is set, its s_reg_ will not match v9.
-     * This will cause all members of the aliased set to be clobbered and AllocLiveReg() will
-     * report that v9 is currently not live as a single (which is what we want).
-     *
-     * NOTE: the x86 usage is still somewhat in flux.  There are competing notions of how
-     * to treat xmm registers:
-     *     1. Treat them all as 128-bits wide, but denote how much data used via bytes field.
-     *         o This more closely matches reality, but means you'd need to be able to get
-     *           to the associated RegisterInfo struct to figure out how it's being used.
-     *         o This is how 64-bit core registers will be used - always 64 bits, but the
-     *           "bytes" field will be 4 for 32-bit usage and 8 for 64-bit usage.
-     *     2. View the xmm registers based on contents.
-     *         o A single in a xmm2 register would be k32BitVector, while a double in xmm2 would
-     *           be a k64BitVector.
-     *         o Note that the two uses above would be considered distinct registers (but with
-     *           the aliasing mechanism, we could detect interference).
-     *         o This is how aliased double and single float registers will be handled on
-     *           Arm and MIPS.
-     * Working plan is, for all targets, to follow mechanism 1 for 64-bit core registers, and
-     * mechanism 2 for aliased float registers and x86 vector registers.
-     */
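-
-    // A minimal standalone sketch (an illustration, not part of the original class) of
-    // the used_storage/storage_mask protocol described above, using the d2/s4/s5
-    // example: each bit of the master's used_storage covers 32 bits of physical storage.
-    static bool StorageMaskAllocExample() {
-      uint32_t d2_used_storage = 0xfffffffc;  // Bits 0..1 (d2's 64 bits) start free.
-      const uint32_t s4_storage_mask = 0x00000001;  // Low single within d2.
-      bool s4_free = (d2_used_storage & s4_storage_mask) == 0;  // True: s4 available.
-      d2_used_storage |= s4_storage_mask;     // Allocate: mark s4's storage in use.
-      d2_used_storage &= ~s4_storage_mask;    // Free: clear s4's storage bit again.
-      return s4_free;
-    }
-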
-    class RegisterInfo : public ArenaObject<kArenaAllocRegAlloc> {
-     public:
-      RegisterInfo(RegStorage r, const ResourceMask& mask = kEncodeAll);
-      ~RegisterInfo() {}
-
-      static const uint32_t k32SoloStorageMask     = 0x00000001;
-      static const uint32_t kLowSingleStorageMask  = 0x00000001;
-      static const uint32_t kHighSingleStorageMask = 0x00000002;
-      static const uint32_t k64SoloStorageMask     = 0x00000003;
-      static const uint32_t k128SoloStorageMask    = 0x0000000f;
-      static const uint32_t k256SoloStorageMask    = 0x000000ff;
-      static const uint32_t k512SoloStorageMask    = 0x0000ffff;
-      static const uint32_t k1024SoloStorageMask   = 0xffffffff;
-
-      bool InUse() { return (storage_mask_ & master_->used_storage_) != 0; }
-      void MarkInUse() { master_->used_storage_ |= storage_mask_; }
-      void MarkFree() { master_->used_storage_ &= ~storage_mask_; }
-      // No part of the containing storage is live in this view.
-      bool IsDead() { return (master_->liveness_ & storage_mask_) == 0; }
-      // Liveness of this view matches.  Note: not equivalent to !IsDead().
-      bool IsLive() { return (master_->liveness_ & storage_mask_) == storage_mask_; }
-      void MarkLive(int s_reg) {
-        // TODO: Anything useful to assert here?
-        s_reg_ = s_reg;
-        master_->liveness_ |= storage_mask_;
-      }
-      void MarkDead() {
-        if (SReg() != INVALID_SREG) {
-          s_reg_ = INVALID_SREG;
-          master_->liveness_ &= ~storage_mask_;
-          ResetDefBody();
-        }
-      }
-      RegStorage GetReg() { return reg_; }
-      void SetReg(RegStorage reg) { reg_ = reg; }
-      bool IsTemp() { return is_temp_; }
-      void SetIsTemp(bool val) { is_temp_ = val; }
-      bool IsWide() { return wide_value_; }
-      void SetIsWide(bool val) {
-        wide_value_ = val;
-        if (!val) {
-          // If not wide, reset partner to self.
-          SetPartner(GetReg());
-        }
-      }
-      bool IsDirty() { return dirty_; }
-      void SetIsDirty(bool val) { dirty_ = val; }
-      RegStorage Partner() { return partner_; }
-      void SetPartner(RegStorage partner) { partner_ = partner; }
-      int SReg() { return (!IsTemp() || IsLive()) ? s_reg_ : INVALID_SREG; }
-      const ResourceMask& DefUseMask() { return def_use_mask_; }
-      void SetDefUseMask(const ResourceMask& def_use_mask) { def_use_mask_ = def_use_mask; }
-      RegisterInfo* Master() { return master_; }
-      void SetMaster(RegisterInfo* master) {
-        master_ = master;
-        if (master != this) {
-          master_->aliased_ = true;
-          DCHECK(alias_chain_ == nullptr);
-          alias_chain_ = master_->alias_chain_;
-          master_->alias_chain_ = this;
-        }
-      }
-      bool IsAliased() { return aliased_; }
-      RegisterInfo* GetAliasChain() { return alias_chain_; }
-      uint32_t StorageMask() { return storage_mask_; }
-      void SetStorageMask(uint32_t storage_mask) { storage_mask_ = storage_mask; }
-      LIR* DefStart() { return def_start_; }
-      void SetDefStart(LIR* def_start) { def_start_ = def_start; }
-      LIR* DefEnd() { return def_end_; }
-      void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
-      void ResetDefBody() { def_start_ = def_end_ = nullptr; }
-      // Find member of aliased set matching storage_used; return null if none.
-      RegisterInfo* FindMatchingView(uint32_t storage_used) {
-        RegisterInfo* res = Master();
-        for (; res != nullptr; res = res->GetAliasChain()) {
-          if (res->StorageMask() == storage_used)
-            break;
-        }
-        return res;
-      }
-
-     private:
-      RegStorage reg_;
-      bool is_temp_;               // Can allocate as temp?
-      bool wide_value_;            // Holds a Dalvik wide value (either itself, or part of a pair).
-      bool dirty_;                 // If live, is it dirty?
-      bool aliased_;               // Is this the master for other aliased RegisterInfo's?
-      RegStorage partner_;         // If wide_value, other reg of pair or self if 64-bit register.
-      int s_reg_;                  // Name of live value.
-      ResourceMask def_use_mask_;  // Resources for this element.
-      uint32_t used_storage_;      // 1 bit per 4 bytes of storage. Unused by aliases.
-      uint32_t liveness_;          // 1 bit per 4 bytes of storage. Unused by aliases.
-      RegisterInfo* master_;       // Pointer to controlling storage mask.
-      uint32_t storage_mask_;      // Track allocation of sub-units.
-      LIR *def_start_;             // Starting inst in last def sequence.
-      LIR *def_end_;               // Ending inst in last def sequence.
-      RegisterInfo* alias_chain_;  // Chain of aliased registers.
-    };
-
-    class RegisterPool : public DeletableArenaObject<kArenaAllocRegAlloc> {
-     public:
-      RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
-                   const ArrayRef<const RegStorage>& core_regs,
-                   const ArrayRef<const RegStorage>& core64_regs,
-                   const ArrayRef<const RegStorage>& sp_regs,
-                   const ArrayRef<const RegStorage>& dp_regs,
-                   const ArrayRef<const RegStorage>& reserved_regs,
-                   const ArrayRef<const RegStorage>& reserved64_regs,
-                   const ArrayRef<const RegStorage>& core_temps,
-                   const ArrayRef<const RegStorage>& core64_temps,
-                   const ArrayRef<const RegStorage>& sp_temps,
-                   const ArrayRef<const RegStorage>& dp_temps);
-      ~RegisterPool() {}
-      void ResetNextTemp() {
-        next_core_reg_ = 0;
-        next_sp_reg_ = 0;
-        next_dp_reg_ = 0;
-      }
-      ArenaVector<RegisterInfo*> core_regs_;
-      int next_core_reg_;
-      ArenaVector<RegisterInfo*> core64_regs_;
-      int next_core64_reg_;
-      ArenaVector<RegisterInfo*> sp_regs_;    // Single precision float.
-      int next_sp_reg_;
-      ArenaVector<RegisterInfo*> dp_regs_;    // Double precision float.
-      int next_dp_reg_;
-      ArenaVector<RegisterInfo*>* ref_regs_;  // Points to core_regs_ or core64_regs_
-      int* next_ref_reg_;
-
-     private:
-      Mir2Lir* const m2l_;
-    };
-
-    struct PromotionMap {
-      RegLocationType core_location:3;
-      uint8_t core_reg;
-      RegLocationType fp_location:3;
-      uint8_t fp_reg;
-      bool first_in_pair;
-    };
-
-    //
-    // Slow paths.  This object is used to generate a sequence of code that is executed on
-    // the slow path.  For example, resolving a string or class is slow, and the slow path
-    // executes only once (after that the value is resolved and doesn't need to be resolved
-    // again).  We want slow paths to be placed out-of-line, and not to require a (probably
-    // mispredicted) conditional forward branch over them.
-    //
-    // If you want to create a slow path, declare a class derived from LIRSlowPath and provide
-    // the Compile() function that will be called near the end of the code generated by the
-    // method.
-    //
-    // The basic flow for a slow path is:
-    //
-    //     CMP reg, #value
-    //     BEQ fromfast
-    //   cont:
-    //     ...
-    //     fast path code
-    //     ...
-    //     more code
-    //     ...
-    //     RETURN
-    //
-    //   fromfast:
-    //     ...
-    //     slow path code
-    //     ...
-    //     B cont
-    //
-    // So we need two labels and two branches.  The first label (called fromfast) is the
-    // target of the conditional branch into the slow path code.  The second label (called
-    // cont) is used as an unconditional branch target for getting back to the code after
-    // the slow path has completed.
-    //
-
-    class LIRSlowPath : public ArenaObject<kArenaAllocSlowPaths> {
-     public:
-      LIRSlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont = nullptr)
-          : m2l_(m2l), cu_(m2l->cu_),
-            current_dex_pc_(m2l->current_dalvik_offset_), current_mir_(m2l->current_mir_),
-            fromfast_(fromfast), cont_(cont) {
-      }
-      virtual ~LIRSlowPath() {}
-      virtual void Compile() = 0;
-
-      LIR *GetContinuationLabel() {
-        return cont_;
-      }
-
-      LIR *GetFromFast() {
-        return fromfast_;
-      }
-
-     protected:
-      LIR* GenerateTargetLabel(int opcode = kPseudoTargetLabel);
-
-      Mir2Lir* const m2l_;
-      CompilationUnit* const cu_;
-      const DexOffset current_dex_pc_;
-      MIR* current_mir_;
-      LIR* const fromfast_;
-      LIR* const cont_;
-    };
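-
-    // A minimal sketch of the pattern described above, modeled on the divide-by-zero
-    // check used elsewhere in Quick (only the LIRSlowPath members are guaranteed by
-    // this header; the rest is illustrative):
-    //
-    //   class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath {
-    //    public:
-    //     DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch)
-    //         : LIRSlowPath(m2l, branch) {}
-    //
-    //     void Compile() OVERRIDE {
-    //       m2l_->ResetRegPool();
-    //       m2l_->ResetDefTracking();
-    //       GenerateTargetLabel(kPseudoThrowTarget);            // Emits the "fromfast" label.
-    //       m2l_->CallRuntimeHelper(kQuickThrowDivZero, true);  // Throws; no "cont" branch back.
-    //     }
-    //   };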
-
-    class SuspendCheckSlowPath;
-    class SpecialSuspendCheckSlowPath;
-
-    // Helper class for changing mem_ref_type_ until the end of the current scope. See mem_ref_type_.
-    class ScopedMemRefType {
-     public:
-      ScopedMemRefType(Mir2Lir* m2l, ResourceMask::ResourceBit new_mem_ref_type)
-          : m2l_(m2l),
-            old_mem_ref_type_(m2l->mem_ref_type_) {
-        m2l_->mem_ref_type_ = new_mem_ref_type;
-      }
-
-      ~ScopedMemRefType() {
-        m2l_->mem_ref_type_ = old_mem_ref_type_;
-      }
-
-     private:
-      Mir2Lir* const m2l_;
-      ResourceMask::ResourceBit old_mem_ref_type_;
-
-      DISALLOW_COPY_AND_ASSIGN(ScopedMemRefType);
-    };
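-
-    // Typical use (sketch): accesses emitted inside the scope are marked as Dalvik
-    // vreg references; rl_src and r_tmp stand in for a real location and temp here.
-    //
-    //   {
-    //     ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-    //     Load32Disp(TargetPtrReg(kSp), SRegOffset(rl_src.s_reg_low), r_tmp);
-    //   }  // Destructor restores the previous mem_ref_type_.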
-
-    virtual ~Mir2Lir() {}
-
-    /**
-     * @brief Decodes the LIR offset.
-     * @return Returns the scaled offset of LIR.
-     */
-    virtual size_t GetInstructionOffset(LIR* lir);
-
-    int32_t s4FromSwitchData(const void* switch_data) {
-      return *reinterpret_cast<const int32_t*>(switch_data);
-    }
-
-    /*
-     * TODO: this is a trace JIT vestige, and its use should be reconsidered.  At the time
-     * it was introduced, it was intended to be a quick best guess of type without having to
-     * take the time to do type analysis.  Currently, though, we have a much better idea of
-     * the types of Dalvik virtual registers.  Instead of using this for a best guess, why not
-     * just use our knowledge of type to select the most appropriate register class?
-     */
-    RegisterClass RegClassBySize(OpSize size) {
-      if (size == kReference) {
-        return kRefReg;
-      } else {
-        return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte ||
-                size == kSignedByte) ? kCoreReg : kAnyReg;
-      }
-    }
-
-    size_t CodeBufferSizeInBytes() {
-      // Element count times element size; the two are equal while the buffer holds bytes.
-      return code_buffer_.size() * sizeof(code_buffer_[0]);
-    }
-
-    static bool IsPseudoLirOp(int opcode) {
-      return (opcode < 0);
-    }
-
-    /*
-     * LIR operands are 32-bit integers.  Sometimes, (especially for managing
-     * instructions which require PC-relative fixups), we need the operands to carry
-     * pointers.  To do this, we assign these pointers an index in pointer_storage_, and
-     * hold that index in the operand array.
-     * TUNING: If use of these utilities becomes more common on 32-bit builds, it
-     * may be worth conditionally-compiling a set of identity functions here.
-     */
-    template <typename T>
-    uint32_t WrapPointer(const T* pointer) {
-      uint32_t res = pointer_storage_.size();
-      pointer_storage_.push_back(pointer);
-      return res;
-    }
-
-    template <typename T>
-    const T* UnwrapPointer(size_t index) {
-      return reinterpret_cast<const T*>(pointer_storage_[index]);
-    }
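-
-    // Illustrative use (sketch; the opcode name is hypothetical): stash a pointer in
-    // a 32-bit LIR operand and recover it later, e.g. during assembly.
-    //
-    //   uint32_t idx = WrapPointer(&target_method);  // Index into pointer_storage_.
-    //   LIR* insn = RawLIR(current_dalvik_offset_, kHypotheticalOpcode, idx);
-    //   ...
-    //   const MethodReference* ref =
-    //       UnwrapPointer<MethodReference>(insn->operands[0]);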
-
-    // strdup(), but allocates from the arena.
-    char* ArenaStrdup(const char* str) {
-      size_t len = strlen(str) + 1;
-      char* res = arena_->AllocArray<char>(len, kArenaAllocMisc);
-      if (res != nullptr) {
-        strncpy(res, str, len);
-      }
-      return res;
-    }
-
-    // Shared by all targets - implemented in codegen_util.cc
-    void AppendLIR(LIR* lir);
-    void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
-    void InsertLIRAfter(LIR* current_lir, LIR* new_lir);
-
-    /**
-     * @brief Provides the maximum number of compiler temporaries that the backend can/wants
-     * to place in a frame.
-     * @return Returns the maximum number of compiler temporaries.
-     */
-    size_t GetMaxPossibleCompilerTemps() const;
-
-    /**
-     * @brief Provides the number of bytes needed in frame for spilling of compiler temporaries.
-     * @return Returns the size in bytes for space needed for compiler temporary spill region.
-     */
-    size_t GetNumBytesForCompilerTempSpillRegion();
-
-    DexOffset GetCurrentDexPc() const {
-      return current_dalvik_offset_;
-    }
-
-    RegisterClass ShortyToRegClass(char shorty_type);
-    int ComputeFrameSize();
-    void Materialize();
-    virtual CompiledMethod* GetCompiledMethod();
-    void MarkSafepointPC(LIR* inst);
-    void MarkSafepointPCAfter(LIR* after);
-    void SetupResourceMasks(LIR* lir);
-    void SetMemRefType(LIR* lir, bool is_load, int mem_type);
-    void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
-    void SetupRegMask(ResourceMask* mask, int reg);
-    void ClearRegMask(ResourceMask* mask, int reg);
-    void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
-    void EliminateLoad(LIR* lir, int reg_id);
-    void DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type);
-    void DumpPromotionMap();
-    void CodegenDump();
-    LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
-                int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = nullptr);
-    LIR* NewLIR0(int opcode);
-    LIR* NewLIR1(int opcode, int dest);
-    LIR* NewLIR2(int opcode, int dest, int src1);
-    LIR* NewLIR2NoDest(int opcode, int src, int info);
-    LIR* NewLIR3(int opcode, int dest, int src1, int src2);
-    LIR* NewLIR4(int opcode, int dest, int src1, int src2, int info);
-    LIR* NewLIR5(int opcode, int dest, int src1, int src2, int info1, int info2);
-    LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
-    LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
-    LIR* ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method);
-    LIR* ScanLiteralPoolClass(LIR* data_target, const DexFile& dex_file, uint32_t type_idx);
-    LIR* AddWordData(LIR* *constant_list_p, int value);
-    LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi);
-    void DumpSparseSwitchTable(const uint16_t* table);
-    void DumpPackedSwitchTable(const uint16_t* table);
-    void MarkBoundary(DexOffset offset, const char* inst_str);
-    void NopLIR(LIR* lir);
-    void UnlinkLIR(LIR* lir);
-    bool IsInexpensiveConstant(RegLocation rl_src);
-    ConditionCode FlipComparisonOrder(ConditionCode before);
-    ConditionCode NegateComparison(ConditionCode before);
-    virtual void InstallLiteralPools();
-    void InstallSwitchTables();
-    void InstallFillArrayData();
-    bool VerifyCatchEntries();
-    void CreateMappingTables();
-    void CreateNativeGcMap();
-    void CreateNativeGcMapWithoutRegisterPromotion();
-    int AssignLiteralOffset(CodeOffset offset);
-    int AssignSwitchTablesOffset(CodeOffset offset);
-    int AssignFillArrayDataOffset(CodeOffset offset);
-    LIR* InsertCaseLabel(uint32_t bbid, int keyVal);
-
-    // Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation.  No code generated.
-    virtual RegLocation NarrowRegLoc(RegLocation loc);
-
-    // Shared by all targets - implemented in local_optimizations.cc
-    void ConvertMemOpIntoMove(LIR* orig_lir, RegStorage dest, RegStorage src);
-    void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir);
-    void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir);
-    virtual void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir);
-
-    // Shared by all targets - implemented in ralloc_util.cc
-    int GetSRegHi(int lowSreg);
-    bool LiveOut(int s_reg);
-    void SimpleRegAlloc();
-    void ResetRegPool();
-    void CompilerInitPool(RegisterInfo* info, RegStorage* regs, int num);
-    void DumpRegPool(ArenaVector<RegisterInfo*>* regs);
-    void DumpCoreRegPool();
-    void DumpFpRegPool();
-    void DumpRegPools();
-    /* Mark a temp register as dead.  Does not affect allocation state. */
-    void Clobber(RegStorage reg);
-    void ClobberSReg(int s_reg);
-    void ClobberAliases(RegisterInfo* info, uint32_t clobber_mask);
-    int SRegToPMap(int s_reg);
-    void RecordCorePromotion(RegStorage reg, int s_reg);
-    RegStorage AllocPreservedCoreReg(int s_reg);
-    void RecordFpPromotion(RegStorage reg, int s_reg);
-    RegStorage AllocPreservedFpReg(int s_reg);
-    virtual RegStorage AllocPreservedSingle(int s_reg);
-    virtual RegStorage AllocPreservedDouble(int s_reg);
-    RegStorage AllocTempBody(ArenaVector<RegisterInfo*>& regs, int* next_temp, bool required);
-    virtual RegStorage AllocTemp(bool required = true);
-    virtual RegStorage AllocTempWide(bool required = true);
-    virtual RegStorage AllocTempRef(bool required = true);
-    virtual RegStorage AllocTempSingle(bool required = true);
-    virtual RegStorage AllocTempDouble(bool required = true);
-    virtual RegStorage AllocTypedTemp(bool fp_hint, int reg_class, bool required = true);
-    virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class, bool required = true);
-    void FlushReg(RegStorage reg);
-    void FlushRegWide(RegStorage reg);
-    RegStorage AllocLiveReg(int s_reg, int reg_class, bool wide);
-    RegStorage FindLiveReg(ArenaVector<RegisterInfo*>& regs, int s_reg);
-    virtual void FreeTemp(RegStorage reg);
-    virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
-    virtual bool IsLive(RegStorage reg);
-    virtual bool IsTemp(RegStorage reg);
-    bool IsPromoted(RegStorage reg);
-    bool IsDirty(RegStorage reg);
-    virtual void LockTemp(RegStorage reg);
-    void ResetDef(RegStorage reg);
-    void NullifyRange(RegStorage reg, int s_reg);
-    void MarkDef(RegLocation rl, LIR *start, LIR *finish);
-    void MarkDefWide(RegLocation rl, LIR *start, LIR *finish);
-    void ResetDefLoc(RegLocation rl);
-    void ResetDefLocWide(RegLocation rl);
-    void ResetDefTracking();
-    void ClobberAllTemps();
-    void FlushSpecificReg(RegisterInfo* info);
-    void FlushAllRegs();
-    bool RegClassMatches(int reg_class, RegStorage reg);
-    void MarkLive(RegLocation loc);
-    void MarkTemp(RegStorage reg);
-    void UnmarkTemp(RegStorage reg);
-    void MarkWide(RegStorage reg);
-    void MarkNarrow(RegStorage reg);
-    void MarkClean(RegLocation loc);
-    void MarkDirty(RegLocation loc);
-    void MarkInUse(RegStorage reg);
-    bool CheckCorePoolSanity();
-    virtual RegLocation UpdateLoc(RegLocation loc);
-    virtual RegLocation UpdateLocWide(RegLocation loc);
-    RegLocation UpdateRawLoc(RegLocation loc);
-
-    /**
-     * @brief Used to prepare a register location to receive a wide value.
-     * @see EvalLoc
-     * @param loc the location where the value will be stored.
-     * @param reg_class Type of register needed.
-     * @param update Whether the liveness information should be updated.
-     * @return Returns the properly typed temporary as a physical register pair.
-     */
-    virtual RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);
-
-    /**
-     * @brief Used to prepare a register location to receive a value.
-     * @param loc the location where the value will be stored.
-     * @param reg_class Type of register needed.
-     * @param update Whether the liveness information should be updated.
-     * @return Returns the properly typed temporary in a physical register.
-     */
-    virtual RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);
-
-    virtual void AnalyzeMIR(RefCounts* core_counts, MIR* mir, uint32_t weight);
-    virtual void CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs);
-    void DumpCounts(const RefCounts* arr, int size, const char* msg);
-    virtual void DoPromotion();
-    int VRegOffset(int v_reg);
-    int SRegOffset(int s_reg);
-    RegLocation GetReturnWide(RegisterClass reg_class);
-    RegLocation GetReturn(RegisterClass reg_class);
-    RegisterInfo* GetRegInfo(RegStorage reg);
-
-    // Shared by all targets - implemented in gen_common.cc.
-    void AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume = nullptr);
-    virtual bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
-                                  RegLocation rl_src, RegLocation rl_dest, int lit);
-    bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
-    bool HandleEasyFloatingPointDiv(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-    virtual void HandleSlowPaths();
-    void GenBarrier();
-    void GenDivZeroException();
-    // c_code holds condition code that's generated from testing divisor against 0.
-    void GenDivZeroCheck(ConditionCode c_code);
-    // reg holds divisor.
-    void GenDivZeroCheck(RegStorage reg);
-    void GenArrayBoundsCheck(RegStorage index, RegStorage length);
-    void GenArrayBoundsCheck(int32_t index, RegStorage length);
-    LIR* GenNullCheck(RegStorage reg);
-    void MarkPossibleNullPointerException(int opt_flags);
-    void MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after);
-    void MarkPossibleStackOverflowException();
-    void ForceImplicitNullCheck(RegStorage reg, int opt_flags);
-    LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
-    LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
-    virtual void GenImplicitNullCheck(RegStorage reg, int opt_flags);
-    void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, RegLocation rl_src2,
-                             LIR* taken);
-    void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken);
-    virtual void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
-    virtual void GenLongToInt(RegLocation rl_dest, RegLocation rl_src);
-    void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
-                         RegLocation rl_src);
-    void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
-                     RegLocation rl_src);
-    void GenFilledNewArray(CallInfo* info);
-    void GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation rl_src);
-    void GenSput(MIR* mir, RegLocation rl_src, OpSize size);
-    // Get entrypoints are type-specific; size alone is not sufficient to safely infer the
-    // entrypoint.
-    void GenSget(MIR* mir, RegLocation rl_dest, OpSize size, Primitive::Type type);
-    void GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type,
-                 RegLocation rl_dest, RegLocation rl_obj);
-    void GenIPut(MIR* mir, int opt_flags, OpSize size,
-                 RegLocation rl_src, RegLocation rl_obj);
-    void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
-                        RegLocation rl_src);
-
-    void GenConstClass(uint32_t type_idx, RegLocation rl_dest);
-    void GenConstString(uint32_t string_idx, RegLocation rl_dest);
-    void GenNewInstance(uint32_t type_idx, RegLocation rl_dest);
-    void GenThrow(RegLocation rl_src);
-    void GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
-    void GenCheckCast(int opt_flags, uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src);
-    void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
-                      RegLocation rl_src1, RegLocation rl_src2);
-    virtual void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                        RegLocation rl_src1, RegLocation rl_shift);
-    void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
-                          RegLocation rl_src, int lit);
-    virtual void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                                RegLocation rl_src1, RegLocation rl_src2, int flags);
-    void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src,
-                           RegisterClass return_reg_class);
-    void GenSuspendTest(int opt_flags);
-    void GenSuspendTestAndBranch(int opt_flags, LIR* target);
-
-    // This will be overridden by x86 implementation.
-    virtual void GenConstWide(RegLocation rl_dest, int64_t value);
-    virtual void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
-                       RegLocation rl_src1, RegLocation rl_src2, int flags);
-
-    // Shared by all targets - implemented in gen_invoke.cc.
-    LIR* CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc,
-                    bool use_link = true);
-    RegStorage CallHelperSetup(QuickEntrypointEnum trampoline);
-
-    void CallRuntimeHelper(QuickEntrypointEnum trampoline, bool safepoint_pc);
-    void CallRuntimeHelperImm(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc);
-    void CallRuntimeHelperReg(QuickEntrypointEnum trampoline, RegStorage arg0, bool safepoint_pc);
-    void CallRuntimeHelperRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
-                                      bool safepoint_pc);
-    void CallRuntimeHelperImmImm(QuickEntrypointEnum trampoline, int arg0, int arg1,
-                                 bool safepoint_pc);
-    void CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, int arg0, RegLocation arg1,
-                                         bool safepoint_pc);
-    void CallRuntimeHelperRegLocationImm(QuickEntrypointEnum trampoline, RegLocation arg0, int arg1,
-                                         bool safepoint_pc);
-    void CallRuntimeHelperImmReg(QuickEntrypointEnum trampoline, int arg0, RegStorage arg1,
-                                 bool safepoint_pc);
-    void CallRuntimeHelperRegImm(QuickEntrypointEnum trampoline, RegStorage arg0, int arg1,
-                                 bool safepoint_pc);
-    void CallRuntimeHelperImmMethod(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc);
-    void CallRuntimeHelperRegMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
-                                    bool safepoint_pc);
-    void CallRuntimeHelperRegRegLocationMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
-                                               RegLocation arg1, bool safepoint_pc);
-    void CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
-                                                 RegLocation arg1, bool safepoint_pc);
-    void CallRuntimeHelperRegReg(QuickEntrypointEnum trampoline, RegStorage arg0, RegStorage arg1,
-                                 bool safepoint_pc);
-    void CallRuntimeHelperRegRegImm(QuickEntrypointEnum trampoline, RegStorage arg0,
-                                    RegStorage arg1, int arg2, bool safepoint_pc);
-    void CallRuntimeHelperImmRegLocationMethod(QuickEntrypointEnum trampoline, int arg0,
-                                               RegLocation arg1, bool safepoint_pc);
-    void CallRuntimeHelperImmImmMethod(QuickEntrypointEnum trampoline, int arg0, int arg1,
-                                       bool safepoint_pc);
-    void CallRuntimeHelperImmRegLocationRegLocation(QuickEntrypointEnum trampoline, int arg0,
-                                                    RegLocation arg1, RegLocation arg2,
-                                                    bool safepoint_pc);
-    void CallRuntimeHelperRegLocationRegLocationRegLocation(QuickEntrypointEnum trampoline,
-                                                            RegLocation arg0, RegLocation arg1,
-                                                            RegLocation arg2,
-                                                            bool safepoint_pc);
-    void CallRuntimeHelperRegLocationRegLocationRegLocationRegLocation(
-        QuickEntrypointEnum trampoline, RegLocation arg0, RegLocation arg1,
-        RegLocation arg2, RegLocation arg3, bool safepoint_pc);
-
-    void GenInvoke(CallInfo* info);
-    void GenInvokeNoInline(CallInfo* info);
-    virtual NextCallInsn GetNextSDCallInsn() = 0;
-
-    /*
-     * @brief Generate the actual call insn based on the method info.
-     * @param method_info the lowering info for the method call.
-     * @returns The call instruction.
-     */
-    virtual LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) = 0;
-
-    virtual void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
-    virtual int GenDalvikArgs(CallInfo* info, int call_state, LIR** pcrLabel,
-                      NextCallInsn next_call_insn,
-                      const MethodReference& target_method,
-                      uint32_t vtable_idx,
-                      uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
-                      bool skip_this);
-    virtual int GenDalvikArgsBulkCopy(CallInfo* info, int first, int count);
-    virtual void GenDalvikArgsFlushPromoted(CallInfo* info, int start);
-    /**
-     * @brief Used to determine the register location of the destination.
-     * @details This is needed during generation of inline intrinsics because it finds the
-     * destination of the return value, either the physical register or the target of move-result.
-     * @param info Information about the invoke.
-     * @return Returns the destination location.
-     */
-    RegLocation InlineTarget(CallInfo* info);
-
-    /**
-     * @brief Used to determine the wide register location of destination.
-     * @see InlineTarget
-     * @param info Information about the invoke.
-     * @return Returns the destination location.
-     */
-    RegLocation InlineTargetWide(CallInfo* info);
-
-    bool GenInlinedReferenceGetReferent(CallInfo* info);
-    virtual bool GenInlinedCharAt(CallInfo* info);
-    bool GenInlinedStringGetCharsNoCheck(CallInfo* info);
-    bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
-    bool GenInlinedStringFactoryNewStringFromBytes(CallInfo* info);
-    bool GenInlinedStringFactoryNewStringFromChars(CallInfo* info);
-    bool GenInlinedStringFactoryNewStringFromString(CallInfo* info);
-    virtual bool GenInlinedReverseBits(CallInfo* info, OpSize size);
-    bool GenInlinedReverseBytes(CallInfo* info, OpSize size);
-    virtual bool GenInlinedAbsInt(CallInfo* info);
-    virtual bool GenInlinedAbsLong(CallInfo* info);
-    virtual bool GenInlinedAbsFloat(CallInfo* info) = 0;
-    virtual bool GenInlinedAbsDouble(CallInfo* info) = 0;
-    bool GenInlinedFloatCvt(CallInfo* info);
-    bool GenInlinedDoubleCvt(CallInfo* info);
-    virtual bool GenInlinedCeil(CallInfo* info);
-    virtual bool GenInlinedFloor(CallInfo* info);
-    virtual bool GenInlinedRint(CallInfo* info);
-    virtual bool GenInlinedRound(CallInfo* info, bool is_double);
-    virtual bool GenInlinedArrayCopyCharArray(CallInfo* info);
-    virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
-    bool GenInlinedStringCompareTo(CallInfo* info);
-    virtual bool GenInlinedCurrentThread(CallInfo* info);
-    bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_object, bool is_volatile);
-    bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
-                             bool is_volatile, bool is_ordered);
-
-    // Shared by all targets - implemented in gen_loadstore.cc.
-    RegLocation LoadCurrMethod();
-    void LoadCurrMethodDirect(RegStorage r_tgt);
-    RegStorage LoadCurrMethodWithHint(RegStorage r_hint);
-    virtual LIR* LoadConstant(RegStorage r_dest, int value);
-    // Natural word size.
-    LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
-      return LoadBaseDisp(r_base, displacement, r_dest, kWord, kNotVolatile);
-    }
-    // Load 32 bits, regardless of target.
-    LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest)  {
-      return LoadBaseDisp(r_base, displacement, r_dest, k32, kNotVolatile);
-    }
-    // Load a reference at base + displacement and decompress into register.
-    LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
-                     VolatileKind is_volatile) {
-      return LoadBaseDisp(r_base, displacement, r_dest, kReference, is_volatile);
-    }
-    // Load a reference at base + index and decompress into register.
-    LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale) {
-      return LoadBaseIndexed(r_base, r_index, r_dest, scale, kReference);
-    }
-    // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
-    virtual RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
-    // Load Dalvik value with 64-bit memory storage.
-    virtual RegLocation LoadValueWide(RegLocation rl_src, RegisterClass op_kind);
-    // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
-    virtual void LoadValueDirect(RegLocation rl_src, RegStorage r_dest);
-    // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
-    virtual void LoadValueDirectFixed(RegLocation rl_src, RegStorage r_dest);
-    // Load Dalvik value with 64-bit memory storage.
-    virtual void LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest);
-    // Load Dalvik value with 64-bit memory storage.
-    virtual void LoadValueDirectWideFixed(RegLocation rl_src, RegStorage r_dest);
-    // Store an item of natural word size.
-    LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) {
-      return StoreBaseDisp(r_base, displacement, r_src, kWord, kNotVolatile);
-    }
-    // Store an uncompressed reference into a compressed 32-bit container.
-    LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
-                      VolatileKind is_volatile) {
-      return StoreBaseDisp(r_base, displacement, r_src, kReference, is_volatile);
-    }
-    // Store an uncompressed reference into a compressed 32-bit container by index.
-    LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale) {
-      return StoreBaseIndexed(r_base, r_index, r_src, scale, kReference);
-    }
-    // Store 32 bits, regardless of target.
-    LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) {
-      return StoreBaseDisp(r_base, displacement, r_src, k32, kNotVolatile);
-    }
-
-    /**
-     * @brief Used to do the final store in the destination as per bytecode semantics.
-     * @param rl_dest The destination dalvik register location.
-     * @param rl_src The source register location. Can be either physical register or dalvik register.
-     */
-    virtual void StoreValue(RegLocation rl_dest, RegLocation rl_src);
-
-    /**
-     * @brief Used to do the final store in a wide destination as per bytecode semantics.
-     * @see StoreValue
-     * @param rl_dest The destination dalvik register location.
-     * @param rl_src The source register location. Can be either physical register or dalvik
-     *  register.
-     */
-    virtual void StoreValueWide(RegLocation rl_dest, RegLocation rl_src);
-
-    /**
-     * @brief Used to do the final store to a destination as per bytecode semantics.
-     * @see StoreValue
-     * @param rl_dest The destination dalvik register location.
-     * @param rl_src The source register location. It must be kLocPhysReg
-     *
-     * This is used for x86 two operand computations, where we have computed the correct
-     * register value that now needs to be properly registered.  This is used to avoid an
-     * extra register copy that would result if StoreValue was called.
-     */
-    virtual void StoreFinalValue(RegLocation rl_dest, RegLocation rl_src);
-
-    /**
-     * @brief Used to do the final store in a wide destination as per bytecode semantics.
-     * @see StoreValueWide
-     * @param rl_dest The destination dalvik register location.
-     * @param rl_src The source register location. It must be kLocPhysReg
-     *
-     * This is used for x86 two operand computations, where we have computed the correct
-     * register values that now need to be properly registered.  This is used to avoid an
-     * extra pair of register copies that would result if StoreValueWide was called.
-     */
-    virtual void StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src);
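-
-    // Illustrative case (sketch): an x86 two-operand add computes "add reg, src" in
-    // place, so the register named by rl_src already holds the final result;
-    // StoreFinalValue(rl_dest, rl_src) then only records that register as the value
-    // of rl_dest, where StoreValue would emit an extra register copy.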
-
-    // Shared by all targets - implemented in mir_to_lir.cc.
-    void CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list);
-    virtual void HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir);
-    bool MethodBlockCodeGen(BasicBlock* bb);
-    bool SpecialMIR2LIR(const InlineMethod& special);
-    virtual void MethodMIR2LIR();
-    // Update LIR for verbose listings.
-    void UpdateLIROffsets();
-
-    /**
-     * @brief Mark a garbage collection card. Skip if the stored value is null.
-     * @param val_reg the register holding the stored value to check against null.
-     * @param tgt_addr_reg the address of the object or array where the value was stored.
-     * @param opt_flags the optimization flags which may indicate that the value is non-null.
-     */
-    void MarkGCCard(int opt_flags, RegStorage val_reg, RegStorage tgt_addr_reg);
-
-    /*
-     * @brief Load the address of the dex method into the register.
-     * @param target_method The MethodReference of the method to be invoked.
-     * @param type How the method will be invoked.
-     * @param symbolic_reg Symbolic register that will contain the code address.
-     * @note symbolic_reg will be passed to TargetReg to get the physical register.
-     */
-    void LoadCodeAddress(const MethodReference& target_method, InvokeType type,
-                         SpecialTargetRegister symbolic_reg);
-
-    /*
-     * @brief Load the Method* of a dex method into the register.
-     * @param target_method The MethodReference of the method to be invoked.
-     * @param type How the method will be invoked.
-     * @param symbolic_reg Symbolic register that will contain the Method* address.
-     * @note symbolic_reg will be passed to TargetReg to get the physical register.
-     */
-    virtual void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
-                                   SpecialTargetRegister symbolic_reg);
-
-    /*
-     * @brief Load the Class* of a Dex Class type into the register.
-     * @param dex_file DexFile that contains the class type.
-     * @param type_idx Index of the class type in the dex file.
-     * @param symbolic_reg Symbolic register that will contain the Class* address.
-     * @note symbolic_reg will be passed to TargetReg to get the physical register.
-     */
-    virtual void LoadClassType(const DexFile& dex_file, uint32_t type_idx,
-                               SpecialTargetRegister symbolic_reg);
-
-    // TODO: Support PC-relative dex cache array loads on all platforms and
-    // replace CanUseOpPcRelDexCacheArrayLoad() with dex_cache_arrays_layout_.Valid().
-    virtual bool CanUseOpPcRelDexCacheArrayLoad() const;
-
-    /*
-     * @brief Load an element of one of the dex cache arrays.
-     * @param dex_file the dex file associated with the target dex cache.
-     * @param offset the offset of the element in the fixed dex cache arrays' layout.
-     * @param r_dest The register into which to load the element.
-     * @param wide Load 64 bits if true, otherwise 32 bits.
-     */
-    virtual void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest,
-                                          bool wide);
-
-    // Routines that work for the generic case, but may be overridden by target.
-    /*
-     * @brief Compare memory to immediate, and branch if condition true.
-     * @param cond The condition code that when true will branch to the target.
-     * @param temp_reg A temporary register that can be used if compare to memory is not
-     * supported by the architecture.
-     * @param base_reg The register holding the base address.
-     * @param offset The offset from the base.
-     * @param check_value The immediate to compare to.
-     * @param target branch target (or null)
-     * @param compare output for getting LIR for comparison (or null)
-     * @returns The branch instruction that was generated.
-     */
-    virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
-                                   int offset, int check_value, LIR* target, LIR** compare);
-
-    // Required for target - codegen helpers.
-    virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
-                                    RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
-    virtual bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
-    virtual void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
-                                            int32_t constant) = 0;
-    virtual void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
-                                             int64_t constant) = 0;
-    virtual LIR* CheckSuspendUsingLoad() = 0;
-
-    virtual RegStorage LoadHelper(QuickEntrypointEnum trampoline) = 0;
-
-    virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
-                              OpSize size, VolatileKind is_volatile) = 0;
-    virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
-                                 int scale, OpSize size) = 0;
-    virtual LIR* LoadConstantNoClobber(RegStorage r_dest, int value) = 0;
-    virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0;
-    virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
-                               OpSize size, VolatileKind is_volatile) = 0;
-    virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
-                                  int scale, OpSize size) = 0;
-
-    /**
-     * @brief Unconditionally mark a garbage collection card.
-     * @param tgt_addr_reg the address of the object or array where the value was stored.
-     */
-    virtual void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) = 0;
-
-    // Required for target - register utilities.
-
-    bool IsSameReg(RegStorage reg1, RegStorage reg2) {
-      RegisterInfo* info1 = GetRegInfo(reg1);
-      RegisterInfo* info2 = GetRegInfo(reg2);
-      return (info1->Master() == info2->Master() &&
-             (info1->StorageMask() & info2->StorageMask()) != 0);
-    }
-
-    static constexpr bool IsWide(OpSize size) {
-      return size == k64 || size == kDouble;
-    }
-
-    static constexpr bool IsRef(OpSize size) {
-      return size == kReference;
-    }
-
-    /**
-     * @brief Portable way of getting special registers from the backend.
-     * @param reg Enumeration describing the purpose of the register.
-     * @return Returns the #RegStorage corresponding to the given purpose @p reg.
-     * @note This function is currently allowed to return any suitable view of the registers
-     *   (e.g. this could be 64-bit solo or 32-bit solo for 64-bit backends).
-     */
-    virtual RegStorage TargetReg(SpecialTargetRegister reg) = 0;
-
-    /**
-     * @brief Portable way of getting special registers from the backend.
-     * @param reg Enumeration describing the purpose of the register.
-     * @param wide_kind What kind of view of the special register is required.
-     * @return Returns the #RegStorage corresponding to the given purpose @p reg.
-     *
-     * @note For a 32-bit system, wide (kWide) views only make sense for the argument registers and the
-     *       return. In that case, this function should return a pair where the first component of
-     *       the result will be the indicated special register.
-     */
-    virtual RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
-      if (wide_kind == kWide) {
-        DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg15) || (kRet0 == reg));
-        static_assert((kArg1 == kArg0 + 1) && (kArg2 == kArg1 + 1) && (kArg3 == kArg2 + 1) &&
-                      (kArg4 == kArg3 + 1) && (kArg5 == kArg4 + 1) && (kArg6 == kArg5 + 1) &&
-                      (kArg7 == kArg6 + 1), "kargs range unexpected");
-        static_assert((kFArg1 == kFArg0 + 1) && (kFArg2 == kFArg1 + 1) && (kFArg3 == kFArg2 + 1) &&
-                      (kFArg4 == kFArg3 + 1) && (kFArg5 == kFArg4 + 1) && (kFArg6 == kFArg5 + 1) &&
-                      (kFArg7 == kFArg6 + 1) && (kFArg8 == kFArg7 + 1) && (kFArg9 == kFArg8 + 1) &&
-                      (kFArg10 == kFArg9 + 1) && (kFArg11 == kFArg10 + 1) &&
-                      (kFArg12 == kFArg11 + 1) && (kFArg13 == kFArg12 + 1) &&
-                      (kFArg14 == kFArg13 + 1) && (kFArg15 == kFArg14 + 1),
-                      "kfargs range unexpected");
-        static_assert(kRet1 == kRet0 + 1, "kret range unexpected");
-        return RegStorage::MakeRegPair(TargetReg(reg),
-                                       TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
-      } else {
-        return TargetReg(reg);
-      }
-    }
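-
-    // Example (sketch): requesting a wide view of an argument register on a 32-bit
-    // backend produces a register pair, while a 64-bit backend may return a solo.
-    //
-    //   RegStorage arg = TargetReg(kArg0, kWide);     // kArg0/kArg1 pair on 32-bit targets.
-    //   RegStorage ret = TargetReg(kRet0, kNotWide);  // Narrow view of the return register.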
-
-    /**
-     * @brief Portable way of getting a special register for storing a pointer.
-     * @see TargetReg()
-     */
-    virtual RegStorage TargetPtrReg(SpecialTargetRegister reg) {
-      return TargetReg(reg);
-    }
-
-    // Get a reg storage corresponding to the wide & ref flags of the reg location.
-    virtual RegStorage TargetReg(SpecialTargetRegister reg, RegLocation loc) {
-      if (loc.ref) {
-        return TargetReg(reg, kRef);
-      } else {
-        return TargetReg(reg, loc.wide ? kWide : kNotWide);
-      }
-    }
-
-    void EnsureInitializedArgMappingToPhysicalReg();
-    virtual RegLocation GetReturnAlt() = 0;
-    virtual RegLocation GetReturnWideAlt() = 0;
-    virtual RegLocation LocCReturn() = 0;
-    virtual RegLocation LocCReturnRef() = 0;
-    virtual RegLocation LocCReturnDouble() = 0;
-    virtual RegLocation LocCReturnFloat() = 0;
-    virtual RegLocation LocCReturnWide() = 0;
-    virtual ResourceMask GetRegMaskCommon(const RegStorage& reg) const = 0;
-    virtual void AdjustSpillMask() = 0;
-    virtual void ClobberCallerSave() = 0;
-    virtual void FreeCallTemps() = 0;
-    virtual void LockCallTemps() = 0;
-    virtual void CompilerInitializeRegAlloc() = 0;
-
-    // Required for target - miscellaneous.
-    virtual void AssembleLIR() = 0;
-    virtual void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) = 0;
-    virtual void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
-                                          ResourceMask* use_mask, ResourceMask* def_mask) = 0;
-    virtual const char* GetTargetInstFmt(int opcode) = 0;
-    virtual const char* GetTargetInstName(int opcode) = 0;
-    virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0;
-
-    // Note: This may return kEncodeNone on architectures that do not expose a PC. The caller must
-    //       take care of this.
-    virtual ResourceMask GetPCUseDefEncoding() const = 0;
-    virtual uint64_t GetTargetInstFlags(int opcode) = 0;
-    virtual size_t GetInsnSize(LIR* lir) = 0;
-    virtual bool IsUnconditionalBranch(LIR* lir) = 0;
-
-    // Get the register class for load/store of a field.
-    virtual RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) = 0;
-
-    // Required for target - Dalvik-level generators.
-    virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                                   RegLocation rl_src1, RegLocation rl_src2, int flags) = 0;
-    virtual void GenArithOpDouble(Instruction::Code opcode,
-                                  RegLocation rl_dest, RegLocation rl_src1,
-                                  RegLocation rl_src2) = 0;
-    virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
-                                 RegLocation rl_src1, RegLocation rl_src2) = 0;
-    virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
-                          RegLocation rl_src1, RegLocation rl_src2) = 0;
-    virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest,
-                               RegLocation rl_src) = 0;
-    virtual bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) = 0;
-
-    /**
-     * @brief Used to generate code for intrinsic java\.lang\.Math methods min and max.
-     * @details This is also applicable for java\.lang\.StrictMath since it is a simple algorithm
-     * that applies on integers. The generated code will write the smallest or largest value
-     * directly into the destination register as specified by the invoke information.
-     * @param info Information about the invoke.
-     * @param is_min If true generates code that computes minimum. Otherwise computes maximum.
-     * @param is_long If true the value is long. Otherwise the value is int.
-     * @return Returns true if successfully generated.
-     */
-    virtual bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) = 0;
-    virtual bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double);
-
-    virtual bool GenInlinedSqrt(CallInfo* info) = 0;
-    virtual bool GenInlinedPeek(CallInfo* info, OpSize size) = 0;
-    virtual bool GenInlinedPoke(CallInfo* info, OpSize size) = 0;
-    virtual RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
-                                  bool is_div) = 0;
-    virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit,
-                                     bool is_div) = 0;
-    /*
-     * @brief Generate an integer div or rem operation.
-     * @param rl_dest Destination Location.
-     * @param rl_src1 Numerator Location.
-     * @param rl_src2 Divisor Location.
-     * @param is_div 'true' if this is a division, 'false' for a remainder.
-     * @param flags The instruction optimization flags. They can indicate whether the
-     * exception check can be elided.
-     */
-    virtual RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
-                                  RegLocation rl_src2, bool is_div, int flags) = 0;
-    /*
-     * @brief Generate an integer div or rem operation by a literal.
-     * @param rl_dest Destination Location.
-     * @param rl_src1 Numerator Location.
-     * @param lit Divisor.
-     * @param is_div 'true' if this is a division, 'false' for a remainder.
-     */
-    virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
-                                     bool is_div) = 0;
-    virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) = 0;
-
-    /**
-     * @brief Used for generating code that throws ArithmeticException if a wide divisor is zero.
-     * @details This is used for generating DivideByZero checks when the 64-bit divisor is held
-     *  in two separate 32-bit registers.
-     * @param reg The register pair holding the two 32-bit halves of the divisor.
-     */
-    virtual void GenDivZeroCheckWide(RegStorage reg) = 0;
-
-    virtual void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) = 0;
-    virtual void GenExitSequence() = 0;
-    virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) = 0;
-    virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) = 0;
-
-    /*
-     * @brief Handle Machine Specific MIR Extended opcodes.
-     * @param bb The basic block in which the MIR is from.
-     * @param mir The MIR whose opcode is not standard extended MIR.
-     * @note Base class implementation will abort for unknown opcodes.
-     */
-    virtual void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir);
-
-    /**
-     * @brief Lowers the kMirOpSelect MIR into LIR.
-     * @param bb The basic block in which the MIR is from.
-     * @param mir The MIR whose opcode is kMirOpSelect.
-     */
-    virtual void GenSelect(BasicBlock* bb, MIR* mir) = 0;
-
-    /**
-     * @brief Generates code to select one of the given constants depending on the given opcode.
-     */
-    virtual void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
-                                  int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                                  RegisterClass dest_reg_class) = 0;
-
-    /**
-     * @brief Used to generate a memory barrier in an architecture specific way.
-     * @details The last generated LIR will be considered for use as the barrier. Namely,
-     * if the last LIR can be updated so that it also serves the barrier semantics, it
-     * will be used as such. Otherwise, a new LIR that provides the required semantics
-     * will be generated.
-     * @param barrier_kind The kind of memory barrier to generate.
-     * @return whether a new instruction was generated.
-     */
-    virtual bool GenMemBarrier(MemBarrierKind barrier_kind) = 0;
-
-    virtual void GenMoveException(RegLocation rl_dest) = 0;
-    virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
-                                               int first_bit, int second_bit) = 0;
-    virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
-    virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;
-
-    // Create code for switch statements. Will decide between short and long versions below.
-    void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
-    void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
-
-    // Potentially backend-specific versions of switch instructions for shorter switch statements.
-    // The default implementation will create a chained compare-and-branch.
-    virtual void GenSmallPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
-    virtual void GenSmallSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
-    // Backend-specific versions of switch instructions for longer switch statements.
-    virtual void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
-    virtual void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
-
-    virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
-                             RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
-    virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
-                             RegLocation rl_index, RegLocation rl_src, int scale,
-                             bool card_mark) = 0;
-    virtual void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                                   RegLocation rl_src1, RegLocation rl_shift, int flags) = 0;
-
-    // Required for target - single operation generators.
-    virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
-    virtual LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) = 0;
-    virtual LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
-                                LIR* target) = 0;
-    virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
-    virtual LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) = 0;
-    virtual LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
-    virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
-    virtual void OpEndIT(LIR* it) = 0;
-    virtual LIR* OpMem(OpKind op, RegStorage r_base, int disp) = 0;
-    virtual void OpPcRelLoad(RegStorage reg, LIR* target) = 0;
-    virtual LIR* OpReg(OpKind op, RegStorage r_dest_src) = 0;
-    virtual void OpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
-    virtual LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) = 0;
-    virtual LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) = 0;
-    virtual LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) = 0;
-
-    /**
-     * @brief Used to generate an LIR that does a load from mem to reg.
-     * @param r_dest The destination physical register.
-     * @param r_base The base physical register for memory operand.
-     * @param offset The displacement for memory operand.
-     * @param move_type Specification on the move desired (size, alignment, register kind).
-     * @return Returns the generated move LIR.
-     */
-    virtual LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
-                             MoveType move_type) = 0;
-
-    /**
-     * @brief Used to generate an LIR that does a store from reg to mem.
-     * @param r_base The base physical register for memory operand.
-     * @param offset The displacement for memory operand.
-     * @param r_src The source physical register.
-     * @param move_type Specification on the move desired (size, alignment, register kind).
-     * @return Returns the generated move LIR.
-     */
-    virtual LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
-                             MoveType move_type) = 0;
-
-    /**
-     * @brief Used for generating a conditional register to register operation.
-     * @param op The opcode kind.
-     * @param cc The condition code that when true will perform the opcode.
-     * @param r_dest The destination physical register.
-     * @param r_src The source physical register.
-     * @return Returns the newly created LIR or null in case of creation failure.
-     */
-    virtual LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) = 0;
-
-    virtual LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) = 0;
-    virtual LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
-                             RegStorage r_src2) = 0;
-    virtual LIR* OpTestSuspend(LIR* target) = 0;
-    virtual LIR* OpVldm(RegStorage r_base, int count) = 0;
-    virtual LIR* OpVstm(RegStorage r_base, int count) = 0;
-    virtual void OpRegCopyWide(RegStorage dest, RegStorage src) = 0;
-    virtual bool InexpensiveConstantInt(int32_t value) = 0;
-    virtual bool InexpensiveConstantFloat(int32_t value) = 0;
-    virtual bool InexpensiveConstantLong(int64_t value) = 0;
-    virtual bool InexpensiveConstantDouble(int64_t value) = 0;
-    virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode ATTRIBUTE_UNUSED) {
-      return InexpensiveConstantInt(value);
-    }
-
-    // May be optimized by targets.
-    virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
-    virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);
-
-    virtual LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) = 0;
-
-    // Queries for backend support for vectors
-    /*
-     * Return the number of bits in a vector register.
-     * @return 0 if vector registers are not supported, or the
-     * number of bits in the vector register if supported.
-     */
-    virtual int VectorRegisterSize() {
-      return 0;
-    }
-
-    /*
-     * Return the number of reservable vector registers supported.
-     * @param long_or_fp True if floating-point or long computations will be
-     * executed while vector registers are reserved.
-     * @return The number of vector registers that are available.
-     * @note The backend should ensure that sufficient vector registers
-     * are held back to generate scalar code without exhausting vector
-     * registers, if scalar code also uses the vector registers.
-     */
-    virtual int NumReservableVectorRegisters(bool long_or_fp ATTRIBUTE_UNUSED) {
-      return 0;
-    }
-
-    /**
-     * @brief Buffer of DWARF's Call Frame Information opcodes.
-     * @details It is used by debuggers and other tools to unwind the call stack.
-     */
-    dwarf::LazyDebugFrameOpCodeWriter& cfi() { return cfi_; }
-
-  protected:
-    Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
-
-    CompilationUnit* GetCompilationUnit() {
-      return cu_;
-    }
-    /*
-     * @brief Do these SRs overlap?
-     * @param rl_op1 One RegLocation
-     * @param rl_op2 The other RegLocation
-     * @return 'true' if the VR pairs overlap
-     *
-     * Check to see if a result pair has a misaligned overlap with an operand pair.  This
-     * is not usual for dx to generate, but it is legal (for now).  In a future rev of
-     * dex, we'll want to make this case illegal.
-     */
-    bool PartiallyIntersects(RegLocation rl_op1, RegLocation rl_op2);
-
-    /*
-     * @brief Do these SRs intersect?
-     * @param rl_op1 One RegLocation
-     * @param rl_op2 The other RegLocation
-     * @return 'true' if the VR pairs intersect
-     *
-     * Check to see if a result pair has misaligned overlap or
-     * full overlap with an operand pair.
-     */
-    bool Intersects(RegLocation rl_op1, RegLocation rl_op2);
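-
-    // Worked example: a result pair in {v4, v5} and an operand pair in {v5, v6} have a
-    // misaligned overlap, while {v4, v5} and {v4, v5} overlap fully.  Intersects() is
-    // true in both cases; PartiallyIntersects() is true only in the first.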
-
-    /*
-     * @brief Force a location (in a register) into a temporary register
-     * @param loc location of result
-     * @returns The updated location.
-     */
-    virtual RegLocation ForceTemp(RegLocation loc);
-
-    /*
-     * @brief Force a wide location (in registers) into temporary registers
-     * @param loc location of result
-     * @returns The updated location.
-     */
-    virtual RegLocation ForceTempWide(RegLocation loc);
-
-    virtual void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
-                                    RegLocation rl_dest, RegLocation rl_src);
-
-    void AddSlowPath(LIRSlowPath* slowpath);
-
-    /*
-     * @brief Set up an instanceof check on a class.
-     * @param needs_access_check 'true' if we must check the access.
-     * @param type_known_final 'true' if the type is known to be a final class.
-     * @param type_known_abstract 'true' if the type is known to be an abstract class.
-     * @param use_declaring_class 'true' if the type can be loaded off the current Method*.
-     * @param can_assume_type_is_in_dex_cache 'true' if the type is known to be in the cache.
-     * @param type_idx Type index to use if use_declaring_class is 'false'.
-     * @param rl_dest Result to be set to 0 or 1.
-     * @param rl_src Object to be tested.
-     */
-    void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
-                                    bool type_known_abstract, bool use_declaring_class,
-                                    bool can_assume_type_is_in_dex_cache,
-                                    uint32_t type_idx, RegLocation rl_dest,
-                                    RegLocation rl_src);
-
-    /**
-     * @brief Used to insert a marker that can be used to associate MIR with LIR.
-     * @details Only inserts marker if verbosity is enabled.
-     * @param mir The mir that is currently being generated.
-     */
-    void GenPrintLabel(MIR* mir);
-
-    /**
-     * @brief Used to generate return sequence when there is no frame.
-     * @details Assumes that the return registers have already been populated.
-     */
-    virtual void GenSpecialExitSequence() = 0;
-
-    /**
-     * @brief Used to generate stack frame for suspend path of special methods.
-     */
-    virtual void GenSpecialEntryForSuspend() = 0;
-
-    /**
-     * @brief Used to pop the stack frame for suspend path of special methods.
-     */
-    virtual void GenSpecialExitForSuspend() = 0;
-
-    /**
-     * @brief Used to generate code for special methods that are known to be
-     * small enough to work in frameless mode.
-     * @param bb The basic block of the first MIR.
-     * @param mir The first MIR of the special method.
-     * @param special Information about the special method.
-     * @return Returns whether or not this was handled successfully. Returns false
-     * if caller should punt to normal MIR2LIR conversion.
-     */
-    virtual bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
-
-    void ClobberBody(RegisterInfo* p);
-    void SetCurrentDexPc(DexOffset dexpc) {
-      current_dalvik_offset_ = dexpc;
-    }
-
-    /**
-     * @brief Used to lock register if argument at in_position was passed that way.
-     * @details Does nothing if the argument is passed via stack.
-     * @param in_position The argument number whose register to lock.
-     */
-    void LockArg(size_t in_position);
-
-    /**
-     * @brief Used to load a VR argument to a physical register.
-     * @details The load is only done if the argument is not already in a physical register.
-     * LockArg must have been previously called.
-     * @param in_position The argument number to load.
-     * @param wide Whether the argument is 64-bit or not.
-     * @return Returns the register (or register pair) for the loaded argument.
-     */
-    RegStorage LoadArg(size_t in_position, RegisterClass reg_class, bool wide = false);
-
-    /**
-     * @brief Used to load a VR argument directly to a specified register location.
-     * @param in_position The argument number to place in register.
-     * @param rl_dest The register location where to place argument.
-     */
-    void LoadArgDirect(size_t in_position, RegLocation rl_dest);
-
-    /**
-     * @brief Used to spill register if argument at in_position was passed that way.
-     * @details Does nothing if the argument is passed via stack.
-     * @param in_position The argument number whose register to spill.
-     */
-    void SpillArg(size_t in_position);
-
-    /**
-     * @brief Used to unspill register if argument at in_position was passed that way.
-     * @details Does nothing if the argument is passed via stack.
-     * @param in_position The argument number whose register to unspill.
-     */
-    void UnspillArg(size_t in_position);
-
-    /**
-     * @brief Generate suspend test in a special method.
-     */
-    SpecialSuspendCheckSlowPath* GenSpecialSuspendTest();
-
-    /**
-     * @brief Used to generate LIR for special getter method.
-     * @param mir The mir that represents the iget.
-     * @param special Information about the special getter method.
-     * @return Returns whether LIR was successfully generated.
-     */
-    bool GenSpecialIGet(MIR* mir, const InlineMethod& special);
-
-    /**
-     * @brief Used to generate LIR for special setter method.
-     * @param mir The mir that represents the iput.
-     * @param special Information about the special setter method.
-     * @return Returns whether LIR was successfully generated.
-     */
-    bool GenSpecialIPut(MIR* mir, const InlineMethod& special);
-
-    /**
-     * @brief Used to generate LIR for special return-args method.
-     * @param mir The mir that represents the return of argument.
-     * @param special Information about the special return-args method.
-     * @return Returns whether LIR was successfully generated.
-     */
-    bool GenSpecialIdentity(MIR* mir, const InlineMethod& special);
-
-    /**
-     * @brief Generate code to check if result is null and, if it is, call helper to load it.
-     * @param r_result the result register.
-     * @param trampoline the helper to call in slow path.
-     * @param imm the immediate passed to the helper.
-     */
-    void GenIfNullUseHelperImm(RegStorage r_result, QuickEntrypointEnum trampoline, int imm);
-
-    /**
-     * @brief Generate code to retrieve Class* for another type to be used by SGET/SPUT.
-     * @param field_info information about the field to be accessed.
-     * @param opt_flags the optimization flags of the MIR.
-     */
-    RegStorage GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& field_info, int opt_flags);
-
-    void AddDivZeroCheckSlowPath(LIR* branch);
-
-    // Copy arg0 and arg1 to kArg0 and kArg1 safely, possibly using
-    // kArg2 as temp.
-    virtual void CopyToArgumentRegs(RegStorage arg0, RegStorage arg1);
-
-    /**
-     * @brief Load Constant into RegLocation
-     * @param rl_dest Destination RegLocation
-     * @param value Constant value
-     */
-    virtual void GenConst(RegLocation rl_dest, int value);
-
-    /**
-     * Returns true iff wide GPRs are just different views on the same physical register.
-     */
-    virtual bool WideGPRsAreAliases() const = 0;
-
-    /**
-     * Returns true iff wide FPRs are just different views on the same physical register.
-     */
-    virtual bool WideFPRsAreAliases() const = 0;
-
-    enum class WidenessCheck {  // private
-      kIgnoreWide,
-      kCheckWide,
-      kCheckNotWide
-    };
-
-    enum class RefCheck {  // private
-      kIgnoreRef,
-      kCheckRef,
-      kCheckNotRef
-    };
-
-    enum class FPCheck {  // private
-      kIgnoreFP,
-      kCheckFP,
-      kCheckNotFP
-    };
-
-    /**
-     * Check whether a reg storage seems well-formed, that is, if the reg storage is valid,
-     * whether it has the form expected by the given checks: a kIgnore* value skips the
-     * check, kCheck* requires the property, and kCheckNot* requires its absence.
-     */
-    void CheckRegStorageImpl(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp,
-                             bool fail, bool report) const;
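As a sketch, a backend call site validating a wide core register could look like this (rs is a hypothetical value; the real call sites live in the codegen utilities):

  // Hypothetical check: rs must be valid, wide, not a reference, and not FP.
  CheckRegStorage(rs, WidenessCheck::kCheckWide, RefCheck::kCheckNotRef,
                  FPCheck::kCheckNotFP);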
-
-    /**
-     * Check whether a reg location seems well-formed, that is, if a reg storage is encoded,
-     * that it has the expected size.
-     */
-    void CheckRegLocationImpl(RegLocation rl, bool fail, bool report) const;
-
-    // See CheckRegStorageImpl. Will print or fail depending on kFailOnSizeError and
-    // kReportSizeError.
-    void CheckRegStorage(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp) const;
-    // See CheckRegLocationImpl.
-    void CheckRegLocation(RegLocation rl) const;
-
-    // Find the references at the beginning of a basic block (for generating GC maps).
-    void InitReferenceVRegs(BasicBlock* bb, BitVector* references);
-
-    // Update references from prev_mir to mir in the same BB. If mir is null or before
-    // prev_mir, report failure (return false) and update references to the end of the BB.
-    bool UpdateReferenceVRegsLocal(MIR* mir, MIR* prev_mir, BitVector* references);
-
-    // Update references from prev_mir to mir.
-    void UpdateReferenceVRegs(MIR* mir, MIR* prev_mir, BitVector* references);
-
-    /**
-     * Returns true if the frame spills the given core register.
-     */
-    bool CoreSpillMaskContains(int reg) {
-      return (core_spill_mask_ & (1u << reg)) != 0;
-    }
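As a worked example (mask value invented for illustration), spilling r5 and r6 sets bits 5 and 6 of the mask:

  core_spill_mask_ = (1u << 5) | (1u << 6);  // 0x60: r5 and r6 are spilled.
  CoreSpillMaskContains(5);                  // true
  CoreSpillMaskContains(4);                  // false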
-
-    size_t GetCacheOffset(uint32_t index);
-    size_t GetCachePointerOffset(uint32_t index, size_t pointer_size);
-
-    void LoadTypeFromCache(uint32_t type_index, RegStorage class_reg);
-
-  public:
-    // TODO: add accessors for these.
-    LIR* literal_list_;                        // Constants.
-    LIR* method_literal_list_;                 // Method literals requiring patching.
-    LIR* class_literal_list_;                  // Class literals requiring patching.
-    LIR* code_literal_list_;                   // Code literals requiring patching.
-    LIR* first_fixup_;                         // Doubly-linked list of LIR nodes requiring fixups.
-
-  protected:
-    ArenaAllocator* const arena_;
-    CompilationUnit* const cu_;
-    MIRGraph* const mir_graph_;
-    ArenaVector<SwitchTable*> switch_tables_;
-    ArenaVector<FillArrayData*> fill_array_data_;
-    ArenaVector<RegisterInfo*> tempreg_info_;
-    ArenaVector<RegisterInfo*> reginfo_map_;
-    ArenaVector<const void*> pointer_storage_;
-    CodeOffset data_offset_;            // starting offset of literal pool.
-    size_t total_size_;                   // header + code size.
-    LIR* block_label_list_;
-    PromotionMap* promotion_map_;
-    /*
-     * TODO: The code generation utilities don't have a built-in
-     * mechanism to propagate the original Dalvik opcode address to the
-     * associated generated instructions.  For the trace compiler, this wasn't
-     * necessary because the interpreter handled all throws and debugging
-     * requests.  For now we'll handle this by placing the Dalvik offset
-     * in the CompilationUnit struct before codegen for each instruction.
-     * The low-level LIR creation utilities will pull it from here.  Rework this.
-     */
-    DexOffset current_dalvik_offset_;
-    MIR* current_mir_;
-    size_t estimated_native_code_size_;     // Just an estimate; used to reserve code_buffer_ size.
-    std::unique_ptr<RegisterPool> reg_pool_;
-    /*
-     * Sanity checking for the register temp tracking. The same SSA
-     * name should never be associated with more than one temp register
-     * during the compilation of a single instruction.
-     */
-    int live_sreg_;
-    CodeBuffer code_buffer_;
-    // The source mapping table data (pc -> dex). More entries than in encoded_mapping_table_.
-    DefaultSrcMap src_mapping_table_;
-    // The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
-    ArenaVector<uint8_t> encoded_mapping_table_;
-    ArenaVector<uint32_t> core_vmap_table_;
-    ArenaVector<uint32_t> fp_vmap_table_;
-    ArenaVector<uint8_t> native_gc_map_;
-    ArenaVector<LinkerPatch> patches_;
-    int num_core_spills_;
-    int num_fp_spills_;
-    int frame_size_;
-    unsigned int core_spill_mask_;
-    unsigned int fp_spill_mask_;
-    LIR* first_lir_insn_;
-    LIR* last_lir_insn_;
-
-    ArenaVector<LIRSlowPath*> slow_paths_;
-
-    // The memory reference type for new LIRs.
-    // NOTE: Passing this as an explicit parameter by all functions that directly or indirectly
-    // invoke RawLIR() would clutter the code and reduce the readability.
-    ResourceMask::ResourceBit mem_ref_type_;
-
-    // Each resource mask now takes 16 bytes, so having both use/def masks directly in a LIR
-    // would consume 32 bytes per LIR. Instead, the LIR now holds only pointers to the masks
-    // (i.e. 8 bytes on 32-bit arch, 16 bytes on 64-bit arch) and we use ResourceMaskCache
-    // to deduplicate the masks.
-    ResourceMaskCache mask_cache_;
-
-    // Record the MIR that generated a given safepoint (null for prologue safepoints).
-    ArenaVector<std::pair<LIR*, MIR*>> safepoints_;
-
-    // The layout of the cu_->dex_file's dex cache arrays for PC-relative addressing.
-    const DexCacheArraysLayout dex_cache_arrays_layout_;
-
-    // For architectures that don't have true PC-relative addressing, we can promote
-    // a PC of an instruction (or another PC-relative address such as a pointer to
-    // the dex cache arrays if supported) to a register. This is indicated to the
-    // register promotion by allocating a backend temp.
-    CompilerTemp* pc_rel_temp_;
-
-    // For architectures that don't have true PC-relative addressing (see pc_rel_temp_
-    // above) and also have a limited range of offsets for loads, it's useful to
-    // know the minimum offset into the dex cache arrays, so we calculate that as well
-    // if pc_rel_temp_ isn't null.
-    uint32_t dex_cache_arrays_min_offset_;
-
-    dwarf::LazyDebugFrameOpCodeWriter cfi_;
-
-    // ABI support
-    class ShortyArg {
-      public:
-        explicit ShortyArg(char type) : type_(type) { }
-        bool IsFP() { return type_ == 'F' || type_ == 'D'; }
-        bool IsWide() { return type_ == 'J' || type_ == 'D'; }
-        bool IsRef() { return type_ == 'L'; }
-        char GetType() { return type_; }
-      private:
-        char type_;
-    };
-
-    class ShortyIterator {
-      public:
-        ShortyIterator(const char* shorty, bool is_static);
-        bool Next();
-        ShortyArg GetArg() { return ShortyArg(pending_this_ ? 'L' : *cur_); }
-      private:
-        const char* cur_;
-        bool pending_this_;
-        bool initialized_;
-    };
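As a sketch of the intended use (assuming the iterator starts past the shorty's leading return-type character and reports the implicit 'this' first for instance methods):

  // Shorty "VJL": returns void, takes (long, Object). For an instance method
  // the iterator yields 'L' for this, then 'J', then 'L'.
  ShortyIterator it("VJL", /* is_static */ false);
  while (it.Next()) {
    ShortyArg arg = it.GetArg();
    // arg.IsWide() is true only for the 'J' argument here.
  }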
-
-    class InToRegStorageMapper {
-     public:
-      virtual RegStorage GetNextReg(ShortyArg arg) = 0;
-      virtual ~InToRegStorageMapper() {}
-      virtual void Reset() = 0;
-    };
-
-    class InToRegStorageMapping {
-     public:
-      explicit InToRegStorageMapping(ArenaAllocator* arena)
-          : mapping_(arena->Adapter()),
-            end_mapped_in_(0u), has_arguments_on_stack_(false), initialized_(false) {}
-      void Initialize(ShortyIterator* shorty, InToRegStorageMapper* mapper);
-      /**
-       * @return the past-the-end index of VRs mapped to physical registers.
-       * In other words any VR starting from this index is mapped to memory.
-       */
-      size_t GetEndMappedIn() { return end_mapped_in_; }
-      bool HasArgumentsOnStack() { return has_arguments_on_stack_; }
-      RegStorage GetReg(size_t in_position);
-      ShortyArg GetShorty(size_t in_position);
-      bool IsInitialized() { return initialized_; }
-     private:
-      static constexpr char kInvalidShorty = '-';
-      ArenaVector<std::pair<ShortyArg, RegStorage>> mapping_;
-      size_t end_mapped_in_;
-      bool has_arguments_on_stack_;
-      bool initialized_;
-    };
-
-    // Cached mapping of method input to reg storage according to ABI.
-    InToRegStorageMapping in_to_reg_storage_mapping_;
-    virtual InToRegStorageMapper* GetResetedInToRegStorageMapper() = 0;
-
-  private:
-    static bool SizeMatchesTypeForEntrypoint(OpSize size, Primitive::Type type);
-
-    friend class QuickCFITest;
-};  // Class Mir2Lir
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
deleted file mode 100644
index 6c6c9cf..0000000
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <vector>
-#include <memory>
-
-#include "arch/instruction_set.h"
-#include "arch/instruction_set_features.h"
-#include "cfi_test.h"
-#include "dex/compiler_ir.h"
-#include "dex/mir_graph.h"
-#include "dex/pass_manager.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
-#include "dex/quick/quick_compiler.h"
-#include "dex/quick/mir_to_lir.h"
-#include "dex/verification_results.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
-#include "gtest/gtest.h"
-
-#include "dex/quick/quick_cfi_test_expected.inc"
-
-namespace art {
-
-// Run the tests only on host.
-#ifndef __ANDROID__
-
-class QuickCFITest : public CFITest {
- public:
-  // Enable this flag to generate the expected outputs.
-  static constexpr bool kGenerateExpected = false;
-
-  void TestImpl(InstructionSet isa, const char* isa_str,
-                const std::vector<uint8_t>& expected_asm,
-                const std::vector<uint8_t>& expected_cfi) {
-    // Set up a simple compiler context.
-    ArenaPool pool;
-    ArenaAllocator arena(&pool);
-    CompilerOptions compiler_options(
-      CompilerOptions::kDefaultCompilerFilter,
-      CompilerOptions::kDefaultHugeMethodThreshold,
-      CompilerOptions::kDefaultLargeMethodThreshold,
-      CompilerOptions::kDefaultSmallMethodThreshold,
-      CompilerOptions::kDefaultTinyMethodThreshold,
-      CompilerOptions::kDefaultNumDexMethodsThreshold,
-      CompilerOptions::kDefaultInlineDepthLimit,
-      CompilerOptions::kDefaultInlineMaxCodeUnits,
-      nullptr,
-      false,
-      CompilerOptions::kDefaultTopKProfileThreshold,
-      false,
-      true,  // generate_debug_info.
-      false,
-      false,
-      false,
-      false,
-      nullptr,
-      nullptr,
-      false,
-      "",
-      false,
-      false);
-    VerificationResults verification_results(&compiler_options);
-    DexFileToMethodInlinerMap method_inliner_map;
-    std::unique_ptr<const InstructionSetFeatures> isa_features;
-    std::string error;
-    isa_features.reset(InstructionSetFeatures::FromVariant(isa, "default", &error));
-    CompilerDriver driver(&compiler_options,
-                          &verification_results,
-                          &method_inliner_map,
-                          Compiler::kQuick,
-                          isa,
-                          isa_features.get(),
-                          /* boot_image */ false,
-                          /* image_classes */ nullptr,
-                          /* compiled_classes */ nullptr,
-                          /* compiled_methods */ nullptr,
-                          /* thread_count */ 0,
-                          /* dump_stats */ false,
-                          /* dump_passes */ false,
-                          /* timer */ nullptr,
-                          /* swap_fd */ -1,
-                          /* profile_compilation_info */ nullptr);
-    ClassLinker* linker = nullptr;
-    CompilationUnit cu(&pool, isa, &driver, linker);
-    DexFile::CodeItem code_item { 0, 0, 0, 0, 0, 0, { 0 } };  // NOLINT
-    cu.mir_graph.reset(new MIRGraph(&cu, &arena));
-    cu.mir_graph->current_code_item_ = &code_item;
-
-    // Generate empty method with some spills.
-    std::unique_ptr<Mir2Lir> m2l(QuickCompiler::GetCodeGenerator(&cu, nullptr));
-    m2l->frame_size_ = 64u;
-    m2l->CompilerInitializeRegAlloc();
-    for (const auto& info : m2l->reg_pool_->core_regs_) {
-      if (m2l->num_core_spills_ < 2 && !info->IsTemp() && !info->InUse()) {
-        m2l->core_spill_mask_ |= 1 << info->GetReg().GetRegNum();
-        m2l->num_core_spills_++;
-      }
-    }
-    for (const auto& info : m2l->reg_pool_->sp_regs_) {
-      if (m2l->num_fp_spills_ < 2 && !info->IsTemp() && !info->InUse()) {
-        m2l->fp_spill_mask_ |= 1 << info->GetReg().GetRegNum();
-        m2l->num_fp_spills_++;
-      }
-    }
-    m2l->AdjustSpillMask();
-    m2l->GenEntrySequence(nullptr, m2l->GetCompilationUnit()->target64 ?
-        m2l->LocCReturnWide() : m2l->LocCReturnRef());
-    m2l->GenExitSequence();
-    m2l->HandleSlowPaths();
-    m2l->AssembleLIR();
-    std::vector<uint8_t> actual_asm(m2l->code_buffer_.begin(), m2l->code_buffer_.end());
-    auto const& cfi_data = m2l->cfi().Patch(actual_asm.size());
-    std::vector<uint8_t> actual_cfi(cfi_data->begin(), cfi_data->end());
-    EXPECT_EQ(m2l->cfi().GetCurrentPC(), static_cast<int>(actual_asm.size()));
-
-    if (kGenerateExpected) {
-      GenerateExpected(stdout, isa, isa_str, actual_asm, actual_cfi);
-    } else {
-      EXPECT_EQ(expected_asm, actual_asm);
-      EXPECT_EQ(expected_cfi, actual_cfi);
-    }
-  }
-};
-
-#define TEST_ISA(isa) \
-  TEST_F(QuickCFITest, isa) { \
-    std::vector<uint8_t> expected_asm(expected_asm_##isa, \
-        expected_asm_##isa + arraysize(expected_asm_##isa)); \
-    std::vector<uint8_t> expected_cfi(expected_cfi_##isa, \
-        expected_cfi_##isa + arraysize(expected_cfi_##isa)); \
-    TestImpl(isa, #isa, expected_asm, expected_cfi); \
-  }
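For reference, TEST_ISA(kThumb2) expands to:

  TEST_F(QuickCFITest, kThumb2) {
    std::vector<uint8_t> expected_asm(expected_asm_kThumb2,
        expected_asm_kThumb2 + arraysize(expected_asm_kThumb2));
    std::vector<uint8_t> expected_cfi(expected_cfi_kThumb2,
        expected_cfi_kThumb2 + arraysize(expected_cfi_kThumb2));
    TestImpl(kThumb2, "kThumb2", expected_asm, expected_cfi);
  }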
-
-TEST_ISA(kThumb2)
-TEST_ISA(kArm64)
-TEST_ISA(kX86)
-TEST_ISA(kX86_64)
-TEST_ISA(kMips)
-TEST_ISA(kMips64)
-
-#endif  // __ANDROID__
-
-}  // namespace art
diff --git a/compiler/dex/quick/quick_cfi_test_expected.inc b/compiler/dex/quick/quick_cfi_test_expected.inc
deleted file mode 100644
index 3032697..0000000
--- a/compiler/dex/quick/quick_cfi_test_expected.inc
+++ /dev/null
@@ -1,215 +0,0 @@
-static constexpr uint8_t expected_asm_kThumb2[] = {
-    0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x00, 0x90, 0x0B, 0xB0,
-    0xBD, 0xEC, 0x02, 0x8A, 0x60, 0xBD, 0x00, 0x00,
-};
-static constexpr uint8_t expected_cfi_kThumb2[] = {
-    0x42, 0x0E, 0x0C, 0x85, 0x03, 0x86, 0x02, 0x8E, 0x01, 0x44, 0x0E, 0x14,
-    0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x42, 0x0A, 0x42,
-    0x0E, 0x14, 0x44, 0x0E, 0x0C, 0x06, 0x50, 0x06, 0x51, 0x44, 0x0B, 0x0E,
-    0x40,
-};
-// 0x00000000: push {r5, r6, lr}
-// 0x00000002: .cfi_def_cfa_offset: 12
-// 0x00000002: .cfi_offset: r5 at cfa-12
-// 0x00000002: .cfi_offset: r6 at cfa-8
-// 0x00000002: .cfi_offset: r14 at cfa-4
-// 0x00000002: vpush.f32 {s16-s17}
-// 0x00000006: .cfi_def_cfa_offset: 20
-// 0x00000006: .cfi_offset_extended: r80 at cfa-20
-// 0x00000006: .cfi_offset_extended: r81 at cfa-16
-// 0x00000006: sub sp, sp, #44
-// 0x00000008: .cfi_def_cfa_offset: 64
-// 0x00000008: str r0, [sp, #0]
-// 0x0000000a: .cfi_remember_state
-// 0x0000000a: add sp, sp, #44
-// 0x0000000c: .cfi_def_cfa_offset: 20
-// 0x0000000c: vpop.f32 {s16-s17}
-// 0x00000010: .cfi_def_cfa_offset: 12
-// 0x00000010: .cfi_restore_extended: r80
-// 0x00000010: .cfi_restore_extended: r81
-// 0x00000010: pop {r5, r6, pc}
-// 0x00000012: lsls r0, r0, #0
-// 0x00000014: .cfi_restore_state
-// 0x00000014: .cfi_def_cfa_offset: 64
-
-static constexpr uint8_t expected_asm_kArm64[] = {
-    0xFF, 0x03, 0x01, 0xD1, 0xE8, 0xA7, 0x01, 0x6D, 0xF4, 0xD7, 0x02, 0xA9,
-    0xFE, 0x1F, 0x00, 0xF9, 0xE0, 0x03, 0x00, 0xF9, 0xE8, 0xA7, 0x41, 0x6D,
-    0xF4, 0xD7, 0x42, 0xA9, 0xFE, 0x1F, 0x40, 0xF9, 0xFF, 0x03, 0x01, 0x91,
-    0xC0, 0x03, 0x5F, 0xD6,
-};
-static constexpr uint8_t expected_cfi_kArm64[] = {
-    0x44, 0x0E, 0x40, 0x44, 0x05, 0x48, 0x0A, 0x05, 0x49, 0x08, 0x44, 0x94,
-    0x06, 0x95, 0x04, 0x44, 0x9E, 0x02, 0x44, 0x0A, 0x44, 0x06, 0x48, 0x06,
-    0x49, 0x44, 0xD4, 0xD5, 0x44, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E,
-    0x40,
-};
-// 0x00000000: sub sp, sp, #0x40 (64)
-// 0x00000004: .cfi_def_cfa_offset: 64
-// 0x00000004: stp d8, d9, [sp, #24]
-// 0x00000008: .cfi_offset_extended: r72 at cfa-40
-// 0x00000008: .cfi_offset_extended: r73 at cfa-32
-// 0x00000008: stp x20, x21, [sp, #40]
-// 0x0000000c: .cfi_offset: r20 at cfa-24
-// 0x0000000c: .cfi_offset: r21 at cfa-16
-// 0x0000000c: str lr, [sp, #56]
-// 0x00000010: .cfi_offset: r30 at cfa-8
-// 0x00000010: str x0, [sp]
-// 0x00000014: .cfi_remember_state
-// 0x00000014: ldp d8, d9, [sp, #24]
-// 0x00000018: .cfi_restore_extended: r72
-// 0x00000018: .cfi_restore_extended: r73
-// 0x00000018: ldp x20, x21, [sp, #40]
-// 0x0000001c: .cfi_restore: r20
-// 0x0000001c: .cfi_restore: r21
-// 0x0000001c: ldr lr, [sp, #56]
-// 0x00000020: .cfi_restore: r30
-// 0x00000020: add sp, sp, #0x40 (64)
-// 0x00000024: .cfi_def_cfa_offset: 0
-// 0x00000024: ret
-// 0x00000028: .cfi_restore_state
-// 0x00000028: .cfi_def_cfa_offset: 64
-
-static constexpr uint8_t expected_asm_kX86[] = {
-    0x83, 0xEC, 0x3C, 0x89, 0x6C, 0x24, 0x34, 0x89, 0x74, 0x24, 0x38, 0x89,
-    0x04, 0x24, 0x8B, 0x6C, 0x24, 0x34, 0x8B, 0x74, 0x24, 0x38, 0x83, 0xC4,
-    0x3C, 0xC3, 0x00, 0x00,
-};
-static constexpr uint8_t expected_cfi_kX86[] = {
-    0x43, 0x0E, 0x40, 0x44, 0x85, 0x03, 0x44, 0x86, 0x02, 0x43, 0x0A, 0x44,
-    0xC5, 0x44, 0xC6, 0x43, 0x0E, 0x04, 0x43, 0x0B, 0x0E, 0x40,
-};
-// 0x00000000: sub esp, 60
-// 0x00000003: .cfi_def_cfa_offset: 64
-// 0x00000003: mov [esp + 52], ebp
-// 0x00000007: .cfi_offset: r5 at cfa-12
-// 0x00000007: mov [esp + 56], esi
-// 0x0000000b: .cfi_offset: r6 at cfa-8
-// 0x0000000b: mov [esp], eax
-// 0x0000000e: .cfi_remember_state
-// 0x0000000e: mov ebp, [esp + 52]
-// 0x00000012: .cfi_restore: r5
-// 0x00000012: mov esi, [esp + 56]
-// 0x00000016: .cfi_restore: r6
-// 0x00000016: add esp, 60
-// 0x00000019: .cfi_def_cfa_offset: 4
-// 0x00000019: ret
-// 0x0000001a: addb [eax], al
-// 0x0000001c: .cfi_restore_state
-// 0x0000001c: .cfi_def_cfa_offset: 64
-
-static constexpr uint8_t expected_asm_kX86_64[] = {
-    0x48, 0x83, 0xEC, 0x38, 0x48, 0x89, 0x5C, 0x24, 0x28, 0x48, 0x89, 0x6C,
-    0x24, 0x30, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F,
-    0x11, 0x6C, 0x24, 0x20, 0x48, 0x8B, 0xC7, 0x48, 0x89, 0x3C, 0x24, 0x48,
-    0x8B, 0x5C, 0x24, 0x28, 0x48, 0x8B, 0x6C, 0x24, 0x30, 0xF2, 0x44, 0x0F,
-    0x10, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C, 0x24, 0x20, 0x48,
-    0x83, 0xC4, 0x38, 0xC3,
-};
-static constexpr uint8_t expected_cfi_kX86_64[] = {
-    0x44, 0x0E, 0x40, 0x45, 0x83, 0x06, 0x45, 0x86, 0x04, 0x47, 0x9D, 0x0A,
-    0x47, 0x9E, 0x08, 0x47, 0x0A, 0x45, 0xC3, 0x45, 0xC6, 0x47, 0xDD, 0x47,
-    0xDE, 0x44, 0x0E, 0x08, 0x41, 0x0B, 0x0E, 0x40,
-};
-// 0x00000000: subq rsp, 56
-// 0x00000004: .cfi_def_cfa_offset: 64
-// 0x00000004: movq [rsp + 40], rbx
-// 0x00000009: .cfi_offset: r3 at cfa-24
-// 0x00000009: movq [rsp + 48], rbp
-// 0x0000000e: .cfi_offset: r6 at cfa-16
-// 0x0000000e: movsd [rsp + 24], xmm12
-// 0x00000015: .cfi_offset: r29 at cfa-40
-// 0x00000015: movsd [rsp + 32], xmm13
-// 0x0000001c: .cfi_offset: r30 at cfa-32
-// 0x0000001c: movq rax, rdi
-// 0x0000001f: movq [rsp], rdi
-// 0x00000023: .cfi_remember_state
-// 0x00000023: movq rbx, [rsp + 40]
-// 0x00000028: .cfi_restore: r3
-// 0x00000028: movq rbp, [rsp + 48]
-// 0x0000002d: .cfi_restore: r6
-// 0x0000002d: movsd xmm12, [rsp + 24]
-// 0x00000034: .cfi_restore: r29
-// 0x00000034: movsd xmm13, [rsp + 32]
-// 0x0000003b: .cfi_restore: r30
-// 0x0000003b: addq rsp, 56
-// 0x0000003f: .cfi_def_cfa_offset: 8
-// 0x0000003f: ret
-// 0x00000040: .cfi_restore_state
-// 0x00000040: .cfi_def_cfa_offset: 64
-
-static constexpr uint8_t expected_asm_kMips[] = {
-    0xF4, 0xFF, 0xBD, 0x27, 0x08, 0x00, 0xB2, 0xAF, 0x04, 0x00, 0xB3, 0xAF,
-    0x00, 0x00, 0xBF, 0xAF, 0xCC, 0xFF, 0xBD, 0x27, 0x25, 0x10, 0x80, 0x00,
-    0x00, 0x00, 0xA4, 0xAF, 0x3C, 0x00, 0xB2, 0x8F, 0x38, 0x00, 0xB3, 0x8F,
-    0x34, 0x00, 0xBF, 0x8F, 0x40, 0x00, 0xBD, 0x27, 0x09, 0x00, 0xE0, 0x03,
-    0x00, 0x00, 0x00, 0x00,
-};
-static constexpr uint8_t expected_cfi_kMips[] = {
-    0x44, 0x0E, 0x0C, 0x44, 0x92, 0x01, 0x44, 0x93, 0x02, 0x44, 0x9F, 0x03,
-    0x44, 0x0E, 0x40, 0x48, 0x0A, 0x44, 0xD2, 0x44, 0xD3, 0x44, 0xDF, 0x44,
-    0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
-};
-// 0x00000000: addiu r29, r29, -12
-// 0x00000004: .cfi_def_cfa_offset: 12
-// 0x00000004: sw r18, +8(r29)
-// 0x00000008: .cfi_offset: r18 at cfa-4
-// 0x00000008: sw r19, +4(r29)
-// 0x0000000c: .cfi_offset: r19 at cfa-8
-// 0x0000000c: sw r31, +0(r29)
-// 0x00000010: .cfi_offset: r31 at cfa-12
-// 0x00000010: addiu r29, r29, -52
-// 0x00000014: .cfi_def_cfa_offset: 64
-// 0x00000014: or r2, r4, r0
-// 0x00000018: sw r4, +0(r29)
-// 0x0000001c: .cfi_remember_state
-// 0x0000001c: lw r18, +60(r29)
-// 0x00000020: .cfi_restore: r18
-// 0x00000020: lw r19, +56(r29)
-// 0x00000024: .cfi_restore: r19
-// 0x00000024: lw r31, +52(r29)
-// 0x00000028: .cfi_restore: r31
-// 0x00000028: addiu r29, r29, 64
-// 0x0000002c: .cfi_def_cfa_offset: 0
-// 0x0000002c: jr r31
-// 0x00000030: nop
-// 0x00000034: .cfi_restore_state
-// 0x00000034: .cfi_def_cfa_offset: 64
-
-static constexpr uint8_t expected_asm_kMips64[] = {
-    0xE8, 0xFF, 0xBD, 0x67, 0x10, 0x00, 0xB2, 0xFF, 0x08, 0x00, 0xB3, 0xFF,
-    0x00, 0x00, 0xBF, 0xFF, 0xD8, 0xFF, 0xBD, 0x67, 0x25, 0x10, 0x80, 0x00,
-    0x00, 0x00, 0xA4, 0xFF, 0x38, 0x00, 0xB2, 0xDF, 0x30, 0x00, 0xB3, 0xDF,
-    0x28, 0x00, 0xBF, 0xDF, 0x40, 0x00, 0xBD, 0x67, 0x09, 0x00, 0xE0, 0x03,
-    0x00, 0x00, 0x00, 0x00,
-};
-static constexpr uint8_t expected_cfi_kMips64[] = {
-    0x44, 0x0E, 0x18, 0x44, 0x92, 0x02, 0x44, 0x93, 0x04, 0x44, 0x9F, 0x06,
-    0x44, 0x0E, 0x40, 0x48, 0x0A, 0x44, 0xD2, 0x44, 0xD3, 0x44, 0xDF, 0x44,
-    0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
-};
-// 0x00000000: daddiu r29, r29, -24
-// 0x00000004: .cfi_def_cfa_offset: 24
-// 0x00000004: sd r18, +16(r29)
-// 0x00000008: .cfi_offset: r18 at cfa-8
-// 0x00000008: sd r19, +8(r29)
-// 0x0000000c: .cfi_offset: r19 at cfa-16
-// 0x0000000c: sd r31, +0(r29)
-// 0x00000010: .cfi_offset: r31 at cfa-24
-// 0x00000010: daddiu r29, r29, -40
-// 0x00000014: .cfi_def_cfa_offset: 64
-// 0x00000014: or r2, r4, r0
-// 0x00000018: sd r4, +0(r29)
-// 0x0000001c: .cfi_remember_state
-// 0x0000001c: ld r18, +56(r29)
-// 0x00000020: .cfi_restore: r18
-// 0x00000020: ld r19, +48(r29)
-// 0x00000024: .cfi_restore: r19
-// 0x00000024: ld r31, +40(r29)
-// 0x00000028: .cfi_restore: r31
-// 0x00000028: daddiu r29, r29, 64
-// 0x0000002c: .cfi_def_cfa_offset: 0
-// 0x0000002c: jr r31
-// 0x00000030: nop
-// 0x00000034: .cfi_restore_state
-// 0x00000034: .cfi_def_cfa_offset: 64
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
deleted file mode 100644
index 49768de..0000000
--- a/compiler/dex/quick/quick_compiler.cc
+++ /dev/null
@@ -1,934 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "quick_compiler.h"
-
-#include <cstdint>
-
-#include "art_method-inl.h"
-#include "base/dumpable.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/timing_logger.h"
-#include "compiler.h"
-#include "dex_file-inl.h"
-#include "dex_file_to_method_inliner_map.h"
-#include "dex/compiler_ir.h"
-#include "dex/dex_flags.h"
-#include "dex/mir_graph.h"
-#include "dex/pass_driver_me_opts.h"
-#include "dex/pass_driver_me_post_opt.h"
-#include "dex/pass_manager.h"
-#include "dex/quick/mir_to_lir.h"
-#include "dex/verified_method.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
-#include "elf_writer_quick.h"
-#include "experimental_flags.h"
-#include "jni/quick/jni_compiler.h"
-#include "mir_to_lir.h"
-#include "mirror/object.h"
-#include "runtime.h"
-
-// Specific compiler backends.
-#ifdef ART_ENABLE_CODEGEN_arm
-#include "dex/quick/arm/backend_arm.h"
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_arm64
-#include "dex/quick/arm64/backend_arm64.h"
-#endif
-
-#if defined(ART_ENABLE_CODEGEN_mips) || defined(ART_ENABLE_CODEGEN_mips64)
-#include "dex/quick/mips/backend_mips.h"
-#endif
-
-#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
-#include "dex/quick/x86/backend_x86.h"
-#endif
-
-namespace art {
-
-static_assert(0U == static_cast<size_t>(kNone),   "kNone not 0");
-static_assert(1U == static_cast<size_t>(kArm),    "kArm not 1");
-static_assert(2U == static_cast<size_t>(kArm64),  "kArm64 not 2");
-static_assert(3U == static_cast<size_t>(kThumb2), "kThumb2 not 3");
-static_assert(4U == static_cast<size_t>(kX86),    "kX86 not 4");
-static_assert(5U == static_cast<size_t>(kX86_64), "kX86_64 not 5");
-static_assert(6U == static_cast<size_t>(kMips),   "kMips not 6");
-static_assert(7U == static_cast<size_t>(kMips64), "kMips64 not 7");
-
-// Additional disabled optimizations (over generally disabled) per instruction set.
-static constexpr uint32_t kDisabledOptimizationsPerISA[] = {
-    // 0 = kNone.
-    ~0U,
-    // 1 = kArm, unused (will use kThumb2).
-    ~0U,
-    // 2 = kArm64.
-    0,
-    // 3 = kThumb2.
-    0,
-    // 4 = kX86.
-    (1 << kLoadStoreElimination) |
-    0,
-    // 5 = kX86_64.
-    (1 << kLoadStoreElimination) |
-    0,
-    // 6 = kMips.
-    (1 << kLoadStoreElimination) |
-    (1 << kLoadHoisting) |
-    (1 << kSuppressLoads) |
-    (1 << kNullCheckElimination) |
-    (1 << kPromoteRegs) |
-    (1 << kTrackLiveTemps) |
-    (1 << kSafeOptimizations) |
-    (1 << kBBOpt) |
-    (1 << kMatch) |
-    (1 << kPromoteCompilerTemps) |
-    0,
-    // 7 = kMips64.
-    (1 << kLoadStoreElimination) |
-    (1 << kLoadHoisting) |
-    (1 << kSuppressLoads) |
-    (1 << kNullCheckElimination) |
-    (1 << kPromoteRegs) |
-    (1 << kTrackLiveTemps) |
-    (1 << kSafeOptimizations) |
-    (1 << kBBOpt) |
-    (1 << kMatch) |
-    (1 << kPromoteCompilerTemps) |
-    0
-};
-static_assert(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t),
-              "kDisabledOpts unexpected");
-
-// Supported shorty types per instruction set. null means that all are available.
-// Z : boolean
-// B : byte
-// S : short
-// C : char
-// I : int
-// J : long
-// F : float
-// D : double
-// L : reference(object, array)
-// V : void
-static const char* kSupportedTypes[] = {
-    // 0 = kNone.
-    "",
-    // 1 = kArm, unused (will use kThumb2).
-    "",
-    // 2 = kArm64.
-    nullptr,
-    // 3 = kThumb2.
-    nullptr,
-    // 4 = kX86.
-    nullptr,
-    // 5 = kX86_64.
-    nullptr,
-    // 6 = kMips.
-    nullptr,
-    // 7 = kMips64.
-    nullptr
-};
-static_assert(sizeof(kSupportedTypes) == 8 * sizeof(char*), "kSupportedTypes unexpected");
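For example, a shorty encodes the return type first, then each argument, with array types collapsing to the reference type 'L':

  // void m()                   -> "V"
  // long m(int, Object)        -> "JIL"
  // double m(float[], boolean) -> "DLZ"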
-
-static int kAllOpcodes[] = {
-    Instruction::NOP,
-    Instruction::MOVE,
-    Instruction::MOVE_FROM16,
-    Instruction::MOVE_16,
-    Instruction::MOVE_WIDE,
-    Instruction::MOVE_WIDE_FROM16,
-    Instruction::MOVE_WIDE_16,
-    Instruction::MOVE_OBJECT,
-    Instruction::MOVE_OBJECT_FROM16,
-    Instruction::MOVE_OBJECT_16,
-    Instruction::MOVE_RESULT,
-    Instruction::MOVE_RESULT_WIDE,
-    Instruction::MOVE_RESULT_OBJECT,
-    Instruction::MOVE_EXCEPTION,
-    Instruction::RETURN_VOID,
-    Instruction::RETURN,
-    Instruction::RETURN_WIDE,
-    Instruction::RETURN_OBJECT,
-    Instruction::CONST_4,
-    Instruction::CONST_16,
-    Instruction::CONST,
-    Instruction::CONST_HIGH16,
-    Instruction::CONST_WIDE_16,
-    Instruction::CONST_WIDE_32,
-    Instruction::CONST_WIDE,
-    Instruction::CONST_WIDE_HIGH16,
-    Instruction::CONST_STRING,
-    Instruction::CONST_STRING_JUMBO,
-    Instruction::CONST_CLASS,
-    Instruction::MONITOR_ENTER,
-    Instruction::MONITOR_EXIT,
-    Instruction::CHECK_CAST,
-    Instruction::INSTANCE_OF,
-    Instruction::ARRAY_LENGTH,
-    Instruction::NEW_INSTANCE,
-    Instruction::NEW_ARRAY,
-    Instruction::FILLED_NEW_ARRAY,
-    Instruction::FILLED_NEW_ARRAY_RANGE,
-    Instruction::FILL_ARRAY_DATA,
-    Instruction::THROW,
-    Instruction::GOTO,
-    Instruction::GOTO_16,
-    Instruction::GOTO_32,
-    Instruction::PACKED_SWITCH,
-    Instruction::SPARSE_SWITCH,
-    Instruction::CMPL_FLOAT,
-    Instruction::CMPG_FLOAT,
-    Instruction::CMPL_DOUBLE,
-    Instruction::CMPG_DOUBLE,
-    Instruction::CMP_LONG,
-    Instruction::IF_EQ,
-    Instruction::IF_NE,
-    Instruction::IF_LT,
-    Instruction::IF_GE,
-    Instruction::IF_GT,
-    Instruction::IF_LE,
-    Instruction::IF_EQZ,
-    Instruction::IF_NEZ,
-    Instruction::IF_LTZ,
-    Instruction::IF_GEZ,
-    Instruction::IF_GTZ,
-    Instruction::IF_LEZ,
-    Instruction::UNUSED_3E,
-    Instruction::UNUSED_3F,
-    Instruction::UNUSED_40,
-    Instruction::UNUSED_41,
-    Instruction::UNUSED_42,
-    Instruction::UNUSED_43,
-    Instruction::AGET,
-    Instruction::AGET_WIDE,
-    Instruction::AGET_OBJECT,
-    Instruction::AGET_BOOLEAN,
-    Instruction::AGET_BYTE,
-    Instruction::AGET_CHAR,
-    Instruction::AGET_SHORT,
-    Instruction::APUT,
-    Instruction::APUT_WIDE,
-    Instruction::APUT_OBJECT,
-    Instruction::APUT_BOOLEAN,
-    Instruction::APUT_BYTE,
-    Instruction::APUT_CHAR,
-    Instruction::APUT_SHORT,
-    Instruction::IGET,
-    Instruction::IGET_WIDE,
-    Instruction::IGET_OBJECT,
-    Instruction::IGET_BOOLEAN,
-    Instruction::IGET_BYTE,
-    Instruction::IGET_CHAR,
-    Instruction::IGET_SHORT,
-    Instruction::IPUT,
-    Instruction::IPUT_WIDE,
-    Instruction::IPUT_OBJECT,
-    Instruction::IPUT_BOOLEAN,
-    Instruction::IPUT_BYTE,
-    Instruction::IPUT_CHAR,
-    Instruction::IPUT_SHORT,
-    Instruction::SGET,
-    Instruction::SGET_WIDE,
-    Instruction::SGET_OBJECT,
-    Instruction::SGET_BOOLEAN,
-    Instruction::SGET_BYTE,
-    Instruction::SGET_CHAR,
-    Instruction::SGET_SHORT,
-    Instruction::SPUT,
-    Instruction::SPUT_WIDE,
-    Instruction::SPUT_OBJECT,
-    Instruction::SPUT_BOOLEAN,
-    Instruction::SPUT_BYTE,
-    Instruction::SPUT_CHAR,
-    Instruction::SPUT_SHORT,
-    Instruction::INVOKE_VIRTUAL,
-    Instruction::INVOKE_SUPER,
-    Instruction::INVOKE_DIRECT,
-    Instruction::INVOKE_STATIC,
-    Instruction::INVOKE_INTERFACE,
-    Instruction::RETURN_VOID_NO_BARRIER,
-    Instruction::INVOKE_VIRTUAL_RANGE,
-    Instruction::INVOKE_SUPER_RANGE,
-    Instruction::INVOKE_DIRECT_RANGE,
-    Instruction::INVOKE_STATIC_RANGE,
-    Instruction::INVOKE_INTERFACE_RANGE,
-    Instruction::UNUSED_79,
-    Instruction::UNUSED_7A,
-    Instruction::NEG_INT,
-    Instruction::NOT_INT,
-    Instruction::NEG_LONG,
-    Instruction::NOT_LONG,
-    Instruction::NEG_FLOAT,
-    Instruction::NEG_DOUBLE,
-    Instruction::INT_TO_LONG,
-    Instruction::INT_TO_FLOAT,
-    Instruction::INT_TO_DOUBLE,
-    Instruction::LONG_TO_INT,
-    Instruction::LONG_TO_FLOAT,
-    Instruction::LONG_TO_DOUBLE,
-    Instruction::FLOAT_TO_INT,
-    Instruction::FLOAT_TO_LONG,
-    Instruction::FLOAT_TO_DOUBLE,
-    Instruction::DOUBLE_TO_INT,
-    Instruction::DOUBLE_TO_LONG,
-    Instruction::DOUBLE_TO_FLOAT,
-    Instruction::INT_TO_BYTE,
-    Instruction::INT_TO_CHAR,
-    Instruction::INT_TO_SHORT,
-    Instruction::ADD_INT,
-    Instruction::SUB_INT,
-    Instruction::MUL_INT,
-    Instruction::DIV_INT,
-    Instruction::REM_INT,
-    Instruction::AND_INT,
-    Instruction::OR_INT,
-    Instruction::XOR_INT,
-    Instruction::SHL_INT,
-    Instruction::SHR_INT,
-    Instruction::USHR_INT,
-    Instruction::ADD_LONG,
-    Instruction::SUB_LONG,
-    Instruction::MUL_LONG,
-    Instruction::DIV_LONG,
-    Instruction::REM_LONG,
-    Instruction::AND_LONG,
-    Instruction::OR_LONG,
-    Instruction::XOR_LONG,
-    Instruction::SHL_LONG,
-    Instruction::SHR_LONG,
-    Instruction::USHR_LONG,
-    Instruction::ADD_FLOAT,
-    Instruction::SUB_FLOAT,
-    Instruction::MUL_FLOAT,
-    Instruction::DIV_FLOAT,
-    Instruction::REM_FLOAT,
-    Instruction::ADD_DOUBLE,
-    Instruction::SUB_DOUBLE,
-    Instruction::MUL_DOUBLE,
-    Instruction::DIV_DOUBLE,
-    Instruction::REM_DOUBLE,
-    Instruction::ADD_INT_2ADDR,
-    Instruction::SUB_INT_2ADDR,
-    Instruction::MUL_INT_2ADDR,
-    Instruction::DIV_INT_2ADDR,
-    Instruction::REM_INT_2ADDR,
-    Instruction::AND_INT_2ADDR,
-    Instruction::OR_INT_2ADDR,
-    Instruction::XOR_INT_2ADDR,
-    Instruction::SHL_INT_2ADDR,
-    Instruction::SHR_INT_2ADDR,
-    Instruction::USHR_INT_2ADDR,
-    Instruction::ADD_LONG_2ADDR,
-    Instruction::SUB_LONG_2ADDR,
-    Instruction::MUL_LONG_2ADDR,
-    Instruction::DIV_LONG_2ADDR,
-    Instruction::REM_LONG_2ADDR,
-    Instruction::AND_LONG_2ADDR,
-    Instruction::OR_LONG_2ADDR,
-    Instruction::XOR_LONG_2ADDR,
-    Instruction::SHL_LONG_2ADDR,
-    Instruction::SHR_LONG_2ADDR,
-    Instruction::USHR_LONG_2ADDR,
-    Instruction::ADD_FLOAT_2ADDR,
-    Instruction::SUB_FLOAT_2ADDR,
-    Instruction::MUL_FLOAT_2ADDR,
-    Instruction::DIV_FLOAT_2ADDR,
-    Instruction::REM_FLOAT_2ADDR,
-    Instruction::ADD_DOUBLE_2ADDR,
-    Instruction::SUB_DOUBLE_2ADDR,
-    Instruction::MUL_DOUBLE_2ADDR,
-    Instruction::DIV_DOUBLE_2ADDR,
-    Instruction::REM_DOUBLE_2ADDR,
-    Instruction::ADD_INT_LIT16,
-    Instruction::RSUB_INT,
-    Instruction::MUL_INT_LIT16,
-    Instruction::DIV_INT_LIT16,
-    Instruction::REM_INT_LIT16,
-    Instruction::AND_INT_LIT16,
-    Instruction::OR_INT_LIT16,
-    Instruction::XOR_INT_LIT16,
-    Instruction::ADD_INT_LIT8,
-    Instruction::RSUB_INT_LIT8,
-    Instruction::MUL_INT_LIT8,
-    Instruction::DIV_INT_LIT8,
-    Instruction::REM_INT_LIT8,
-    Instruction::AND_INT_LIT8,
-    Instruction::OR_INT_LIT8,
-    Instruction::XOR_INT_LIT8,
-    Instruction::SHL_INT_LIT8,
-    Instruction::SHR_INT_LIT8,
-    Instruction::USHR_INT_LIT8,
-    Instruction::IGET_QUICK,
-    Instruction::IGET_WIDE_QUICK,
-    Instruction::IGET_OBJECT_QUICK,
-    Instruction::IPUT_QUICK,
-    Instruction::IPUT_WIDE_QUICK,
-    Instruction::IPUT_OBJECT_QUICK,
-    Instruction::INVOKE_VIRTUAL_QUICK,
-    Instruction::INVOKE_VIRTUAL_RANGE_QUICK,
-    Instruction::IPUT_BOOLEAN_QUICK,
-    Instruction::IPUT_BYTE_QUICK,
-    Instruction::IPUT_CHAR_QUICK,
-    Instruction::IPUT_SHORT_QUICK,
-    Instruction::IGET_BOOLEAN_QUICK,
-    Instruction::IGET_BYTE_QUICK,
-    Instruction::IGET_CHAR_QUICK,
-    Instruction::IGET_SHORT_QUICK,
-    Instruction::INVOKE_LAMBDA,
-    Instruction::UNUSED_F4,
-    Instruction::CAPTURE_VARIABLE,
-    Instruction::CREATE_LAMBDA,
-    Instruction::LIBERATE_VARIABLE,
-    Instruction::BOX_LAMBDA,
-    Instruction::UNBOX_LAMBDA,
-    Instruction::UNUSED_FA,
-    Instruction::UNUSED_FB,
-    Instruction::UNUSED_FC,
-    Instruction::UNUSED_FD,
-    Instruction::UNUSED_FE,
-    Instruction::UNUSED_FF,
-    // ----- ExtendedMIROpcode -----
-    kMirOpPhi,
-    kMirOpCopy,
-    kMirOpFusedCmplFloat,
-    kMirOpFusedCmpgFloat,
-    kMirOpFusedCmplDouble,
-    kMirOpFusedCmpgDouble,
-    kMirOpFusedCmpLong,
-    kMirOpNop,
-    kMirOpNullCheck,
-    kMirOpRangeCheck,
-    kMirOpDivZeroCheck,
-    kMirOpCheck,
-    kMirOpSelect,
-};
-
-static int kInvokeOpcodes[] = {
-    Instruction::INVOKE_VIRTUAL,
-    Instruction::INVOKE_SUPER,
-    Instruction::INVOKE_DIRECT,
-    Instruction::INVOKE_STATIC,
-    Instruction::INVOKE_INTERFACE,
-    Instruction::INVOKE_VIRTUAL_RANGE,
-    Instruction::INVOKE_SUPER_RANGE,
-    Instruction::INVOKE_DIRECT_RANGE,
-    Instruction::INVOKE_STATIC_RANGE,
-    Instruction::INVOKE_INTERFACE_RANGE,
-    Instruction::INVOKE_VIRTUAL_QUICK,
-    Instruction::INVOKE_VIRTUAL_RANGE_QUICK,
-};
-
-// TODO: Add support for lambda opcodes to the quick compiler.
-static const int kUnsupportedLambdaOpcodes[] = {
-    Instruction::INVOKE_LAMBDA,
-    Instruction::CREATE_LAMBDA,
-    Instruction::BOX_LAMBDA,
-    Instruction::UNBOX_LAMBDA,
-};
-
-// Unsupported opcodes. Null can be used when everything is supported. The sizes of the
-// lists are recorded below.
-static const int* kUnsupportedOpcodes[] = {
-    // 0 = kNone.
-    kAllOpcodes,
-    // 1 = kArm, unused (will use kThumb2).
-    kAllOpcodes,
-    // 2 = kArm64.
-    kUnsupportedLambdaOpcodes,
-    // 3 = kThumb2.
-    kUnsupportedLambdaOpcodes,
-    // 4 = kX86.
-    kUnsupportedLambdaOpcodes,
-    // 5 = kX86_64.
-    kUnsupportedLambdaOpcodes,
-    // 6 = kMips.
-    kUnsupportedLambdaOpcodes,
-    // 7 = kMips64.
-    kUnsupportedLambdaOpcodes,
-};
-static_assert(sizeof(kUnsupportedOpcodes) == 8 * sizeof(int*), "kUnsupportedOpcodes unexpected");
-
-// Size of the arrays stored above.
-static const size_t kUnsupportedOpcodesSize[] = {
-    // 0 = kNone.
-    arraysize(kAllOpcodes),
-    // 1 = kArm, unused (will use kThumb2).
-    arraysize(kAllOpcodes),
-    // 2 = kArm64.
-    arraysize(kUnsupportedLambdaOpcodes),
-    // 3 = kThumb2.
-    arraysize(kUnsupportedLambdaOpcodes),
-    // 4 = kX86.
-    arraysize(kUnsupportedLambdaOpcodes),
-    // 5 = kX86_64.
-    arraysize(kUnsupportedLambdaOpcodes),
-    // 6 = kMips.
-    arraysize(kUnsupportedLambdaOpcodes),
-    // 7 = kMips64.
-    arraysize(kUnsupportedLambdaOpcodes),
-};
-static_assert(sizeof(kUnsupportedOpcodesSize) == 8 * sizeof(size_t),
-              "kUnsupportedOpcodesSize unexpected");
-
-// The maximum number of Dalvik registers in a method for which we will attempt compilation.
-// This tries to avoid aborting when we would need to manage more SSA registers than we can.
-static constexpr size_t kMaxAllowedDalvikRegisters = INT16_MAX / 2;
-
-static bool CanCompileShorty(const char* shorty, InstructionSet instruction_set) {
-  const char* supported_types = kSupportedTypes[instruction_set];
-  if (supported_types == nullptr) {
-    // Everything available.
-    return true;
-  }
-
-  uint32_t shorty_size = strlen(shorty);
-  CHECK_GE(shorty_size, 1u);
-
-  for (uint32_t i = 0; i < shorty_size; i++) {
-    if (strchr(supported_types, shorty[i]) == nullptr) {
-      return false;
-    }
-  }
-  return true;
-}
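Given the table above, two illustrative results:

  CanCompileShorty("JIL", kArm64);  // true: kSupportedTypes[kArm64] is null, so everything is available.
  CanCompileShorty("JIL", kNone);   // false: the empty list supports nothing.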
-
-bool QuickCompiler::CanCompileInstruction(const MIR* mir,
-                                          const DexFile& dex_file,
-                                          CompilationUnit* cu) const {
-  switch (mir->dalvikInsn.opcode) {
-    // The quick compiler does not support the new invoke-super semantics that allow the
-    // target to be an interface method.
-    case Instruction::INVOKE_SUPER:  // Fall-through
-    case Instruction::INVOKE_SUPER_RANGE: {
-      DCHECK(mir->dalvikInsn.IsInvoke());
-      uint32_t invoke_method_idx = mir->dalvikInsn.vB;
-      const DexFile::MethodId& method_id = dex_file.GetMethodId(invoke_method_idx);
-      const DexFile::ClassDef* class_def = dex_file.FindClassDef(method_id.class_idx_);
-      // False if the declaring class is an interface, i.e. !(java_access_flags & kAccInterface).
-      return class_def != nullptr && ((class_def->GetJavaAccessFlags() & kAccInterface) == 0);
-    }
-    case Instruction::NEW_INSTANCE: {
-      uint32_t type_idx = mir->dalvikInsn.vB;
-      if (cu->compiler_driver->IsStringTypeIndex(type_idx, cu->dex_file)) {
-        return false;
-      }
-      return true;
-    }
-    default:
-      return true;
-  }
-}
-
-// Skip methods that we do not currently support.
-bool QuickCompiler::CanCompileMethod(uint32_t method_idx,
-                                     const DexFile& dex_file,
-                                     CompilationUnit* cu) const {
-  // This is a limitation in mir_graph. See MirGraph::SetNumSSARegs.
-  if (cu->mir_graph->GetNumOfCodeAndTempVRs() > kMaxAllowedDalvikRegisters) {
-    VLOG(compiler) << "Too many dalvik registers : " << cu->mir_graph->GetNumOfCodeAndTempVRs();
-    return false;
-  }
-
-  // Since the quick compiler doesn't (and never will) support default methods, we always need
-  // to scan the opcodes.
-
-  // Check if we can compile the prototype.
-  const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
-  if (!CanCompileShorty(shorty, cu->instruction_set)) {
-    VLOG(compiler) << "Unsupported shorty : " << shorty;
-    return false;
-  }
-
-  const int* unsupport_list = kUnsupportedOpcodes[cu->instruction_set];
-  size_t unsupport_list_size = kUnsupportedOpcodesSize[cu->instruction_set];
-
-  for (unsigned int idx = 0; idx < cu->mir_graph->GetNumBlocks(); idx++) {
-    BasicBlock* bb = cu->mir_graph->GetBasicBlock(idx);
-    if (bb == nullptr) continue;
-    if (bb->block_type == kDead) continue;
-    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      int opcode = mir->dalvikInsn.opcode;
-      // Check if we support the byte code.
-      if (std::find(unsupport_list, unsupport_list + unsupport_list_size, opcode)
-          != unsupport_list + unsupport_list_size) {
-        if (!MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
-          VLOG(compiler) << "Unsupported dalvik byte code : "
-              << mir->dalvikInsn.opcode;
-        } else {
-          VLOG(compiler) << "Unsupported extended MIR opcode : "
-              << MIRGraph::extended_mir_op_names_[opcode - kMirOpFirst];
-        }
-        return false;
-      } else if (!CanCompileInstruction(mir, dex_file, cu)) {
-        VLOG(compiler) << "Cannot compile dalvik opcode : " << mir->dalvikInsn.opcode;
-        return false;
-      }
-      // Check if it invokes a prototype that we cannot support.
-      if (std::find(kInvokeOpcodes, kInvokeOpcodes + arraysize(kInvokeOpcodes), opcode)
-          != kInvokeOpcodes + arraysize(kInvokeOpcodes)) {
-        uint32_t invoke_method_idx = mir->dalvikInsn.vB;
-        const char* invoke_method_shorty = dex_file.GetMethodShorty(
-            dex_file.GetMethodId(invoke_method_idx));
-        if (!CanCompileShorty(invoke_method_shorty, cu->instruction_set)) {
-          VLOG(compiler) << "Unsupported to invoke '"
-              << PrettyMethod(invoke_method_idx, dex_file)
-              << "' with shorty : " << invoke_method_shorty;
-          return false;
-        }
-      }
-    }
-  }
-  return true;
-}
-
-void QuickCompiler::InitCompilationUnit(CompilationUnit& cu) const {
-  // Disable optimizations according to instruction set.
-  cu.disable_opt |= kDisabledOptimizationsPerISA[cu.instruction_set];
-  if (Runtime::Current()->UseJit()) {
-    // Disable these optimizations for JIT until support for quickened byte codes is complete.
-    // TODO: Find a cleaner way to do this.
-    cu.disable_opt |= 1u << kLocalValueNumbering;
-  }
-}
-
-void QuickCompiler::Init() {
-  CHECK(GetCompilerDriver()->GetCompilerContext() == nullptr);
-}
-
-void QuickCompiler::UnInit() const {
-  CHECK(GetCompilerDriver()->GetCompilerContext() == nullptr);
-}
-
-/* Default optimizer/debug settings for the compiler. */
-static uint32_t kCompilerOptimizerDisableFlags = 0 |  // Disable specific optimizations
-  // (1 << kLoadStoreElimination) |
-  // (1 << kLoadHoisting) |
-  // (1 << kSuppressLoads) |
-  // (1 << kNullCheckElimination) |
-  // (1 << kClassInitCheckElimination) |
-  // (1 << kGlobalValueNumbering) |
-  // (1 << kGvnDeadCodeElimination) |
-  // (1 << kLocalValueNumbering) |
-  // (1 << kPromoteRegs) |
-  // (1 << kTrackLiveTemps) |
-  // (1 << kSafeOptimizations) |
-  // (1 << kBBOpt) |
-  // (1 << kSuspendCheckElimination) |
-  // (1 << kMatch) |
-  // (1 << kPromoteCompilerTemps) |
-  // (1 << kSuppressExceptionEdges) |
-  // (1 << kSuppressMethodInlining) |
-  0;
-
-static uint32_t kCompilerDebugFlags = 0 |     // Enable debug/testing modes
-  // (1 << kDebugDisplayMissingTargets) |
-  // (1 << kDebugVerbose) |
-  // (1 << kDebugDumpCFG) |
-  // (1 << kDebugSlowFieldPath) |
-  // (1 << kDebugSlowInvokePath) |
-  // (1 << kDebugSlowStringPath) |
-  // (1 << kDebugSlowestFieldPath) |
-  // (1 << kDebugSlowestStringPath) |
-  // (1 << kDebugExerciseResolveMethod) |
-  // (1 << kDebugVerifyDataflow) |
-  // (1 << kDebugShowMemoryUsage) |
-  // (1 << kDebugShowNops) |
-  // (1 << kDebugCountOpcodes) |
-  // (1 << kDebugDumpCheckStats) |
-  // (1 << kDebugShowSummaryMemoryUsage) |
-  // (1 << kDebugShowFilterStats) |
-  // (1 << kDebugTimings) |
-  // (1 << kDebugCodegenDump) |
-  0;
-
-CompiledMethod* QuickCompiler::Compile(const DexFile::CodeItem* code_item,
-                                       uint32_t access_flags,
-                                       InvokeType invoke_type,
-                                       uint16_t class_def_idx,
-                                       uint32_t method_idx,
-                                       jobject class_loader,
-                                       const DexFile& dex_file,
-                                       Handle<mirror::DexCache> dex_cache) const {
-  if (kPoisonHeapReferences) {
-    VLOG(compiler) << "Skipping method : " << PrettyMethod(method_idx, dex_file)
-                   << "  Reason = Quick does not support heap poisoning.";
-    return nullptr;
-  }
-
-  if (kEmitCompilerReadBarrier) {
-    VLOG(compiler) << "Skipping method : " << PrettyMethod(method_idx, dex_file)
-                   << "  Reason = Quick does not support read barrier.";
-    return nullptr;
-  }
-
-  // TODO: check method fingerprint here to determine the appropriate backend type.  Until then,
-  // use the build default.
-  CompilerDriver* driver = GetCompilerDriver();
-
-  VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
-  if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
-    return nullptr;
-  }
-
-  DCHECK(driver->GetCompilerOptions().IsCompilationEnabled());
-  DCHECK(!driver->GetVerifiedMethod(&dex_file, method_idx)->HasRuntimeThrow());
-
-  Runtime* const runtime = Runtime::Current();
-  ClassLinker* const class_linker = runtime->GetClassLinker();
-  InstructionSet instruction_set = driver->GetInstructionSet();
-  if (instruction_set == kArm) {
-    instruction_set = kThumb2;
-  }
-  CompilationUnit cu(runtime->GetArenaPool(), instruction_set, driver, class_linker);
-  cu.dex_file = &dex_file;
-  cu.class_def_idx = class_def_idx;
-  cu.method_idx = method_idx;
-  cu.access_flags = access_flags;
-  cu.invoke_type = invoke_type;
-  cu.shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
-
-  CHECK((cu.instruction_set == kThumb2) ||
-        (cu.instruction_set == kArm64) ||
-        (cu.instruction_set == kX86) ||
-        (cu.instruction_set == kX86_64) ||
-        (cu.instruction_set == kMips) ||
-        (cu.instruction_set == kMips64));
-
-  // TODO: set this from the command line.
-  constexpr bool compiler_flip_match = false;
-  const std::string compiler_method_match = "";
-
-  bool use_match = !compiler_method_match.empty();
-  bool match = use_match && (compiler_flip_match ^
-      (PrettyMethod(method_idx, dex_file).find(compiler_method_match) != std::string::npos));
-  if (!use_match || match) {
-    cu.disable_opt = kCompilerOptimizerDisableFlags;
-    cu.enable_debug = kCompilerDebugFlags;
-    cu.verbose = VLOG_IS_ON(compiler) ||
-        (cu.enable_debug & (1 << kDebugVerbose));
-  }
-
-  if (driver->GetCompilerOptions().HasVerboseMethods()) {
-    cu.verbose = driver->GetCompilerOptions().IsVerboseMethod(PrettyMethod(method_idx, dex_file));
-  }
-
-  if (cu.verbose) {
-    cu.enable_debug |= (1 << kDebugCodegenDump);
-  }
-
-  /*
-   * TODO: rework handling of optimization and debug flags.  Should we split out
-   * MIR and backend flags?  Need command-line setting as well.
-   */
-
-  InitCompilationUnit(cu);
-
-  cu.StartTimingSplit("BuildMIRGraph");
-  cu.mir_graph.reset(new MIRGraph(&cu, &cu.arena));
-
-  /*
-   * After creation of the MIR graph, also create the code generator.
-   * The reason we do this is that optimizations on the MIR graph may need to get information
-   * that is only available if a CG exists.
-   */
-  cu.cg.reset(GetCodeGenerator(&cu, nullptr));
-
-  /* Gathering opcode stats? */
-  if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
-    cu.mir_graph->EnableOpcodeCounting();
-  }
-
-  /* Build the raw MIR graph */
-  cu.mir_graph->InlineMethod(code_item, access_flags, invoke_type, class_def_idx, method_idx,
-                             class_loader, dex_file, dex_cache);
-
-  if (!CanCompileMethod(method_idx, dex_file, &cu)) {
-    VLOG(compiler) << cu.instruction_set << ": Cannot compile method : "
-        << PrettyMethod(method_idx, dex_file);
-    cu.EndTiming();
-    return nullptr;
-  }
-
-  cu.NewTimingSplit("MIROpt:CheckFilters");
-  std::string skip_message;
-  if (cu.mir_graph->SkipCompilation(&skip_message)) {
-    VLOG(compiler) << cu.instruction_set << ": Skipping method : "
-        << PrettyMethod(method_idx, dex_file) << "  Reason = " << skip_message;
-    cu.EndTiming();
-    return nullptr;
-  }
-
-  /* Create the pass driver and launch it */
-  PassDriverMEOpts pass_driver(GetPreOptPassManager(), GetPostOptPassManager(), &cu);
-  pass_driver.Launch();
-
-  if (cu.enable_debug & (1 << kDebugDumpCheckStats)) {
-    cu.mir_graph->DumpCheckStats();
-  }
-
-  if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
-    cu.mir_graph->ShowOpcodeStats();
-  }
-
-  /* Reassociate sreg names with original Dalvik vreg names. */
-  cu.mir_graph->RemapRegLocations();
-
-  /* Free Arenas from the cu.arena_stack for reuse by the cu.arena in the codegen. */
-  if (cu.enable_debug & (1 << kDebugShowMemoryUsage)) {
-    if (cu.arena_stack.PeakBytesAllocated() > 1 * 1024 * 1024) {
-      MemStats stack_stats(cu.arena_stack.GetPeakStats());
-      LOG(INFO) << PrettyMethod(method_idx, dex_file) << " " << Dumpable<MemStats>(stack_stats);
-    }
-  }
-  cu.arena_stack.Reset();
-
-  CompiledMethod* result = nullptr;
-
-  if (cu.mir_graph->PuntToInterpreter()) {
-    VLOG(compiler) << cu.instruction_set << ": Punted method to interpreter: "
-        << PrettyMethod(method_idx, dex_file);
-    cu.EndTiming();
-    return nullptr;
-  }
-
-  cu.cg->Materialize();
-
-  cu.NewTimingSplit("Dedupe");  /* deduping takes up the vast majority of time in GetCompiledMethod(). */
-  result = cu.cg->GetCompiledMethod();
-  cu.NewTimingSplit("Cleanup");
-
-  if (result) {
-    VLOG(compiler) << cu.instruction_set << ": Compiled " << PrettyMethod(method_idx, dex_file);
-  } else {
-    VLOG(compiler) << cu.instruction_set << ": Deferred " << PrettyMethod(method_idx, dex_file);
-  }
-
-  if (cu.enable_debug & (1 << kDebugShowMemoryUsage)) {
-    if (cu.arena.BytesAllocated() > (1 * 1024 * 1024)) {
-      MemStats mem_stats(cu.arena.GetMemStats());
-      LOG(INFO) << PrettyMethod(method_idx, dex_file) << " " << Dumpable<MemStats>(mem_stats);
-    }
-  }
-
-  if (cu.enable_debug & (1 << kDebugShowSummaryMemoryUsage)) {
-    LOG(INFO) << "MEMINFO " << cu.arena.BytesAllocated() << " " << cu.mir_graph->GetNumBlocks()
-                    << " " << PrettyMethod(method_idx, dex_file);
-  }
-
-  cu.EndTiming();
-  driver->GetTimingsLogger()->AddLogger(cu.timings);
-  return result;
-}
-
-CompiledMethod* QuickCompiler::JniCompile(uint32_t access_flags,
-                                          uint32_t method_idx,
-                                          const DexFile& dex_file) const {
-  return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
-}
-
-uintptr_t QuickCompiler::GetEntryPointOf(ArtMethod* method) const {
-  return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
-      InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
-}
-
-Mir2Lir* QuickCompiler::GetCodeGenerator(CompilationUnit* cu,
-                                         void* compilation_unit ATTRIBUTE_UNUSED) {
-  Mir2Lir* mir_to_lir = nullptr;
-  switch (cu->instruction_set) {
-#ifdef ART_ENABLE_CODEGEN_arm
-    case kThumb2:
-      mir_to_lir = ArmCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
-      break;
-#endif  // ART_ENABLE_CODEGEN_arm
-#ifdef ART_ENABLE_CODEGEN_arm64
-    case kArm64:
-      mir_to_lir = Arm64CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
-      break;
-#endif  // ART_ENABLE_CODEGEN_arm64
-#if defined(ART_ENABLE_CODEGEN_mips) || defined(ART_ENABLE_CODEGEN_mips64)
-      // Intentional 2 level ifdef. Want to fail on mips64 if it is not enabled, even if mips is
-      // and vice versa.
-#ifdef ART_ENABLE_CODEGEN_mips
-    case kMips:
-      // Fall-through.
-#endif  // ART_ENABLE_CODEGEN_mips
-#ifdef ART_ENABLE_CODEGEN_mips64
-    case kMips64:
-#endif  // ART_ENABLE_CODEGEN_mips64
-      mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
-      break;
-#endif  // ART_ENABLE_CODEGEN_mips || ART_ENABLE_CODEGEN_mips64
-#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
-      // Intentional 2 level ifdef. Want to fail on x86_64 if it is not enabled, even if x86 is
-      // and vice versa.
-#ifdef ART_ENABLE_CODEGEN_x86
-    case kX86:
-      // Fall-through.
-#endif  // ART_ENABLE_CODEGEN_x86
-#ifdef ART_ENABLE_CODEGEN_x86_64
-    case kX86_64:
-#endif  // ART_ENABLE_CODEGEN_x86_64
-      mir_to_lir = X86CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
-      break;
-#endif  // ART_ENABLE_CODEGEN_x86 || ART_ENABLE_CODEGEN_x86_64
-    default:
-      LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
-  }
-
-  /* The number of compiler temporaries depends on the backend, so set it up now if possible. */
-  if (mir_to_lir) {
-    size_t max_temps = mir_to_lir->GetMaxPossibleCompilerTemps();
-    bool set_max = cu->mir_graph->SetMaxAvailableNonSpecialCompilerTemps(max_temps);
-    CHECK(set_max);
-  }
-  return mir_to_lir;
-}
-
-QuickCompiler::QuickCompiler(CompilerDriver* driver) : Compiler(driver, 100) {
-  const auto& compiler_options = driver->GetCompilerOptions();
-  auto* pass_manager_options = compiler_options.GetPassManagerOptions();
-  pre_opt_pass_manager_.reset(new PassManager(*pass_manager_options));
-  CHECK(pre_opt_pass_manager_.get() != nullptr);
-  PassDriverMEOpts::SetupPasses(pre_opt_pass_manager_.get());
-  pre_opt_pass_manager_->CreateDefaultPassList();
-  if (pass_manager_options->GetPrintPassOptions()) {
-    PassDriverMEOpts::PrintPassOptions(pre_opt_pass_manager_.get());
-  }
-  // TODO: Different options for pre vs post opts?
-  post_opt_pass_manager_.reset(new PassManager(PassManagerOptions()));
-  CHECK(post_opt_pass_manager_.get() != nullptr);
-  PassDriverMEPostOpt::SetupPasses(post_opt_pass_manager_.get());
-  post_opt_pass_manager_->CreateDefaultPassList();
-  if (pass_manager_options->GetPrintPassOptions()) {
-    PassDriverMEPostOpt::PrintPassOptions(post_opt_pass_manager_.get());
-  }
-}
-
-QuickCompiler::~QuickCompiler() {
-}
-
-Compiler* CreateQuickCompiler(CompilerDriver* driver) {
-  return QuickCompiler::Create(driver);
-}
-
-Compiler* QuickCompiler::Create(CompilerDriver* driver) {
-  return new QuickCompiler(driver);
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/quick_compiler.h b/compiler/dex/quick/quick_compiler.h
deleted file mode 100644
index f32cf86..0000000
--- a/compiler/dex/quick/quick_compiler.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_QUICK_COMPILER_H_
-#define ART_COMPILER_DEX_QUICK_QUICK_COMPILER_H_
-
-#include "compiler.h"
-#include "dex/mir_graph.h"
-
-namespace art {
-
-namespace mirror {
-class DexCache;
-}
-
-class Compiler;
-class CompilerDriver;
-class Mir2Lir;
-class PassManager;
-
-class QuickCompiler : public Compiler {
- public:
-  virtual ~QuickCompiler();
-
-  void Init() OVERRIDE;
-
-  void UnInit() const OVERRIDE;
-
-  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file, CompilationUnit* cu) const
-      OVERRIDE;
-
-  CompiledMethod* Compile(const DexFile::CodeItem* code_item,
-                          uint32_t access_flags,
-                          InvokeType invoke_type,
-                          uint16_t class_def_idx,
-                          uint32_t method_idx,
-                          jobject class_loader,
-                          const DexFile& dex_file,
-                          Handle<mirror::DexCache> dex_cache) const OVERRIDE;
-
-  CompiledMethod* JniCompile(uint32_t access_flags,
-                             uint32_t method_idx,
-                             const DexFile& dex_file) const OVERRIDE;
-
-  uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
-  static Mir2Lir* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit);
-
-  void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE;
-
-  static Compiler* Create(CompilerDriver* driver);
-
-  const PassManager* GetPreOptPassManager() const {
-    return pre_opt_pass_manager_.get();
-  }
-  const PassManager* GetPostOptPassManager() const {
-    return post_opt_pass_manager_.get();
-  }
-
- protected:
-  explicit QuickCompiler(CompilerDriver* driver);
-
- private:
-  bool CanCompileInstruction(const MIR* mir, const DexFile& dex_file, CompilationUnit* cu) const;
-
-  std::unique_ptr<PassManager> pre_opt_pass_manager_;
-  std::unique_ptr<PassManager> post_opt_pass_manager_;
-  DISALLOW_COPY_AND_ASSIGN(QuickCompiler);
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_QUICK_COMPILER_H_
diff --git a/compiler/dex/quick/quick_compiler_factory.h b/compiler/dex/quick/quick_compiler_factory.h
deleted file mode 100644
index 31ee1cf..0000000
--- a/compiler/dex/quick/quick_compiler_factory.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_QUICK_COMPILER_FACTORY_H_
-#define ART_COMPILER_DEX_QUICK_QUICK_COMPILER_FACTORY_H_
-
-namespace art {
-
-class Compiler;
-class CompilerDriver;
-
-Compiler* CreateQuickCompiler(CompilerDriver* driver);
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_QUICK_COMPILER_FACTORY_H_
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
deleted file mode 100644
index dceb118..0000000
--- a/compiler/dex/quick/ralloc_util.cc
+++ /dev/null
@@ -1,1560 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This file contains register allocation support. */
-
-#include "mir_to_lir-inl.h"
-
-#include "base/stringprintf.h"
-#include "dex/compiler_ir.h"
-#include "dex/dataflow_iterator-inl.h"
-#include "dex/mir_graph.h"
-#include "driver/compiler_driver.h"
-#include "driver/dex_compilation_unit.h"
-#include "utils/dex_cache_arrays_layout-inl.h"
-
-namespace art {
-
-/*
- * Free all allocated temps in the temp pools.  Note that this does
- * not affect the "liveness" of a temp register, which will stay
- * live until it is either explicitly killed or reallocated.
- */
-void Mir2Lir::ResetRegPool() {
-  for (RegisterInfo* info : tempreg_info_) {
-    info->MarkFree();
-  }
-  // Reset temp tracking sanity check.
-  if (kIsDebugBuild) {
-    live_sreg_ = INVALID_SREG;
-  }
-}
-
-Mir2Lir::RegisterInfo::RegisterInfo(RegStorage r, const ResourceMask& mask)
-  : reg_(r), is_temp_(false), wide_value_(false), dirty_(false), aliased_(false), partner_(r),
-    s_reg_(INVALID_SREG), def_use_mask_(mask), master_(this), def_start_(nullptr),
-    def_end_(nullptr), alias_chain_(nullptr) {
-  switch (r.StorageSize()) {
-    case 0: storage_mask_ = 0xffffffff; break;
-    case 4: storage_mask_ = 0x00000001; break;
-    case 8: storage_mask_ = 0x00000003; break;
-    case 16: storage_mask_ = 0x0000000f; break;
-    case 32: storage_mask_ = 0x000000ff; break;
-    case 64: storage_mask_ = 0x0000ffff; break;
-    case 128: storage_mask_ = 0xffffffff; break;
-  }
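-  // One liveness bit per 4 bytes of storage: a 32-bit solo claims one bit,
-  // wider storage proportionally more, and size 0 (an invalid reg) claims
-  // them all - which is why an invalid register reads as fully used below.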
-  used_storage_ = r.Valid() ? ~storage_mask_ : storage_mask_;
-  liveness_ = used_storage_;
-}
-
-Mir2Lir::RegisterPool::RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
-                                    const ArrayRef<const RegStorage>& core_regs,
-                                    const ArrayRef<const RegStorage>& core64_regs,
-                                    const ArrayRef<const RegStorage>& sp_regs,
-                                    const ArrayRef<const RegStorage>& dp_regs,
-                                    const ArrayRef<const RegStorage>& reserved_regs,
-                                    const ArrayRef<const RegStorage>& reserved64_regs,
-                                    const ArrayRef<const RegStorage>& core_temps,
-                                    const ArrayRef<const RegStorage>& core64_temps,
-                                    const ArrayRef<const RegStorage>& sp_temps,
-                                    const ArrayRef<const RegStorage>& dp_temps) :
-    core_regs_(arena->Adapter()), next_core_reg_(0),
-    core64_regs_(arena->Adapter()), next_core64_reg_(0),
-    sp_regs_(arena->Adapter()), next_sp_reg_(0),
-    dp_regs_(arena->Adapter()), next_dp_reg_(0), m2l_(m2l)  {
-  // Initialize the fast lookup map.
-  m2l_->reginfo_map_.clear();
-  m2l_->reginfo_map_.resize(RegStorage::kMaxRegs, nullptr);
-
-  // Construct the register pool.
-  core_regs_.reserve(core_regs.size());
-  for (const RegStorage& reg : core_regs) {
-    RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
-    m2l_->reginfo_map_[reg.GetReg()] = info;
-    core_regs_.push_back(info);
-  }
-  core64_regs_.reserve(core64_regs.size());
-  for (const RegStorage& reg : core64_regs) {
-    RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
-    m2l_->reginfo_map_[reg.GetReg()] = info;
-    core64_regs_.push_back(info);
-  }
-  sp_regs_.reserve(sp_regs.size());
-  for (const RegStorage& reg : sp_regs) {
-    RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
-    m2l_->reginfo_map_[reg.GetReg()] = info;
-    sp_regs_.push_back(info);
-  }
-  dp_regs_.reserve(dp_regs.size());
-  for (const RegStorage& reg : dp_regs) {
-    RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
-    m2l_->reginfo_map_[reg.GetReg()] = info;
-    dp_regs_.push_back(info);
-  }
-
-  // Keep special registers from being allocated.
-  for (RegStorage reg : reserved_regs) {
-    m2l_->MarkInUse(reg);
-  }
-  for (RegStorage reg : reserved64_regs) {
-    m2l_->MarkInUse(reg);
-  }
-
-  // Mark temp regs - all others not in use can be used for promotion
-  for (RegStorage reg : core_temps) {
-    m2l_->MarkTemp(reg);
-  }
-  for (RegStorage reg : core64_temps) {
-    m2l_->MarkTemp(reg);
-  }
-  for (RegStorage reg : sp_temps) {
-    m2l_->MarkTemp(reg);
-  }
-  for (RegStorage reg : dp_temps) {
-    m2l_->MarkTemp(reg);
-  }
-
-  // Add an entry for InvalidReg with zeroed mask.
-  RegisterInfo* invalid_reg = new (arena) RegisterInfo(RegStorage::InvalidReg(), kEncodeNone);
-  m2l_->reginfo_map_[RegStorage::InvalidReg().GetReg()] = invalid_reg;
-
-  // Existence of core64 registers implies wide references.
-  if (core64_regs_.size() != 0) {
-    ref_regs_ = &core64_regs_;
-    next_ref_reg_ = &next_core64_reg_;
-  } else {
-    ref_regs_ = &core_regs_;
-    next_ref_reg_ = &next_core_reg_;
-  }
-}
-
-void Mir2Lir::DumpRegPool(ArenaVector<RegisterInfo*>* regs) {
-  LOG(INFO) << "================================================";
-  for (RegisterInfo* info : *regs) {
-    LOG(INFO) << StringPrintf(
-        "R[%d:%d:%c]: T:%d, U:%d, W:%d, p:%d, LV:%d, D:%d, SR:%d, DEF:%d",
-        info->GetReg().GetReg(), info->GetReg().GetRegNum(), info->GetReg().IsFloat() ?  'f' : 'c',
-        info->IsTemp(), info->InUse(), info->IsWide(), info->Partner().GetReg(), info->IsLive(),
-        info->IsDirty(), info->SReg(), info->DefStart() != nullptr);
-  }
-  LOG(INFO) << "================================================";
-}
-
-void Mir2Lir::DumpCoreRegPool() {
-  DumpRegPool(&reg_pool_->core_regs_);
-  DumpRegPool(&reg_pool_->core64_regs_);
-}
-
-void Mir2Lir::DumpFpRegPool() {
-  DumpRegPool(&reg_pool_->sp_regs_);
-  DumpRegPool(&reg_pool_->dp_regs_);
-}
-
-void Mir2Lir::DumpRegPools() {
-  LOG(INFO) << "Core registers";
-  DumpCoreRegPool();
-  LOG(INFO) << "FP registers";
-  DumpFpRegPool();
-}
-
-void Mir2Lir::Clobber(RegStorage reg) {
-  if (UNLIKELY(reg.IsPair())) {
-    DCHECK(!GetRegInfo(reg.GetLow())->IsAliased());
-    Clobber(reg.GetLow());
-    DCHECK(!GetRegInfo(reg.GetHigh())->IsAliased());
-    Clobber(reg.GetHigh());
-  } else {
-    RegisterInfo* info = GetRegInfo(reg);
-    if (info->IsTemp() && !info->IsDead()) {
-      if (info->GetReg().NotExactlyEquals(info->Partner())) {
-        ClobberBody(GetRegInfo(info->Partner()));
-      }
-      ClobberBody(info);
-      if (info->IsAliased()) {
-        ClobberAliases(info, info->StorageMask());
-      } else {
-        RegisterInfo* master = info->Master();
-        if (info != master) {
-          ClobberBody(info->Master());
-          ClobberAliases(info->Master(), info->StorageMask());
-        }
-      }
-    }
-  }
-}
-
-void Mir2Lir::ClobberAliases(RegisterInfo* info, uint32_t clobber_mask) {
-  for (RegisterInfo* alias = info->GetAliasChain(); alias != nullptr;
-       alias = alias->GetAliasChain()) {
-    DCHECK(!alias->IsAliased());  // Only the master should be marked as aliased.
-    // Only clobber if we have overlap.
-    if ((alias->StorageMask() & clobber_mask) != 0) {
-      ClobberBody(alias);
-    }
-  }
-}
-
-/*
- * Break the association between a Dalvik vreg and a physical temp register of either register
- * class.
- * TODO: Ideally, the public version of this code should not exist.  Besides its local usage
- * in the register utilities, it is also used by code gen routines to work around a deficiency in
- * local register allocation, which fails to distinguish between the "in" and "out" identities
- * of Dalvik vregs.  This can result in useless register copies when the same Dalvik vreg
- * is used both as the source and destination register of an operation in which the type
- * changes (for example: INT_TO_FLOAT v1, v1).  Revisit when improved register allocation is
- * addressed.
- */
-void Mir2Lir::ClobberSReg(int s_reg) {
-  if (s_reg != INVALID_SREG) {
-    if (kIsDebugBuild && s_reg == live_sreg_) {
-      live_sreg_ = INVALID_SREG;
-    }
-    for (RegisterInfo* info : tempreg_info_) {
-      if (info->SReg() == s_reg) {
-        if (info->GetReg().NotExactlyEquals(info->Partner())) {
-          // Dealing with a pair - clobber the other half.
-          DCHECK(!info->IsAliased());
-          ClobberBody(GetRegInfo(info->Partner()));
-        }
-        ClobberBody(info);
-        if (info->IsAliased()) {
-          ClobberAliases(info, info->StorageMask());
-        }
-      }
-    }
-  }
-}
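-
-// Illustrative scenario for the TODO above (a sketch, not code from this
-// file): in INT_TO_FLOAT v1, v1 the "in" identity of v1 may sit in a live
-// core temp while the "out" identity is given an fp temp; without
-// ClobberSReg() on v1's s_reg, the stale core copy would still claim it.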
-
-/*
- * SSA names associated with the initial definitions of Dalvik
- * registers are the same as the Dalvik register number (and
- * thus take the same position in the promotion_map).  However,
- * the special Method* and compiler temp registers use negative
- * v_reg numbers to distinguish them and can have an arbitrary
- * ssa name (above the last original Dalvik register).  This function
- * maps SSA names to positions in the promotion_map array.
- */
-int Mir2Lir::SRegToPMap(int s_reg) {
-  DCHECK_LT(s_reg, mir_graph_->GetNumSSARegs());
-  DCHECK_GE(s_reg, 0);
-  int v_reg = mir_graph_->SRegToVReg(s_reg);
-  return v_reg;
-}
-
-// TODO: refactor following Alloc/Record routines - much commonality.
-void Mir2Lir::RecordCorePromotion(RegStorage reg, int s_reg) {
-  int p_map_idx = SRegToPMap(s_reg);
-  int v_reg = mir_graph_->SRegToVReg(s_reg);
-  int reg_num = reg.GetRegNum();
-  GetRegInfo(reg)->MarkInUse();
-  core_spill_mask_ |= (1 << reg_num);
-  // Include reg for later sort
-  core_vmap_table_.push_back(reg_num << VREG_NUM_WIDTH | (v_reg & ((1 << VREG_NUM_WIDTH) - 1)));
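-  // Packed entry: physical reg number in the high bits, Dalvik v_reg in the
-  // low VREG_NUM_WIDTH bits.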
-  num_core_spills_++;
-  promotion_map_[p_map_idx].core_location = kLocPhysReg;
-  promotion_map_[p_map_idx].core_reg = reg_num;
-}
-
-/* Reserve a callee-save register.  Return InvalidReg if none available */
-RegStorage Mir2Lir::AllocPreservedCoreReg(int s_reg) {
-  RegStorage res;
-  /*
-   * Note: it really doesn't matter much whether we allocate from the core or core64
- * pool for 64-bit targets - but for some targets it does matter whether allocation
- * happens from the single or double pool.  This entire section of code could stand
-   * a good refactoring.
-   */
-  for (RegisterInfo* info : reg_pool_->core_regs_) {
-    if (!info->IsTemp() && !info->InUse()) {
-      res = info->GetReg();
-      RecordCorePromotion(res, s_reg);
-      break;
-    }
-  }
-  return res;
-}
-
-void Mir2Lir::RecordFpPromotion(RegStorage reg, int s_reg) {
-  DCHECK_NE(cu_->instruction_set, kThumb2);
-  int p_map_idx = SRegToPMap(s_reg);
-  int v_reg = mir_graph_->SRegToVReg(s_reg);
-  int reg_num = reg.GetRegNum();
-  GetRegInfo(reg)->MarkInUse();
-  fp_spill_mask_ |= (1 << reg_num);
-  // Include reg for later sort
-  fp_vmap_table_.push_back(reg_num << VREG_NUM_WIDTH | (v_reg & ((1 << VREG_NUM_WIDTH) - 1)));
-  num_fp_spills_++;
-  promotion_map_[p_map_idx].fp_location = kLocPhysReg;
-  promotion_map_[p_map_idx].fp_reg = reg.GetReg();
-}
-
-// Reserve a callee-save floating point register.
-RegStorage Mir2Lir::AllocPreservedFpReg(int s_reg) {
-  /*
-   * For targets other than Thumb2, it doesn't matter whether we allocate from
-   * the sp_regs_ or dp_regs_ pool.  Some refactoring is in order here.
-   */
-  DCHECK_NE(cu_->instruction_set, kThumb2);
-  RegStorage res;
-  for (RegisterInfo* info : reg_pool_->sp_regs_) {
-    if (!info->IsTemp() && !info->InUse()) {
-      res = info->GetReg();
-      RecordFpPromotion(res, s_reg);
-      break;
-    }
-  }
-  return res;
-}
-
-// TODO: this is Thumb2 only.  Remove when DoPromotion is refactored.
-RegStorage Mir2Lir::AllocPreservedDouble(int s_reg ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedDouble";
-  UNREACHABLE();
-}
-
-// TODO: this is Thumb2 only.  Remove when DoPromotion is refactored.
-RegStorage Mir2Lir::AllocPreservedSingle(int s_reg ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedSingle";
-  UNREACHABLE();
-}
-
-RegStorage Mir2Lir::AllocTempBody(ArenaVector<RegisterInfo*>& regs, int* next_temp, bool required) {
-  int num_regs = regs.size();
-  int next = *next_temp;
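-  // Two-pass round-robin scan, resuming where the previous allocation left
-  // off: first prefer temps that are both free and dead, then fall back to
-  // killing the value in any free temp.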
-  for (int i = 0; i < num_regs; i++) {
-    if (next >= num_regs) {
-      next = 0;
-    }
-    RegisterInfo* info = regs[next];
-    // Try to allocate a register that doesn't hold a live value.
-    if (info->IsTemp() && !info->InUse() && info->IsDead()) {
-      // If it's wide, split it up.
-      if (info->IsWide()) {
-        // If the pair was associated with a wide value, unmark the partner as well.
-        if (info->SReg() != INVALID_SREG) {
-          RegisterInfo* partner = GetRegInfo(info->Partner());
-          DCHECK_EQ(info->GetReg().GetRegNum(), partner->Partner().GetRegNum());
-          DCHECK(partner->IsWide());
-          partner->SetIsWide(false);
-        }
-        info->SetIsWide(false);
-      }
-      Clobber(info->GetReg());
-      info->MarkInUse();
-      *next_temp = next + 1;
-      return info->GetReg();
-    }
-    next++;
-  }
-  next = *next_temp;
-  // No free non-live regs.  Anything we can kill?
-  for (int i = 0; i < num_regs; i++) {
-    if (next >= num_regs) {
-      next = 0;
-    }
-    RegisterInfo* info = regs[next];
-    if (info->IsTemp() && !info->InUse()) {
-      // Got one.  Kill it.
-      ClobberSReg(info->SReg());
-      Clobber(info->GetReg());
-      info->MarkInUse();
-      if (info->IsWide()) {
-        RegisterInfo* partner = GetRegInfo(info->Partner());
-        DCHECK_EQ(info->GetReg().GetRegNum(), partner->Partner().GetRegNum());
-        DCHECK(partner->IsWide());
-        info->SetIsWide(false);
-        partner->SetIsWide(false);
-      }
-      *next_temp = next + 1;
-      return info->GetReg();
-    }
-    next++;
-  }
-  if (required) {
-    CodegenDump();
-    DumpRegPools();
-    LOG(FATAL) << "No free temp registers";
-  }
-  return RegStorage::InvalidReg();  // No register available
-}
-
-RegStorage Mir2Lir::AllocTemp(bool required) {
-  return AllocTempBody(reg_pool_->core_regs_, &reg_pool_->next_core_reg_, required);
-}
-
-RegStorage Mir2Lir::AllocTempWide(bool required) {
-  RegStorage res;
-  if (reg_pool_->core64_regs_.size() != 0) {
-    res = AllocTempBody(reg_pool_->core64_regs_, &reg_pool_->next_core64_reg_, required);
-  } else {
-    RegStorage low_reg = AllocTemp();
-    RegStorage high_reg = AllocTemp();
-    res = RegStorage::MakeRegPair(low_reg, high_reg);
-  }
-  if (required) {
-    CheckRegStorage(res, WidenessCheck::kCheckWide, RefCheck::kIgnoreRef, FPCheck::kCheckNotFP);
-  }
-  return res;
-}
-
-RegStorage Mir2Lir::AllocTempRef(bool required) {
-  RegStorage res = AllocTempBody(*reg_pool_->ref_regs_, reg_pool_->next_ref_reg_, required);
-  if (required) {
-    DCHECK(!res.IsPair());
-    CheckRegStorage(res, WidenessCheck::kCheckNotWide, RefCheck::kCheckRef, FPCheck::kCheckNotFP);
-  }
-  return res;
-}
-
-RegStorage Mir2Lir::AllocTempSingle(bool required) {
-  RegStorage res = AllocTempBody(reg_pool_->sp_regs_, &reg_pool_->next_sp_reg_, required);
-  if (required) {
-    DCHECK(res.IsSingle()) << "Reg: 0x" << std::hex << res.GetRawBits();
-    CheckRegStorage(res, WidenessCheck::kCheckNotWide, RefCheck::kCheckNotRef, FPCheck::kIgnoreFP);
-  }
-  return res;
-}
-
-RegStorage Mir2Lir::AllocTempDouble(bool required) {
-  RegStorage res = AllocTempBody(reg_pool_->dp_regs_, &reg_pool_->next_dp_reg_, required);
-  if (required) {
-    DCHECK(res.IsDouble()) << "Reg: 0x" << std::hex << res.GetRawBits();
-    CheckRegStorage(res, WidenessCheck::kCheckWide, RefCheck::kCheckNotRef, FPCheck::kIgnoreFP);
-  }
-  return res;
-}
-
-RegStorage Mir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class, bool required) {
-  DCHECK_NE(reg_class, kRefReg);  // NOTE: the Dalvik width of a reference is always 32 bits.
-  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
-    return AllocTempDouble(required);
-  }
-  return AllocTempWide(required);
-}
-
-RegStorage Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class, bool required) {
-  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
-    return AllocTempSingle(required);
-  } else if (reg_class == kRefReg) {
-    return AllocTempRef(required);
-  }
-  return AllocTemp(required);
-}
-
-RegStorage Mir2Lir::FindLiveReg(ArenaVector<RegisterInfo*>& regs, int s_reg) {
-  RegStorage res;
-  for (RegisterInfo* info : regs) {
-    if ((info->SReg() == s_reg) && info->IsLive()) {
-      res = info->GetReg();
-      break;
-    }
-  }
-  return res;
-}
-
-RegStorage Mir2Lir::AllocLiveReg(int s_reg, int reg_class, bool wide) {
-  RegStorage reg;
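-  // Search order follows register-class preference: the ref pool for
-  // references, then the fp pools for kAnyReg/kFPReg, then core regs
-  // (core64 for wide or ref values on 64-bit targets).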
-  if (reg_class == kRefReg) {
-    reg = FindLiveReg(*reg_pool_->ref_regs_, s_reg);
-    CheckRegStorage(reg, WidenessCheck::kCheckNotWide, RefCheck::kCheckRef, FPCheck::kCheckNotFP);
-  }
-  if (!reg.Valid() && ((reg_class == kAnyReg) || (reg_class == kFPReg))) {
-    reg = FindLiveReg(wide ? reg_pool_->dp_regs_ : reg_pool_->sp_regs_, s_reg);
-  }
-  if (!reg.Valid() && (reg_class != kFPReg)) {
-    if (cu_->target64) {
-      reg = FindLiveReg(wide || reg_class == kRefReg ? reg_pool_->core64_regs_ :
-                                                       reg_pool_->core_regs_, s_reg);
-    } else {
-      reg = FindLiveReg(reg_pool_->core_regs_, s_reg);
-    }
-  }
-  if (reg.Valid()) {
-    if (wide && !reg.IsFloat() && !cu_->target64) {
-      // Only allow reg pairs for core regs on 32-bit targets.
-      RegStorage high_reg = FindLiveReg(reg_pool_->core_regs_, s_reg + 1);
-      if (high_reg.Valid()) {
-        reg = RegStorage::MakeRegPair(reg, high_reg);
-        MarkWide(reg);
-      } else {
-        // Only half available.
-        reg = RegStorage::InvalidReg();
-      }
-    }
-    if (reg.Valid() && (wide != GetRegInfo(reg)->IsWide())) {
-      // Width mismatch - don't try to reuse.
-      reg = RegStorage::InvalidReg();
-    }
-  }
-  if (reg.Valid()) {
-    if (reg.IsPair()) {
-      RegisterInfo* info_low = GetRegInfo(reg.GetLow());
-      RegisterInfo* info_high = GetRegInfo(reg.GetHigh());
-      if (info_low->IsTemp()) {
-        info_low->MarkInUse();
-      }
-      if (info_high->IsTemp()) {
-        info_high->MarkInUse();
-      }
-    } else {
-      RegisterInfo* info = GetRegInfo(reg);
-      if (info->IsTemp()) {
-        info->MarkInUse();
-      }
-    }
-  } else {
-    // Either not found, or something didn't match up. Clobber to prevent any stale instances.
-    ClobberSReg(s_reg);
-    if (wide) {
-      ClobberSReg(s_reg + 1);
-    }
-  }
-  CheckRegStorage(reg, WidenessCheck::kIgnoreWide,
-                  reg_class == kRefReg ? RefCheck::kCheckRef : RefCheck::kIgnoreRef,
-                  FPCheck::kIgnoreFP);
-  return reg;
-}
-
-void Mir2Lir::FreeTemp(RegStorage reg) {
-  if (reg.IsPair()) {
-    FreeTemp(reg.GetLow());
-    FreeTemp(reg.GetHigh());
-  } else {
-    RegisterInfo* p = GetRegInfo(reg);
-    if (p->IsTemp()) {
-      p->MarkFree();
-      p->SetIsWide(false);
-      p->SetPartner(reg);
-    }
-  }
-}
-
-void Mir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
-  DCHECK(rl_keep.wide);
-  DCHECK(rl_free.wide);
-  int free_low = rl_free.reg.GetLowReg();
-  int free_high = rl_free.reg.GetHighReg();
-  int keep_low = rl_keep.reg.GetLowReg();
-  int keep_high = rl_keep.reg.GetHighReg();
-  if ((free_low != keep_low) && (free_low != keep_high) &&
-      (free_high != keep_low) && (free_high != keep_high)) {
-    // No overlap, free both
-    FreeTemp(rl_free.reg);
-  }
-}
-
-bool Mir2Lir::IsLive(RegStorage reg) {
-  bool res;
-  if (reg.IsPair()) {
-    RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
-    RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
-    DCHECK_EQ(p_lo->IsLive(), p_hi->IsLive());
-    res = p_lo->IsLive() || p_hi->IsLive();
-  } else {
-    RegisterInfo* p = GetRegInfo(reg);
-    res = p->IsLive();
-  }
-  return res;
-}
-
-bool Mir2Lir::IsTemp(RegStorage reg) {
-  bool res;
-  if (reg.IsPair()) {
-    RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
-    RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
-    res = p_lo->IsTemp() || p_hi->IsTemp();
-  } else {
-    RegisterInfo* p = GetRegInfo(reg);
-    res = p->IsTemp();
-  }
-  return res;
-}
-
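-// "Promoted" here simply means not allocated from the temp pool; for a pair,
-// either half being non-temp counts.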
-bool Mir2Lir::IsPromoted(RegStorage reg) {
-  bool res;
-  if (reg.IsPair()) {
-    RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
-    RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
-    res = !p_lo->IsTemp() || !p_hi->IsTemp();
-  } else {
-    RegisterInfo* p = GetRegInfo(reg);
-    res = !p->IsTemp();
-  }
-  return res;
-}
-
-bool Mir2Lir::IsDirty(RegStorage reg) {
-  bool res;
-  if (reg.IsPair()) {
-    RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
-    RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
-    res = p_lo->IsDirty() || p_hi->IsDirty();
-  } else {
-    RegisterInfo* p = GetRegInfo(reg);
-    res = p->IsDirty();
-  }
-  return res;
-}
-
-/*
- * Similar to AllocTemp(), but forces the allocation of a specific
- * register.  No check is made to see if the register was previously
- * allocated.  Use with caution.
- */
-void Mir2Lir::LockTemp(RegStorage reg) {
-  DCHECK(IsTemp(reg));
-  if (reg.IsPair()) {
-    RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
-    RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
-    p_lo->MarkInUse();
-    p_lo->MarkDead();
-    p_hi->MarkInUse();
-    p_hi->MarkDead();
-  } else {
-    RegisterInfo* p = GetRegInfo(reg);
-    p->MarkInUse();
-    p->MarkDead();
-  }
-}
-
-void Mir2Lir::ResetDef(RegStorage reg) {
-  if (reg.IsPair()) {
-    GetRegInfo(reg.GetLow())->ResetDefBody();
-    GetRegInfo(reg.GetHigh())->ResetDefBody();
-  } else {
-    GetRegInfo(reg)->ResetDefBody();
-  }
-}
-
-void Mir2Lir::NullifyRange(RegStorage reg, int s_reg) {
-  RegisterInfo* info = nullptr;
-  RegStorage rs = reg.IsPair() ? reg.GetLow() : reg;
-  if (IsTemp(rs)) {
-    info = GetRegInfo(reg);
-  }
-  if ((info != nullptr) && (info->DefStart() != nullptr) && (info->DefEnd() != nullptr)) {
-    DCHECK_EQ(info->SReg(), s_reg);  // Make sure we're on the same page.
-    for (LIR* p = info->DefStart();; p = p->next) {
-      NopLIR(p);
-      if (p == info->DefEnd()) {
-        break;
-      }
-    }
-  }
-}
-
-/*
- * Mark the beginning and end LIR of a def sequence.  Note that
- * on entry start points to the LIR prior to the beginning of the
- * sequence.
- */
-void Mir2Lir::MarkDef(RegLocation rl, LIR *start, LIR *finish) {
-  DCHECK(!rl.wide);
-  DCHECK(start && start->next);
-  DCHECK(finish);
-  RegisterInfo* p = GetRegInfo(rl.reg);
-  p->SetDefStart(start->next);
-  p->SetDefEnd(finish);
-}
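-
-// Typical usage (illustrative): callers snapshot last_lir_insn_ before
-// emitting the def sequence and pass that as `start`, which is why
-// SetDefStart() records start->next rather than start itself.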
-
-/*
- * Mark the beginning and end LIR of a def sequence.  Note that
- * on entry start points to the LIR prior to the beginning of the
- * sequence.
- */
-void Mir2Lir::MarkDefWide(RegLocation rl, LIR *start, LIR *finish) {
-  DCHECK(rl.wide);
-  DCHECK(start && start->next);
-  DCHECK(finish);
-  RegisterInfo* p;
-  if (rl.reg.IsPair()) {
-    p = GetRegInfo(rl.reg.GetLow());
-    ResetDef(rl.reg.GetHigh());  // Only track low of pair
-  } else {
-    p = GetRegInfo(rl.reg);
-  }
-  p->SetDefStart(start->next);
-  p->SetDefEnd(finish);
-}
-
-void Mir2Lir::ResetDefLoc(RegLocation rl) {
-  DCHECK(!rl.wide);
-  if (IsTemp(rl.reg) && !(cu_->disable_opt & (1 << kSuppressLoads))) {
-    NullifyRange(rl.reg, rl.s_reg_low);
-  }
-  ResetDef(rl.reg);
-}
-
-void Mir2Lir::ResetDefLocWide(RegLocation rl) {
-  DCHECK(rl.wide);
-  // If pair, only track low reg of pair.
-  RegStorage rs = rl.reg.IsPair() ? rl.reg.GetLow() : rl.reg;
-  if (IsTemp(rs) && !(cu_->disable_opt & (1 << kSuppressLoads))) {
-    NullifyRange(rs, rl.s_reg_low);
-  }
-  ResetDef(rs);
-}
-
-void Mir2Lir::ResetDefTracking() {
-  for (RegisterInfo* info : tempreg_info_) {
-    info->ResetDefBody();
-  }
-}
-
-void Mir2Lir::ClobberAllTemps() {
-  for (RegisterInfo* info : tempreg_info_) {
-    ClobberBody(info);
-  }
-}
-
-void Mir2Lir::FlushRegWide(RegStorage reg) {
-  if (reg.IsPair()) {
-    RegisterInfo* info1 = GetRegInfo(reg.GetLow());
-    RegisterInfo* info2 = GetRegInfo(reg.GetHigh());
-    DCHECK(info1 && info2 && info1->IsWide() && info2->IsWide() &&
-           (info1->Partner().ExactlyEquals(info2->GetReg())) &&
-           (info2->Partner().ExactlyEquals(info1->GetReg())));
-    if ((info1->IsLive() && info1->IsDirty()) || (info2->IsLive() && info2->IsDirty())) {
-      if (!(info1->IsTemp() && info2->IsTemp())) {
-        /* Should not happen.  If it does, there's a problem in eval_loc */
-        LOG(FATAL) << "Long half-temp, half-promoted";
-      }
-
-      info1->SetIsDirty(false);
-      info2->SetIsDirty(false);
-      if (mir_graph_->SRegToVReg(info2->SReg()) < mir_graph_->SRegToVReg(info1->SReg())) {
-        info1 = info2;
-      }
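-      // Key the single 64-bit store to the half holding the lower v_reg.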
-      int v_reg = mir_graph_->SRegToVReg(info1->SReg());
-      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-      StoreBaseDisp(TargetPtrReg(kSp), VRegOffset(v_reg), reg, k64, kNotVolatile);
-    }
-  } else {
-    RegisterInfo* info = GetRegInfo(reg);
-    if (info->IsLive() && info->IsDirty()) {
-      info->SetIsDirty(false);
-      int v_reg = mir_graph_->SRegToVReg(info->SReg());
-      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-      StoreBaseDisp(TargetPtrReg(kSp), VRegOffset(v_reg), reg, k64, kNotVolatile);
-    }
-  }
-}
-
-void Mir2Lir::FlushReg(RegStorage reg) {
-  DCHECK(!reg.IsPair());
-  RegisterInfo* info = GetRegInfo(reg);
-  if (info->IsLive() && info->IsDirty()) {
-    info->SetIsDirty(false);
-    int v_reg = mir_graph_->SRegToVReg(info->SReg());
-    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-    StoreBaseDisp(TargetPtrReg(kSp), VRegOffset(v_reg), reg, kWord, kNotVolatile);
-  }
-}
-
-void Mir2Lir::FlushSpecificReg(RegisterInfo* info) {
-  if (info->IsWide()) {
-    FlushRegWide(info->GetReg());
-  } else {
-    FlushReg(info->GetReg());
-  }
-}
-
-void Mir2Lir::FlushAllRegs() {
-  for (RegisterInfo* info : tempreg_info_) {
-    if (info->IsDirty() && info->IsLive()) {
-      FlushSpecificReg(info);
-    }
-    info->MarkDead();
-    info->SetIsWide(false);
-  }
-}
-
-bool Mir2Lir::RegClassMatches(int reg_class, RegStorage reg) {
-  if (reg_class == kAnyReg) {
-    return true;
-  } else if ((reg_class == kCoreReg) || (reg_class == kRefReg)) {
-    /*
-     * For this purpose, consider Core and Ref to be the same class. We aren't dealing
-     * with width here - that should be checked at a higher level (if needed).
-     */
-    return !reg.IsFloat();
-  } else {
-    return reg.IsFloat();
-  }
-}
-
-void Mir2Lir::MarkLive(RegLocation loc) {
-  RegStorage reg = loc.reg;
-  if (!IsTemp(reg)) {
-    return;
-  }
-  int s_reg = loc.s_reg_low;
-  if (s_reg == INVALID_SREG) {
-    // Can't be live if no associated sreg.
-    if (reg.IsPair()) {
-      GetRegInfo(reg.GetLow())->MarkDead();
-      GetRegInfo(reg.GetHigh())->MarkDead();
-    } else {
-      GetRegInfo(reg)->MarkDead();
-    }
-  } else {
-    if (reg.IsPair()) {
-      RegisterInfo* info_lo = GetRegInfo(reg.GetLow());
-      RegisterInfo* info_hi = GetRegInfo(reg.GetHigh());
-      if (info_lo->IsLive() && (info_lo->SReg() == s_reg) && info_hi->IsLive() &&
-          (info_hi->SReg() == s_reg)) {
-        return;  // Already live.
-      }
-      ClobberSReg(s_reg);
-      ClobberSReg(s_reg + 1);
-      info_lo->MarkLive(s_reg);
-      info_hi->MarkLive(s_reg + 1);
-    } else {
-      RegisterInfo* info = GetRegInfo(reg);
-      if (info->IsLive() && (info->SReg() == s_reg)) {
-        return;  // Already live.
-      }
-      ClobberSReg(s_reg);
-      if (loc.wide) {
-        ClobberSReg(s_reg + 1);
-      }
-      info->MarkLive(s_reg);
-    }
-    if (loc.wide) {
-      MarkWide(reg);
-    } else {
-      MarkNarrow(reg);
-    }
-  }
-}
-
-void Mir2Lir::MarkTemp(RegStorage reg) {
-  DCHECK(!reg.IsPair());
-  RegisterInfo* info = GetRegInfo(reg);
-  tempreg_info_.push_back(info);
-  info->SetIsTemp(true);
-}
-
-void Mir2Lir::UnmarkTemp(RegStorage reg) {
-  DCHECK(!reg.IsPair());
-  RegisterInfo* info = GetRegInfo(reg);
-  auto pos = std::find(tempreg_info_.begin(), tempreg_info_.end(), info);
-  DCHECK(pos != tempreg_info_.end());
-  tempreg_info_.erase(pos);
-  info->SetIsTemp(false);
-}
-
-void Mir2Lir::MarkWide(RegStorage reg) {
-  if (reg.IsPair()) {
-    RegisterInfo* info_lo = GetRegInfo(reg.GetLow());
-    RegisterInfo* info_hi = GetRegInfo(reg.GetHigh());
-    // Unpair any old partners.
-    if (info_lo->IsWide() && info_lo->Partner().NotExactlyEquals(info_hi->GetReg())) {
-      GetRegInfo(info_lo->Partner())->SetIsWide(false);
-    }
-    if (info_hi->IsWide() && info_hi->Partner().NotExactlyEquals(info_lo->GetReg())) {
-      GetRegInfo(info_hi->Partner())->SetIsWide(false);
-    }
-    info_lo->SetIsWide(true);
-    info_hi->SetIsWide(true);
-    info_lo->SetPartner(reg.GetHigh());
-    info_hi->SetPartner(reg.GetLow());
-  } else {
-    RegisterInfo* info = GetRegInfo(reg);
-    info->SetIsWide(true);
-    info->SetPartner(reg);
-  }
-}
-
-void Mir2Lir::MarkNarrow(RegStorage reg) {
-  DCHECK(!reg.IsPair());
-  RegisterInfo* info = GetRegInfo(reg);
-  info->SetIsWide(false);
-  info->SetPartner(reg);
-}
-
-void Mir2Lir::MarkClean(RegLocation loc) {
-  if (loc.reg.IsPair()) {
-    RegisterInfo* info = GetRegInfo(loc.reg.GetLow());
-    info->SetIsDirty(false);
-    info = GetRegInfo(loc.reg.GetHigh());
-    info->SetIsDirty(false);
-  } else {
-    RegisterInfo* info = GetRegInfo(loc.reg);
-    info->SetIsDirty(false);
-  }
-}
-
-// FIXME: need to verify rules/assumptions about how wide values are treated in 64BitSolos.
-void Mir2Lir::MarkDirty(RegLocation loc) {
-  if (loc.home) {
-    // If already home, can't be dirty
-    return;
-  }
-  if (loc.reg.IsPair()) {
-    RegisterInfo* info = GetRegInfo(loc.reg.GetLow());
-    info->SetIsDirty(true);
-    info = GetRegInfo(loc.reg.GetHigh());
-    info->SetIsDirty(true);
-  } else {
-    RegisterInfo* info = GetRegInfo(loc.reg);
-    info->SetIsDirty(true);
-  }
-}
-
-void Mir2Lir::MarkInUse(RegStorage reg) {
-  if (reg.IsPair()) {
-    GetRegInfo(reg.GetLow())->MarkInUse();
-    GetRegInfo(reg.GetHigh())->MarkInUse();
-  } else {
-    GetRegInfo(reg)->MarkInUse();
-  }
-}
-
-bool Mir2Lir::CheckCorePoolSanity() {
-  for (RegisterInfo* info : tempreg_info_) {
-    int my_sreg = info->SReg();
-    if (info->IsTemp() && info->IsLive() && info->IsWide() && my_sreg != INVALID_SREG) {
-      RegStorage my_reg = info->GetReg();
-      RegStorage partner_reg = info->Partner();
-      RegisterInfo* partner = GetRegInfo(partner_reg);
-      DCHECK(partner != nullptr);
-      DCHECK(partner->IsWide());
-      DCHECK_EQ(my_reg.GetReg(), partner->Partner().GetReg());
-      DCHECK(partner->IsLive());
-      int partner_sreg = partner->SReg();
-      int diff = my_sreg - partner_sreg;
-      DCHECK((diff == 0) || (diff == -1) || (diff == 1));
-    }
-    if (info->Master() != info) {
-      // Aliased.
-      if (info->IsLive() && (info->SReg() != INVALID_SREG)) {
-        // If I'm live, master should not be live, but should show liveness in alias set.
-        DCHECK_EQ(info->Master()->SReg(), INVALID_SREG);
-        DCHECK(!info->Master()->IsDead());
-      }
-      // TODO: Add checks in !info->IsDead() case to ensure every live bit is owned by
-      // exactly 1 reg.
-    }
-    if (info->IsAliased()) {
-      // Has child aliases.
-      DCHECK_EQ(info->Master(), info);
-      if (info->IsLive() && (info->SReg() != INVALID_SREG)) {
-        // Master live, no child should be dead - all should show liveness in set.
-        for (RegisterInfo* p = info->GetAliasChain(); p != nullptr; p = p->GetAliasChain()) {
-          DCHECK(!p->IsDead());
-          DCHECK_EQ(p->SReg(), INVALID_SREG);
-        }
-      } else if (!info->IsDead()) {
-        // Master not live, one or more aliases must be.
-        bool live_alias = false;
-        for (RegisterInfo* p = info->GetAliasChain(); p != nullptr; p = p->GetAliasChain()) {
-          live_alias |= p->IsLive();
-        }
-        DCHECK(live_alias);
-      }
-    }
-    if (info->IsLive() && (info->SReg() == INVALID_SREG)) {
-      // If not fully live, should have INVALID_SREG and def's should be null.
-      DCHECK(info->DefStart() == nullptr);
-      DCHECK(info->DefEnd() == nullptr);
-    }
-  }
-  return true;
-}
-
-/*
- * Return an updated location record with current in-register status.
- * If the value lives in live temps, reflect that fact.  No code
- * is generated.  If the live value is part of an older pair,
- * clobber both low and high.
- * TUNING: clobbering both is a bit heavy-handed, but the alternative
- * is a bit complex when dealing with FP regs.  Examine code to see
- * if it's worthwhile trying to be more clever here.
- */
-RegLocation Mir2Lir::UpdateLoc(RegLocation loc) {
-  DCHECK(!loc.wide);
-  DCHECK(CheckCorePoolSanity());
-  if (loc.location != kLocPhysReg) {
-    DCHECK((loc.location == kLocDalvikFrame) ||
-         (loc.location == kLocCompilerTemp));
-    RegStorage reg = AllocLiveReg(loc.s_reg_low, loc.ref ? kRefReg : kAnyReg, false);
-    if (reg.Valid()) {
-      bool match = true;
-      RegisterInfo* info = GetRegInfo(reg);
-      match &= !reg.IsPair();
-      match &= !info->IsWide();
-      if (match) {
-        loc.location = kLocPhysReg;
-        loc.reg = reg;
-      } else {
-        Clobber(reg);
-        FreeTemp(reg);
-      }
-    }
-    CheckRegLocation(loc);
-  }
-  return loc;
-}
-
-RegLocation Mir2Lir::UpdateLocWide(RegLocation loc) {
-  DCHECK(loc.wide);
-  DCHECK(CheckCorePoolSanity());
-  if (loc.location != kLocPhysReg) {
-    DCHECK((loc.location == kLocDalvikFrame) ||
-         (loc.location == kLocCompilerTemp));
-    RegStorage reg = AllocLiveReg(loc.s_reg_low, kAnyReg, true);
-    if (reg.Valid()) {
-      bool match = true;
-      if (reg.IsPair()) {
-        // If we've got a register pair, make sure that it was last used as the same pair.
-        RegisterInfo* info_lo = GetRegInfo(reg.GetLow());
-        RegisterInfo* info_hi = GetRegInfo(reg.GetHigh());
-        match &= info_lo->IsWide();
-        match &= info_hi->IsWide();
-        match &= (info_lo->Partner().ExactlyEquals(info_hi->GetReg()));
-        match &= (info_hi->Partner().ExactlyEquals(info_lo->GetReg()));
-      } else {
-        RegisterInfo* info = GetRegInfo(reg);
-        match &= info->IsWide();
-        match &= (info->GetReg().ExactlyEquals(info->Partner()));
-      }
-      if (match) {
-        loc.location = kLocPhysReg;
-        loc.reg = reg;
-      } else {
-        Clobber(reg);
-        FreeTemp(reg);
-      }
-    }
-    CheckRegLocation(loc);
-  }
-  return loc;
-}
-
-/* For use in cases where we don't know (or care about) the width. */
-RegLocation Mir2Lir::UpdateRawLoc(RegLocation loc) {
-  if (loc.wide) {
-    return UpdateLocWide(loc);
-  } else {
-    return UpdateLoc(loc);
-  }
-}
-
-RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
-  DCHECK(loc.wide);
-
-  loc = UpdateLocWide(loc);
-
-  /* If already in registers, we can assume proper form.  Right reg class? */
-  if (loc.location == kLocPhysReg) {
-    if (!RegClassMatches(reg_class, loc.reg)) {
-      // Wrong register class.  Reallocate and transfer ownership.
-      RegStorage new_regs = AllocTypedTempWide(loc.fp, reg_class);
-      // Clobber the old regs.
-      Clobber(loc.reg);
-      // ...and mark the new ones live.
-      loc.reg = new_regs;
-      MarkWide(loc.reg);
-      MarkLive(loc);
-    }
-    CheckRegLocation(loc);
-    return loc;
-  }
-
-  DCHECK_NE(loc.s_reg_low, INVALID_SREG);
-  DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);
-
-  loc.reg = AllocTypedTempWide(loc.fp, reg_class);
-  MarkWide(loc.reg);
-
-  if (update) {
-    loc.location = kLocPhysReg;
-    MarkLive(loc);
-  }
-  CheckRegLocation(loc);
-  return loc;
-}
-
-RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
-  // Narrow reg_class if the loc is a ref.
-  if (loc.ref && reg_class == kAnyReg) {
-    reg_class = kRefReg;
-  }
-
-  if (loc.wide) {
-    return EvalLocWide(loc, reg_class, update);
-  }
-
-  loc = UpdateLoc(loc);
-
-  if (loc.location == kLocPhysReg) {
-    if (!RegClassMatches(reg_class, loc.reg)) {
-      // Wrong register class.  Reallocate and transfer ownership.
-      RegStorage new_reg = AllocTypedTemp(loc.fp, reg_class);
-      // Clobber the old reg.
-      Clobber(loc.reg);
-      // ...and mark the new one live.
-      loc.reg = new_reg;
-      MarkLive(loc);
-    }
-    CheckRegLocation(loc);
-    return loc;
-  }
-
-  DCHECK_NE(loc.s_reg_low, INVALID_SREG);
-
-  loc.reg = AllocTypedTemp(loc.fp, reg_class);
-  CheckRegLocation(loc);
-
-  if (update) {
-    loc.location = kLocPhysReg;
-    MarkLive(loc);
-  }
-  CheckRegLocation(loc);
-  return loc;
-}
-
-void Mir2Lir::AnalyzeMIR(RefCounts* core_counts, MIR* mir, uint32_t weight) {
-  // NOTE: This should be in sync with functions that actually generate code for
-  // the opcodes below. However, if we get this wrong, the generated code will
-  // still be correct even if it may be sub-optimal.
-  int opcode = mir->dalvikInsn.opcode;
-  bool uses_method = false;
-  bool uses_pc_rel_load = false;
-  uint32_t dex_cache_array_offset = std::numeric_limits<uint32_t>::max();
-  switch (opcode) {
-    case Instruction::CHECK_CAST:
-    case Instruction::INSTANCE_OF: {
-      if ((opcode == Instruction::CHECK_CAST) &&
-          (mir->optimization_flags & MIR_IGNORE_CHECK_CAST) != 0) {
-        break;  // No code generated.
-      }
-      uint32_t type_idx =
-          (opcode == Instruction::CHECK_CAST) ? mir->dalvikInsn.vB : mir->dalvikInsn.vC;
-      bool type_known_final, type_known_abstract, use_declaring_class;
-      bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(
-          cu_->method_idx, *cu_->dex_file, type_idx,
-          &type_known_final, &type_known_abstract, &use_declaring_class);
-      if (opcode == Instruction::CHECK_CAST && !needs_access_check &&
-          cu_->compiler_driver->IsSafeCast(
-              mir_graph_->GetCurrentDexCompilationUnit(), mir->offset)) {
-        break;  // No code generated.
-      }
-      if (!needs_access_check && !use_declaring_class && CanUseOpPcRelDexCacheArrayLoad()) {
-        uses_pc_rel_load = true;  // And ignore method use in slow path.
-        dex_cache_array_offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
-      } else {
-        uses_method = true;
-      }
-      break;
-    }
-
-    case Instruction::CONST_CLASS:
-      if (CanUseOpPcRelDexCacheArrayLoad() &&
-          cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
-                                                           mir->dalvikInsn.vB)) {
-        uses_pc_rel_load = true;  // And ignore method use in slow path.
-        dex_cache_array_offset = dex_cache_arrays_layout_.TypeOffset(mir->dalvikInsn.vB);
-      } else {
-        uses_method = true;
-      }
-      break;
-
-    case Instruction::CONST_STRING:
-    case Instruction::CONST_STRING_JUMBO:
-      if (CanUseOpPcRelDexCacheArrayLoad()) {
-        uses_pc_rel_load = true;  // And ignore method use in slow path.
-        dex_cache_array_offset = dex_cache_arrays_layout_.StringOffset(mir->dalvikInsn.vB);
-      } else {
-        uses_method = true;
-      }
-      break;
-
-    case Instruction::INVOKE_VIRTUAL:
-    case Instruction::INVOKE_SUPER:
-    case Instruction::INVOKE_DIRECT:
-    case Instruction::INVOKE_STATIC:
-    case Instruction::INVOKE_INTERFACE:
-    case Instruction::INVOKE_VIRTUAL_RANGE:
-    case Instruction::INVOKE_SUPER_RANGE:
-    case Instruction::INVOKE_DIRECT_RANGE:
-    case Instruction::INVOKE_STATIC_RANGE:
-    case Instruction::INVOKE_INTERFACE_RANGE:
-    case Instruction::INVOKE_VIRTUAL_QUICK:
-    case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
-      const MirMethodLoweringInfo& info = mir_graph_->GetMethodLoweringInfo(mir);
-      InvokeType sharp_type = info.GetSharpType();
-      if (info.IsIntrinsic()) {
-        // Nothing to do, if an intrinsic uses ArtMethod* it's in the slow-path - don't count it.
-      } else if (!info.FastPath() || (sharp_type != kStatic && sharp_type != kDirect)) {
-        // Nothing to do, the generated code or entrypoint uses method from the stack.
-      } else if (info.DirectCode() != 0 && info.DirectMethod() != 0) {
-        // Nothing to do, the generated code uses method from the stack.
-      } else if (CanUseOpPcRelDexCacheArrayLoad()) {
-        uses_pc_rel_load = true;
-        dex_cache_array_offset = dex_cache_arrays_layout_.MethodOffset(mir->dalvikInsn.vB);
-      } else {
-        uses_method = true;
-      }
-      break;
-    }
-
-    case Instruction::NEW_INSTANCE:
-    case Instruction::NEW_ARRAY:
-    case Instruction::FILLED_NEW_ARRAY:
-    case Instruction::FILLED_NEW_ARRAY_RANGE:
-      uses_method = true;
-      break;
-    case Instruction::FILL_ARRAY_DATA:
-      // Nothing to do, the entrypoint uses method from the stack.
-      break;
-    case Instruction::THROW:
-      // Nothing to do, the entrypoint uses method from the stack.
-      break;
-
-    case Instruction::SGET:
-    case Instruction::SGET_WIDE:
-    case Instruction::SGET_OBJECT:
-    case Instruction::SGET_BOOLEAN:
-    case Instruction::SGET_BYTE:
-    case Instruction::SGET_CHAR:
-    case Instruction::SGET_SHORT:
-    case Instruction::SPUT:
-    case Instruction::SPUT_WIDE:
-    case Instruction::SPUT_OBJECT:
-    case Instruction::SPUT_BOOLEAN:
-    case Instruction::SPUT_BYTE:
-    case Instruction::SPUT_CHAR:
-    case Instruction::SPUT_SHORT: {
-      const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
-      bool fast = IsInstructionSGet(static_cast<Instruction::Code>(opcode))
-          ? field_info.FastGet()
-          : field_info.FastPut();
-      if (fast && (cu_->enable_debug & (1 << kDebugSlowFieldPath)) == 0) {
-        if (!field_info.IsReferrersClass() && CanUseOpPcRelDexCacheArrayLoad()) {
-          uses_pc_rel_load = true;  // And ignore method use in slow path.
-          dex_cache_array_offset = dex_cache_arrays_layout_.TypeOffset(field_info.StorageIndex());
-        } else {
-          uses_method = true;
-        }
-      } else {
-        // Nothing to do, the entrypoint uses method from the stack.
-      }
-      break;
-    }
-
-    default:
-      break;
-  }
-  if (uses_method) {
-    core_counts[SRegToPMap(mir_graph_->GetMethodLoc().s_reg_low)].count += weight;
-  }
-  if (uses_pc_rel_load) {
-    if (pc_rel_temp_ != nullptr) {
-      core_counts[SRegToPMap(pc_rel_temp_->s_reg_low)].count += weight;
-      DCHECK_NE(dex_cache_array_offset, std::numeric_limits<uint32_t>::max());
-      dex_cache_arrays_min_offset_ = std::min(dex_cache_arrays_min_offset_, dex_cache_array_offset);
-    } else {
-      // Nothing to do, using PC-relative addressing without promoting base PC to register.
-    }
-  }
-}
-
-/* Use SSA names to count references of base Dalvik v_regs. */
-void Mir2Lir::CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs) {
-  for (int i = 0; i < mir_graph_->GetNumSSARegs(); i++) {
-    RegLocation loc = mir_graph_->reg_location_[i];
-    RefCounts* counts = loc.fp ? fp_counts : core_counts;
-    int p_map_idx = SRegToPMap(loc.s_reg_low);
-    int use_count = mir_graph_->GetUseCount(i);
-    if (loc.fp) {
-      if (loc.wide) {
-        if (WideFPRsAreAliases()) {
-          // Floats and doubles can be counted together.
-          counts[p_map_idx].count += use_count;
-        } else {
-          // Treat doubles as a unit, using upper half of fp_counts array.
-          counts[p_map_idx + num_regs].count += use_count;
-        }
-        i++;
-      } else {
-        counts[p_map_idx].count += use_count;
-      }
-    } else {
-      if (loc.wide && WideGPRsAreAliases()) {
-        i++;
-      }
-      if (!IsInexpensiveConstant(loc)) {
-        counts[p_map_idx].count += use_count;
-      }
-    }
-  }
-
-  // Now analyze the ArtMethod* and pc_rel_temp_ uses.
-  DCHECK_EQ(core_counts[SRegToPMap(mir_graph_->GetMethodLoc().s_reg_low)].count, 0);
-  if (pc_rel_temp_ != nullptr) {
-    DCHECK_EQ(core_counts[SRegToPMap(pc_rel_temp_->s_reg_low)].count, 0);
-  }
-  PreOrderDfsIterator iter(mir_graph_);
-  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    if (bb->block_type == kDead) {
-      continue;
-    }
-    uint32_t weight = mir_graph_->GetUseCountWeight(bb);
-    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      AnalyzeMIR(core_counts, mir, weight);
-    }
-  }
-}
-
-/* qsort callback function, sort descending */
-static int SortCounts(const void *val1, const void *val2) {
-  const Mir2Lir::RefCounts* op1 = reinterpret_cast<const Mir2Lir::RefCounts*>(val1);
-  const Mir2Lir::RefCounts* op2 = reinterpret_cast<const Mir2Lir::RefCounts*>(val2);
-  // Note that we fall back to sorting on reg so we get stable output on differing qsort
-  // implementations (such as on host and target or between local host and build servers).
-  // Note also that if a wide val1 and a non-wide val2 have the same count, then val1 always
-  // ``loses'' (as STARTING_WIDE_SREG is or-ed in val1->s_reg).
-  return (op1->count == op2->count)
-          ? (op1->s_reg - op2->s_reg)
-          : (op1->count < op2->count ? 1 : -1);
-}
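-
-// Example (illustrative): with counts {v0: 5, v1(wide): 5, v2: 9} the order
-// becomes v2, v0, v1 - descending count, ties broken by ascending s_reg, and
-// the wide entry's STARTING_WIDE_SREG bit makes it lose the tie to v0.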
-
-void Mir2Lir::DumpCounts(const RefCounts* arr, int size, const char* msg) {
-  LOG(INFO) << msg;
-  for (int i = 0; i < size; i++) {
-    if ((arr[i].s_reg & STARTING_WIDE_SREG) != 0) {
-      LOG(INFO) << "s_reg[64_" << (arr[i].s_reg & ~STARTING_WIDE_SREG) << "]: " << arr[i].count;
-    } else {
-      LOG(INFO) << "s_reg[32_" << arr[i].s_reg << "]: " << arr[i].count;
-    }
-  }
-}
-
-/*
- * Note: some portions of this code are required even if the kPromoteRegs
- * optimization is disabled.
- */
-void Mir2Lir::DoPromotion() {
-  int num_regs = mir_graph_->GetNumOfCodeAndTempVRs();
-  const int promotion_threshold = 1;
-  // Allocate the promotion map - one entry for each Dalvik vReg or compiler temp
-  promotion_map_ = arena_->AllocArray<PromotionMap>(num_regs, kArenaAllocRegAlloc);
-
-  // Allow target code to add any special registers
-  AdjustSpillMask();
-
-  /*
-   * Simple register promotion. Just do a static count of the uses
-   * of Dalvik registers.  Note that we examine the SSA names, but
-   * count based on original Dalvik register name.  Count refs
-   * separately based on type in order to give allocation
-   * preference to fp doubles - which must be allocated as pairs of
-   * sequential physical single fp registers starting with an
-   * even-numbered reg.
-   * TUNING: replace with linear scan once we have the ability
-   * to describe register live ranges for GC.
-   */
-  size_t core_reg_count_size = WideGPRsAreAliases() ? num_regs : num_regs * 2;
-  size_t fp_reg_count_size = WideFPRsAreAliases() ? num_regs : num_regs * 2;
-  RefCounts *core_regs = arena_->AllocArray<RefCounts>(core_reg_count_size, kArenaAllocRegAlloc);
-  RefCounts *fp_regs = arena_->AllocArray<RefCounts>(fp_reg_count_size, kArenaAllocRegAlloc);
-  // Set ssa names for original Dalvik registers
-  for (int i = 0; i < num_regs; i++) {
-    core_regs[i].s_reg = fp_regs[i].s_reg = i;
-  }
-
-  // Duplicate in upper half to represent possible wide starting sregs.
-  for (size_t i = num_regs; i < fp_reg_count_size; i++) {
-    fp_regs[i].s_reg = fp_regs[i - num_regs].s_reg | STARTING_WIDE_SREG;
-  }
-  for (size_t i = num_regs; i < core_reg_count_size; i++) {
-    core_regs[i].s_reg = core_regs[i - num_regs].s_reg | STARTING_WIDE_SREG;
-  }
-
-  // Sum use counts of SSA regs by original Dalvik vreg.
-  CountRefs(core_regs, fp_regs, num_regs);
-
-  // Sort the count arrays
-  qsort(core_regs, core_reg_count_size, sizeof(RefCounts), SortCounts);
-  qsort(fp_regs, fp_reg_count_size, sizeof(RefCounts), SortCounts);
-
-  if (cu_->verbose) {
-    DumpCounts(core_regs, core_reg_count_size, "Core regs after sort");
-    DumpCounts(fp_regs, fp_reg_count_size, "Fp regs after sort");
-  }
-
-  if (!(cu_->disable_opt & (1 << kPromoteRegs))) {
-    // Promote fp regs
-    for (size_t i = 0; (i < fp_reg_count_size) && (fp_regs[i].count >= promotion_threshold); i++) {
-      int low_sreg = fp_regs[i].s_reg & ~STARTING_WIDE_SREG;
-      size_t p_map_idx = SRegToPMap(low_sreg);
-      RegStorage reg = RegStorage::InvalidReg();
-      if (promotion_map_[p_map_idx].fp_location != kLocPhysReg) {
-        // TODO: break out the Thumb2-specific code.
-        if (cu_->instruction_set == kThumb2) {
-          bool wide = fp_regs[i].s_reg & STARTING_WIDE_SREG;
-          if (wide) {
-            if (promotion_map_[p_map_idx + 1].fp_location != kLocPhysReg) {
-              // Ignore the result - even if we can't alloc a double, we may still be able to alloc singles.
-              AllocPreservedDouble(low_sreg);
-            }
-            // Continue regardless of success - might still be able to grab a single.
-            continue;
-          } else {
-            reg = AllocPreservedSingle(low_sreg);
-          }
-        } else {
-          reg = AllocPreservedFpReg(low_sreg);
-        }
-        if (!reg.Valid()) {
-          break;  // No more left
-        }
-      }
-    }
-
-    // Promote core regs
-    for (size_t i = 0; (i < core_reg_count_size) &&
-         (core_regs[i].count >= promotion_threshold); i++) {
-      int low_sreg = core_regs[i].s_reg & ~STARTING_WIDE_SREG;
-      size_t p_map_idx = SRegToPMap(low_sreg);
-      if (promotion_map_[p_map_idx].core_location != kLocPhysReg) {
-        RegStorage reg = AllocPreservedCoreReg(low_sreg);
-        if (!reg.Valid()) {
-          break;  // No more left
-        }
-      }
-    }
-  }
-
-  // Now, update SSA names to new home locations
-  for (int i = 0; i < mir_graph_->GetNumSSARegs(); i++) {
-    RegLocation *curr = &mir_graph_->reg_location_[i];
-    int p_map_idx = SRegToPMap(curr->s_reg_low);
-    int reg_num = curr->fp ? promotion_map_[p_map_idx].fp_reg : promotion_map_[p_map_idx].core_reg;
-    bool wide = curr->wide || (cu_->target64 && curr->ref);
-    RegStorage reg = RegStorage::InvalidReg();
-    if (curr->fp && promotion_map_[p_map_idx].fp_location == kLocPhysReg) {
-      if (wide && cu_->instruction_set == kThumb2) {
-        if (promotion_map_[p_map_idx + 1].fp_location == kLocPhysReg) {
-          int high_reg = promotion_map_[p_map_idx + 1].fp_reg;
-          // TODO: move target-specific restrictions out of here.
-          if (((reg_num & 0x1) == 0) && ((reg_num + 1) == high_reg)) {
-            reg = RegStorage::FloatSolo64(RegStorage::RegNum(reg_num) >> 1);
-          }
-        }
-      } else {
-        reg = wide ? RegStorage::FloatSolo64(reg_num) : RegStorage::FloatSolo32(reg_num);
-      }
-    } else if (!curr->fp && promotion_map_[p_map_idx].core_location == kLocPhysReg) {
-      if (wide && !cu_->target64) {
-        if (promotion_map_[p_map_idx + 1].core_location == kLocPhysReg) {
-          int high_reg = promotion_map_[p_map_idx + 1].core_reg;
-          reg = RegStorage(RegStorage::k64BitPair, reg_num, high_reg);
-        }
-      } else {
-        reg = wide ? RegStorage::Solo64(reg_num) : RegStorage::Solo32(reg_num);
-      }
-    }
-    if (reg.Valid()) {
-      curr->reg = reg;
-      curr->location = kLocPhysReg;
-      curr->home = true;
-    }
-  }
-  if (cu_->verbose) {
-    DumpPromotionMap();
-  }
-}
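As a reading aid for the count-array layout DoPromotion() builds, a sketch under assumed values (not taken from a real trace): with num_regs = 4 on a target where wide FP regs are not aliases, fp_reg_count_size is 8 and the array is laid out as:

    //   index:  0  1  2  3   4       5       6       7
    //   s_reg:  0  1  2  3   0|WIDE  1|WIDE  2|WIDE  3|WIDE
    //
    // Entries 0..3 accumulate narrow use counts of v0..v3; entries 4..7 reuse
    // the same vreg numbers with STARTING_WIDE_SREG OR-ed in and accumulate
    // wide use counts, so one qsort ranks narrow and wide candidates together.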
-
-/* Returns sp-relative offset in bytes for a VReg */
-int Mir2Lir::VRegOffset(int v_reg) {
-  const DexFile::CodeItem* code_item = mir_graph_->GetCurrentDexCompilationUnit()->GetCodeItem();
-  return StackVisitor::GetVRegOffsetFromQuickCode(code_item, core_spill_mask_,
-                                                  fp_spill_mask_, frame_size_, v_reg,
-                                                  cu_->instruction_set);
-}
-
-/* Returns sp-relative offset in bytes for a SReg */
-int Mir2Lir::SRegOffset(int s_reg) {
-  return VRegOffset(mir_graph_->SRegToVReg(s_reg));
-}
-
-/* Mark register usage state and return the wide (long/double) return location */
-RegLocation Mir2Lir::GetReturnWide(RegisterClass reg_class) {
-  RegLocation res;
-  switch (reg_class) {
-    case kRefReg: LOG(FATAL); break;
-    case kFPReg: res = LocCReturnDouble(); break;
-    default: res = LocCReturnWide(); break;
-  }
-  Clobber(res.reg);
-  LockTemp(res.reg);
-  MarkWide(res.reg);
-  CheckRegLocation(res);
-  return res;
-}
-
-RegLocation Mir2Lir::GetReturn(RegisterClass reg_class) {
-  RegLocation res;
-  switch (reg_class) {
-    case kRefReg: res = LocCReturnRef(); break;
-    case kFPReg: res = LocCReturnFloat(); break;
-    default: res = LocCReturn(); break;
-  }
-  Clobber(res.reg);
-  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
-    MarkInUse(res.reg);
-  } else {
-    LockTemp(res.reg);
-  }
-  CheckRegLocation(res);
-  return res;
-}
-
-void Mir2Lir::SimpleRegAlloc() {
-  DoPromotion();
-
-  if (cu_->verbose && !(cu_->disable_opt & (1 << kPromoteRegs))) {
-    LOG(INFO) << "After Promotion";
-    mir_graph_->DumpRegLocTable(mir_graph_->reg_location_, mir_graph_->GetNumSSARegs());
-  }
-
-  /* Set the frame size */
-  frame_size_ = ComputeFrameSize();
-}
-
-/*
- * Get the "real" sreg number associated with an s_reg slot.  In general,
- * s_reg values passed through codegen are the SSA names created by
- * dataflow analysis and refer to slot numbers in the mir_graph_->reg_location
- * array.  However, renaming is accomplished by simply replacing RegLocation
- * entries in the reg_location[] array.  Therefore, when location
- * records for operands are first created, we need to ask the locRecord
- * identified by the dataflow pass what its new name is.
- */
-int Mir2Lir::GetSRegHi(int lowSreg) {
-  return (lowSreg == INVALID_SREG) ? INVALID_SREG : lowSreg + 1;
-}
-
-bool Mir2Lir::LiveOut(int s_reg ATTRIBUTE_UNUSED) {
-  // For now.
-  return true;
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/resource_mask.cc b/compiler/dex/quick/resource_mask.cc
deleted file mode 100644
index 817a69a..0000000
--- a/compiler/dex/quick/resource_mask.cc
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <iomanip>
-
-#include "resource_mask.h"
-
-#include "base/bit_utils.h"
-#include "base/arena_allocator.h"
-#include "base/logging.h"
-
-namespace art {
-
-namespace {  // anonymous namespace
-
-constexpr ResourceMask kNoRegMasks[] = {
-    kEncodeNone,
-    kEncodeHeapRef,
-    kEncodeLiteral,
-    kEncodeDalvikReg,
-    ResourceMask::Bit(ResourceMask::kFPStatus),
-    ResourceMask::Bit(ResourceMask::kCCode),
-};
-// The index (127 - bit) is the same as CLZ(masks_[1]) for a ResourceMask with only that bit set.
-static_assert(kNoRegMasks[127-ResourceMask::kHeapRef].Equals(
-    kEncodeHeapRef), "kNoRegMasks heap ref index unexpected");
-static_assert(kNoRegMasks[127-ResourceMask::kLiteral].Equals(
-    kEncodeLiteral), "kNoRegMasks literal index unexpected");
-static_assert(kNoRegMasks[127-ResourceMask::kDalvikReg].Equals(
-    kEncodeDalvikReg), "kNoRegMasks dalvik reg index unexpected");
-static_assert(kNoRegMasks[127-ResourceMask::kFPStatus].Equals(
-    ResourceMask::Bit(ResourceMask::kFPStatus)), "kNoRegMasks fp status index unexpected");
-static_assert(kNoRegMasks[127-ResourceMask::kCCode].Equals(
-    ResourceMask::Bit(ResourceMask::kCCode)), "kNoRegMasks ccode index unexpected");
-
-template <size_t special_bit>
-constexpr ResourceMask OneRegOneSpecial(size_t reg) {
-  return ResourceMask::Bit(reg).Union(ResourceMask::Bit(special_bit));
-}
-
-// NOTE: Working around gcc bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61484 .
-// This should be a two-dimensional array, kSingleRegMasks[][32], with each line
-// enclosed in an extra { }. However, gcc issues a bogus "error: array must be initialized
-// with a brace-enclosed initializer" for that, so we flatten it to a one-dimensional
-// array (indexing sketched below the static_asserts that follow).
-constexpr ResourceMask kSingleRegMasks[] = {
-#define DEFINE_LIST_32(fn) \
-    fn(0), fn(1), fn(2), fn(3), fn(4), fn(5), fn(6), fn(7),           \
-    fn(8), fn(9), fn(10), fn(11), fn(12), fn(13), fn(14), fn(15),     \
-    fn(16), fn(17), fn(18), fn(19), fn(20), fn(21), fn(22), fn(23),   \
-    fn(24), fn(25), fn(26), fn(27), fn(28), fn(29), fn(30), fn(31)
-    // NOTE: Each line is 512B of constant data, 3KiB in total.
-    DEFINE_LIST_32(ResourceMask::Bit),
-    DEFINE_LIST_32(OneRegOneSpecial<ResourceMask::kHeapRef>),
-    DEFINE_LIST_32(OneRegOneSpecial<ResourceMask::kLiteral>),
-    DEFINE_LIST_32(OneRegOneSpecial<ResourceMask::kDalvikReg>),
-    DEFINE_LIST_32(OneRegOneSpecial<ResourceMask::kFPStatus>),
-    DEFINE_LIST_32(OneRegOneSpecial<ResourceMask::kCCode>),
-#undef DEFINE_LIST_32
-};
-
-constexpr size_t SingleRegMaskIndex(size_t main_index, size_t sub_index) {
-  return main_index * 32u + sub_index;
-}
-
-// The index (127 - bit) is the same as CLZ(masks_[1]) for a ResourceMask with only that bit set.
-static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kHeapRef, 0)].Equals(
-    OneRegOneSpecial<ResourceMask::kHeapRef>(0)), "kSingleRegMasks heap ref index unexpected");
-static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kLiteral, 0)].Equals(
-    OneRegOneSpecial<ResourceMask::kLiteral>(0)), "kSingleRegMasks literal index unexpected");
-static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kDalvikReg, 0)].Equals(
-    OneRegOneSpecial<ResourceMask::kDalvikReg>(0)), "kSingleRegMasks dalvik reg index unexpected");
-static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kFPStatus, 0)].Equals(
-    OneRegOneSpecial<ResourceMask::kFPStatus>(0)), "kSingleRegMasks fp status index unexpected");
-static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kCCode, 0)].Equals(
-    OneRegOneSpecial<ResourceMask::kCCode>(0)), "kSingleRegMasks ccode index unexpected");
-
-// NOTE: arraysize(kNoRegMasks) multiplied by 32 due to the gcc bug workaround, see above.
-static_assert(arraysize(kSingleRegMasks) == arraysize(kNoRegMasks) * 32, "arraysizes unexpected");
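To make the flattened indexing concrete, a hand-worked sketch of the workaround above (illustration only, not additional source):

    // Conceptually kSingleRegMasks2D[main_index][sub_index]; after flattening
    // rows of 32, the lookup is kSingleRegMasks[main_index * 32u + sub_index].
    // E.g. row 3 (OneRegOneSpecial<kDalvikReg>), register 5:
    //   SingleRegMaskIndex(3, 5) == 3 * 32 + 5 == 101.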
-
-constexpr ResourceMask kTwoRegsMasks[] = {
-#define TWO(a, b) ResourceMask::Bit(a).Union(ResourceMask::Bit(b))
-    // NOTE: 16 * 15 / 2 = 120 entries, 16 bytes each, 1920B in total.
-    TWO(0, 1),
-    TWO(0, 2), TWO(1, 2),
-    TWO(0, 3), TWO(1, 3), TWO(2, 3),
-    TWO(0, 4), TWO(1, 4), TWO(2, 4), TWO(3, 4),
-    TWO(0, 5), TWO(1, 5), TWO(2, 5), TWO(3, 5), TWO(4, 5),
-    TWO(0, 6), TWO(1, 6), TWO(2, 6), TWO(3, 6), TWO(4, 6), TWO(5, 6),
-    TWO(0, 7), TWO(1, 7), TWO(2, 7), TWO(3, 7), TWO(4, 7), TWO(5, 7), TWO(6, 7),
-    TWO(0, 8), TWO(1, 8), TWO(2, 8), TWO(3, 8), TWO(4, 8), TWO(5, 8), TWO(6, 8), TWO(7, 8),
-    TWO(0, 9), TWO(1, 9), TWO(2, 9), TWO(3, 9), TWO(4, 9), TWO(5, 9), TWO(6, 9), TWO(7, 9),
-        TWO(8, 9),
-    TWO(0, 10), TWO(1, 10), TWO(2, 10), TWO(3, 10), TWO(4, 10), TWO(5, 10), TWO(6, 10), TWO(7, 10),
-        TWO(8, 10), TWO(9, 10),
-    TWO(0, 11), TWO(1, 11), TWO(2, 11), TWO(3, 11), TWO(4, 11), TWO(5, 11), TWO(6, 11), TWO(7, 11),
-        TWO(8, 11), TWO(9, 11), TWO(10, 11),
-    TWO(0, 12), TWO(1, 12), TWO(2, 12), TWO(3, 12), TWO(4, 12), TWO(5, 12), TWO(6, 12), TWO(7, 12),
-        TWO(8, 12), TWO(9, 12), TWO(10, 12), TWO(11, 12),
-    TWO(0, 13), TWO(1, 13), TWO(2, 13), TWO(3, 13), TWO(4, 13), TWO(5, 13), TWO(6, 13), TWO(7, 13),
-        TWO(8, 13), TWO(9, 13), TWO(10, 13), TWO(11, 13), TWO(12, 13),
-    TWO(0, 14), TWO(1, 14), TWO(2, 14), TWO(3, 14), TWO(4, 14), TWO(5, 14), TWO(6, 14), TWO(7, 14),
-        TWO(8, 14), TWO(9, 14), TWO(10, 14), TWO(11, 14), TWO(12, 14), TWO(13, 14),
-    TWO(0, 15), TWO(1, 15), TWO(2, 15), TWO(3, 15), TWO(4, 15), TWO(5, 15), TWO(6, 15), TWO(7, 15),
-        TWO(8, 15), TWO(9, 15), TWO(10, 15), TWO(11, 15), TWO(12, 15), TWO(13, 15), TWO(14, 15),
-#undef TWO
-};
-static_assert(arraysize(kTwoRegsMasks) == 16 * 15 / 2, "arraysize of kTwoRegsMasks unexpected");
-
-constexpr size_t TwoRegsIndex(size_t higher, size_t lower) {
-  return (higher * (higher - 1)) / 2u + lower;
-}
-
-constexpr bool CheckTwoRegsMask(size_t higher, size_t lower) {
-  return ResourceMask::Bit(lower).Union(ResourceMask::Bit(higher)).Equals(
-      kTwoRegsMasks[TwoRegsIndex(higher, lower)]);
-}
-
-constexpr bool CheckTwoRegsMaskLine(size_t line, size_t lower = 0u) {
-  return (lower == line) ||
-      (CheckTwoRegsMask(line, lower) && CheckTwoRegsMaskLine(line, lower + 1u));
-}
-
-constexpr bool CheckTwoRegsMaskTable(size_t lines) {
-  return lines == 0 ||
-      (CheckTwoRegsMaskLine(lines - 1) && CheckTwoRegsMaskTable(lines - 1u));
-}
-
-static_assert(CheckTwoRegsMaskTable(16), "two regs masks table check failed");
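A hand-worked example of the triangular indexing verified by the checks above:

    // Line `higher` of kTwoRegsMasks holds TWO(0, higher) .. TWO(higher-1, higher),
    // preceded by 1 + 2 + ... + (higher-1) = higher * (higher - 1) / 2 entries:
    //   TwoRegsIndex(1, 0)   == 0                        // TWO(0, 1), first entry
    //   TwoRegsIndex(3, 2)   == 3 * 2 / 2 + 2 == 5       // TWO(2, 3)
    //   TwoRegsIndex(15, 14) == 15 * 14 / 2 + 14 == 119  // last of the 120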
-
-}  // anonymous namespace
-
-const ResourceMask* ResourceMaskCache::GetMask(const ResourceMask& mask) {
-  // Instead of having a deduplication map, we shall just use pre-defined constexpr
-  // masks for the common cases. At most one of these special bits is allowed:
-  constexpr ResourceMask kAllowedSpecialBits = ResourceMask::Bit(ResourceMask::kFPStatus)
-      .Union(ResourceMask::Bit(ResourceMask::kCCode))
-      .Union(kEncodeHeapRef).Union(kEncodeLiteral).Union(kEncodeDalvikReg);
-  const ResourceMask* res = nullptr;
-  // Limit to low 32 regs and the kAllowedSpecialBits.
-  if ((mask.masks_[0] >> 32) == 0u && (mask.masks_[1] & ~kAllowedSpecialBits.masks_[1]) == 0u) {
-    // Check if it's only up to two registers.
-    uint32_t low_regs = static_cast<uint32_t>(mask.masks_[0]);
-    uint32_t low_regs_without_lowest = low_regs & (low_regs - 1u);
-    if (low_regs_without_lowest == 0u && IsPowerOfTwo(mask.masks_[1])) {
-      // 0 or 1 register, 0 or 1 bit from kAllowedSpecialBits. Use a pre-defined mask.
-      size_t index = (mask.masks_[1] != 0u) ? CLZ(mask.masks_[1]) : 0u;
-      DCHECK_LT(index, arraysize(kNoRegMasks));
-      res = (low_regs != 0) ? &kSingleRegMasks[SingleRegMaskIndex(index, CTZ(low_regs))]
-                            : &kNoRegMasks[index];
-    } else if (IsPowerOfTwo(low_regs_without_lowest) && mask.masks_[1] == 0u) {
-      // 2 registers and no other flags. Use predefined mask if higher reg is < 16.
-      if (low_regs_without_lowest < (1u << 16)) {
-        res = &kTwoRegsMasks[TwoRegsIndex(CTZ(low_regs_without_lowest), CTZ(low_regs))];
-      }
-    }
-  } else if (mask.Equals(kEncodeAll)) {
-    res = &kEncodeAll;
-  }
-  if (res != nullptr) {
-    DCHECK(res->Equals(mask))
-        << "(" << std::hex << std::setw(16) << mask.masks_[0]
-        << ", "<< std::hex << std::setw(16) << mask.masks_[1]
-        << ") != (" << std::hex << std::setw(16) << res->masks_[0]
-        << ", "<< std::hex << std::setw(16) << res->masks_[1] << ")";
-    return res;
-  }
-
-  // TODO: Deduplicate. (At least the most common masks.)
-  void* mem = allocator_->Alloc(sizeof(ResourceMask), kArenaAllocLIRResourceMask);
-  return new (mem) ResourceMask(mask);
-}
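The mask classification above leans on the classic clear-lowest-set-bit trick; a standalone sketch (function names here are illustrative, not from the tree):

    #include <cstdint>

    // v & (v - 1) clears the lowest set bit. Zero afterwards means at most one
    // bit was set; a power of two afterwards means exactly two bits were set,
    // mirroring the low_regs / low_regs_without_lowest checks in GetMask().
    bool AtMostOneBit(uint32_t v) { return (v & (v - 1u)) == 0u; }
    bool ExactlyTwoBits(uint32_t v) {
      uint32_t without_lowest = v & (v - 1u);
      return without_lowest != 0u && (without_lowest & (without_lowest - 1u)) == 0u;
    }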
-
-}  // namespace art
diff --git a/compiler/dex/quick/resource_mask.h b/compiler/dex/quick/resource_mask.h
deleted file mode 100644
index 78e81b2..0000000
--- a/compiler/dex/quick/resource_mask.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_RESOURCE_MASK_H_
-#define ART_COMPILER_DEX_QUICK_RESOURCE_MASK_H_
-
-#include <stdint.h>
-
-#include "base/logging.h"
-#include "base/value_object.h"
-#include "dex/reg_storage.h"
-
-namespace art {
-
-class ArenaAllocator;
-
-/**
- * @brief Resource mask for LIR insn uses or defs.
- * @details Def/Use mask used for checking dependencies between LIR insns in local
- * optimizations such as load hoisting.
- */
-class ResourceMask {
- private:
-  constexpr ResourceMask(uint64_t mask1, uint64_t mask2)
-      : masks_{ mask1, mask2 } {  // NOLINT
-  }
-
- public:
-  /*
-   * Def/Use encoding in the 128-bit use_mask/def_mask.  Low positions are used for
-   * target-specific registers (typically using the register number as the position).
-   * High positions are reserved for common and abstract resources.
-   */
-  enum ResourceBit {
-    kMustNotAlias = 127,
-    kHeapRef = 126,         // Default memory reference type.
-    kLiteral = 125,         // Literal pool memory reference.
-    kDalvikReg = 124,       // Dalvik v_reg memory reference.
-    kFPStatus = 123,
-    kCCode = 122,
-    kLowestCommonResource = kCCode,
-    kHighestCommonResource = kMustNotAlias
-  };
-
-  // Default-constructible.
-  constexpr ResourceMask()
-    : masks_ { 0u, 0u } {
-  }
-
-  // Copy-constructible and copyable.
-  ResourceMask(const ResourceMask& other) = default;
-  ResourceMask& operator=(const ResourceMask& other) = default;
-
-  // Comparable by content.
-  bool operator==(const ResourceMask& other) const {
-    return masks_[0] == other.masks_[0] && masks_[1] == other.masks_[1];
-  }
-
-  static constexpr ResourceMask RawMask(uint64_t mask1, uint64_t mask2) {
-    return ResourceMask(mask1, mask2);
-  }
-
-  static constexpr ResourceMask Bit(size_t bit) {
-    return ResourceMask(bit >= 64u ? 0u : UINT64_C(1) << bit,
-                        bit >= 64u ? UINT64_C(1) << (bit - 64u) : 0u);
-  }
-
-  // Two consecutive bits. The start_bit must be even.
-  static constexpr ResourceMask TwoBits(size_t start_bit) {
-    return
-        DCHECK_CONSTEXPR((start_bit & 1u) == 0u, << start_bit << " isn't even", Bit(0))
-        ResourceMask(start_bit >= 64u ? 0u : UINT64_C(3) << start_bit,
-                     start_bit >= 64u ? UINT64_C(3) << (start_bit - 64u) : 0u);
-  }
-
-  static constexpr ResourceMask NoBits() {
-    return ResourceMask(UINT64_C(0), UINT64_C(0));
-  }
-
-  static constexpr ResourceMask AllBits() {
-    return ResourceMask(~UINT64_C(0), ~UINT64_C(0));
-  }
-
-  constexpr ResourceMask Union(const ResourceMask& other) const {
-    return ResourceMask(masks_[0] | other.masks_[0], masks_[1] | other.masks_[1]);
-  }
-
-  constexpr ResourceMask Intersection(const ResourceMask& other) const {
-    return ResourceMask(masks_[0] & other.masks_[0], masks_[1] & other.masks_[1]);
-  }
-
-  constexpr ResourceMask Without(const ResourceMask& other) const {
-    return ResourceMask(masks_[0] & ~other.masks_[0], masks_[1] & ~other.masks_[1]);
-  }
-
-  constexpr bool Equals(const ResourceMask& other) const {
-    return masks_[0] == other.masks_[0] && masks_[1] == other.masks_[1];
-  }
-
-  constexpr bool Intersects(const ResourceMask& other) const {
-    return (masks_[0] & other.masks_[0]) != 0u || (masks_[1] & other.masks_[1]) != 0u;
-  }
-
-  void SetBit(size_t bit);
-
-  constexpr bool HasBit(size_t bit) const {
-    return (masks_[bit / 64u] & (UINT64_C(1) << (bit & 63u))) != 0u;
-  }
-
-  ResourceMask& SetBits(const ResourceMask& other) {
-    masks_[0] |= other.masks_[0];
-    masks_[1] |= other.masks_[1];
-    return *this;
-  }
-
-  ResourceMask& ClearBits(const ResourceMask& other) {
-    masks_[0] &= ~other.masks_[0];
-    masks_[1] &= ~other.masks_[1];
-    return *this;
-  }
-
- private:
-  uint64_t masks_[2];
-
-  friend class ResourceMaskCache;
-};
-std::ostream& operator<<(std::ostream& os, const ResourceMask::ResourceBit& rhs);
-
-inline void ResourceMask::SetBit(size_t bit) {
-  DCHECK_LE(bit, kHighestCommonResource);
-  masks_[bit / 64u] |= UINT64_C(1) << (bit & 63u);
-}
-
-constexpr ResourceMask kEncodeNone = ResourceMask::NoBits();
-constexpr ResourceMask kEncodeAll = ResourceMask::AllBits();
-constexpr ResourceMask kEncodeHeapRef = ResourceMask::Bit(ResourceMask::kHeapRef);
-constexpr ResourceMask kEncodeLiteral = ResourceMask::Bit(ResourceMask::kLiteral);
-constexpr ResourceMask kEncodeDalvikReg = ResourceMask::Bit(ResourceMask::kDalvikReg);
-constexpr ResourceMask kEncodeMem = kEncodeLiteral.Union(kEncodeDalvikReg).Union(
-    kEncodeHeapRef).Union(ResourceMask::Bit(ResourceMask::kMustNotAlias));
-
-class ResourceMaskCache {
- public:
-  explicit ResourceMaskCache(ArenaAllocator* allocator)
-      : allocator_(allocator) {
-  }
-
-  const ResourceMask* GetMask(const ResourceMask& mask);
-
- private:
-  ArenaAllocator* allocator_;
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_RESOURCE_MASK_H_
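Since the full class definition appears above, a short usage sketch of the API being deleted (illustrative values; not code from the tree):

    // Build a def mask for an instruction writing target reg 0 and the flags,
    // and a use mask for a Dalvik vreg slot, then query them.
    ResourceMask def_mask = ResourceMask::Bit(0)
        .Union(ResourceMask::Bit(ResourceMask::kCCode));
    ResourceMask use_mask = kEncodeDalvikReg;

    bool conflict = def_mask.Intersects(use_mask);               // false: disjoint
    bool touches_flags = def_mask.HasBit(ResourceMask::kCCode);  // true
    def_mask.ClearBits(ResourceMask::Bit(ResourceMask::kCCode)); // drop the flag bit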
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
deleted file mode 100644
index 1c2a619..0000000
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ /dev/null
@@ -1,2073 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_x86.h"
-
-#include "base/bit_utils.h"
-#include "base/logging.h"
-#include "dex/compiler_ir.h"
-#include "dex/quick/mir_to_lir.h"
-#include "oat.h"
-#include "oat_quick_method_header.h"
-#include "utils.h"
-#include "x86_lir.h"
-
-namespace art {
-
-#define MAX_ASSEMBLER_RETRIES 50
-
-const X86EncodingMap X86Mir2Lir::EncodingMap[kX86Last] = {
-  { kX8632BitData, kData,    IS_UNARY_OP,            { 0, 0, 0x00, 0, 0, 0, 0, 4, false }, "data",  "0x!0d" },
-  { kX86Bkpt,      kNullary, NO_OPERAND | IS_BRANCH, { 0, 0, 0xCC, 0, 0, 0, 0, 0, false }, "int 3", "" },
-  { kX86Nop,       kNop,     NO_OPERAND,             { 0, 0, 0x90, 0, 0, 0, 0, 0, false }, "nop",   "" },
-
-#define ENCODING_MAP(opname, mem_use, reg_def, uses_ccodes, \
-                     rm8_r8, rm32_r32, \
-                     r8_rm8, r32_rm32, \
-                     ax8_i8, ax32_i32, \
-                     rm8_i8, rm8_i8_modrm, \
-                     rm32_i32, rm32_i32_modrm, \
-                     rm32_i8, rm32_i8_modrm) \
-{ kX86 ## opname ## 8MR, kMemReg,    mem_use | IS_TERTIARY_OP |           REG_USE02  | SETS_CCODES | uses_ccodes, { 0,             0, rm8_r8, 0, 0, 0,            0,      0, true }, #opname "8MR", "[!0r+!1d],!2r" }, \
-{ kX86 ## opname ## 8AR, kArrayReg,  mem_use | IS_QUIN_OP     |           REG_USE014 | SETS_CCODES | uses_ccodes, { 0,             0, rm8_r8, 0, 0, 0,            0,      0, true }, #opname "8AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
-{ kX86 ## opname ## 8TR, kThreadReg, mem_use | IS_BINARY_OP   |           REG_USE1   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm8_r8, 0, 0, 0,            0,      0, true }, #opname "8TR", "fs:[!0d],!1r" }, \
-{ kX86 ## opname ## 8RR, kRegReg,              IS_BINARY_OP   | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, r8_rm8, 0, 0, 0,            0,      0, true }, #opname "8RR", "!0r,!1r" }, \
-{ kX86 ## opname ## 8RM, kRegMem,    IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, r8_rm8, 0, 0, 0,            0,      0, true }, #opname "8RM", "!0r,[!1r+!2d]" }, \
-{ kX86 ## opname ## 8RA, kRegArray,  IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0,             0, r8_rm8, 0, 0, 0,            0,      0, true }, #opname "8RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \
-{ kX86 ## opname ## 8RT, kRegThread, IS_LOAD | IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, r8_rm8, 0, 0, 0,            0,      0, true }, #opname "8RT", "!0r,fs:[!1d]" }, \
-{ kX86 ## opname ## 8RI, kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm8_i8, 0, 0, rm8_i8_modrm, ax8_i8, 1, true }, #opname "8RI", "!0r,!1d" }, \
-{ kX86 ## opname ## 8MI, kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm8_i8, 0, 0, rm8_i8_modrm, 0,      1, false}, #opname "8MI", "[!0r+!1d],!2d" }, \
-{ kX86 ## opname ## 8AI, kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, rm8_i8, 0, 0, rm8_i8_modrm, 0,      1, false}, #opname "8AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
-{ kX86 ## opname ## 8TI, kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0,      1, false}, #opname "8TI", "fs:[!0d],!1d" }, \
-  \
-{ kX86 ## opname ## 16MR,  kMemReg,    mem_use | IS_TERTIARY_OP |           REG_USE02  | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_r32, 0, 0, 0,              0,        0, false }, #opname "16MR", "[!0r+!1d],!2r" }, \
-{ kX86 ## opname ## 16AR,  kArrayReg,  mem_use | IS_QUIN_OP     |           REG_USE014 | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_r32, 0, 0, 0,              0,        0, false }, #opname "16AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
-{ kX86 ## opname ## 16TR,  kThreadReg, mem_use | IS_BINARY_OP   |           REG_USE1   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_r32, 0, 0, 0,              0,        0, false }, #opname "16TR", "fs:[!0d],!1r" }, \
-{ kX86 ## opname ## 16RR,  kRegReg,              IS_BINARY_OP   | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0x66,          0,    r32_rm32, 0, 0, 0,              0,        0, false }, #opname "16RR", "!0r,!1r" }, \
-{ kX86 ## opname ## 16RM,  kRegMem,    IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0x66,          0,    r32_rm32, 0, 0, 0,              0,        0, false }, #opname "16RM", "!0r,[!1r+!2d]" }, \
-{ kX86 ## opname ## 16RA,  kRegArray,  IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0x66,          0,    r32_rm32, 0, 0, 0,              0,        0, false }, #opname "16RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \
-{ kX86 ## opname ## 16RT,  kRegThread, IS_LOAD | IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, r32_rm32, 0, 0, 0,              0,        0, false }, #opname "16RT", "!0r,fs:[!1d]" }, \
-{ kX86 ## opname ## 16RI,  kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i32, 0, 0, rm32_i32_modrm, ax32_i32, 2, false }, #opname "16RI", "!0r,!1d" }, \
-{ kX86 ## opname ## 16MI,  kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i32, 0, 0, rm32_i32_modrm, 0,        2, false }, #opname "16MI", "[!0r+!1d],!2d" }, \
-{ kX86 ## opname ## 16AI,  kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i32, 0, 0, rm32_i32_modrm, 0,        2, false }, #opname "16AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
-{ kX86 ## opname ## 16TI,  kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_i32, 0, 0, rm32_i32_modrm, 0,        2, false }, #opname "16TI", "fs:[!0d],!1d" }, \
-{ kX86 ## opname ## 16RI8, kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i8,  0, 0, rm32_i8_modrm,  0,        1, false }, #opname "16RI8", "!0r,!1d" }, \
-{ kX86 ## opname ## 16MI8, kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i8,  0, 0, rm32_i8_modrm,  0,        1, false }, #opname "16MI8", "[!0r+!1d],!2d" }, \
-{ kX86 ## opname ## 16AI8, kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i8,  0, 0, rm32_i8_modrm,  0,        1, false }, #opname "16AI8", "[!0r+!1r<<!2d+!3d],!4d" }, \
-{ kX86 ## opname ## 16TI8, kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1, false }, #opname "16TI8", "fs:[!0d],!1d" }, \
-  \
-{ kX86 ## opname ## 32MR,  kMemReg,    mem_use | IS_TERTIARY_OP |           REG_USE02  | SETS_CCODES | uses_ccodes, { 0,             0, rm32_r32, 0, 0, 0,              0,        0, false }, #opname "32MR", "[!0r+!1d],!2r" }, \
-{ kX86 ## opname ## 32AR,  kArrayReg,  mem_use | IS_QUIN_OP     |           REG_USE014 | SETS_CCODES | uses_ccodes, { 0,             0, rm32_r32, 0, 0, 0,              0,        0, false }, #opname "32AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
-{ kX86 ## opname ## 32TR,  kThreadReg, mem_use | IS_BINARY_OP   |           REG_USE1   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_r32, 0, 0, 0,              0,        0, false }, #opname "32TR", "fs:[!0d],!1r" }, \
-{ kX86 ## opname ## 32RR,  kRegReg,              IS_BINARY_OP   | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, r32_rm32, 0, 0, 0,              0,        0, false }, #opname "32RR", "!0r,!1r" }, \
-{ kX86 ## opname ## 32RM,  kRegMem,    IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, r32_rm32, 0, 0, 0,              0,        0, false }, #opname "32RM", "!0r,[!1r+!2d]" }, \
-{ kX86 ## opname ## 32RA,  kRegArray,  IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0,             0, r32_rm32, 0, 0, 0,              0,        0, false }, #opname "32RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \
-{ kX86 ## opname ## 32RT,  kRegThread, IS_LOAD | IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, r32_rm32, 0, 0, 0,              0,        0, false }, #opname "32RT", "!0r,fs:[!1d]" }, \
-{ kX86 ## opname ## 32RI,  kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i32, 0, 0, rm32_i32_modrm, ax32_i32, 4, false }, #opname "32RI", "!0r,!1d" }, \
-{ kX86 ## opname ## 32MI,  kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4, false }, #opname "32MI", "[!0r+!1d],!2d" }, \
-{ kX86 ## opname ## 32AI,  kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4, false }, #opname "32AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
-{ kX86 ## opname ## 32TI,  kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4, false }, #opname "32TI", "fs:[!0d],!1d" }, \
-{ kX86 ## opname ## 32RI8, kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1, false }, #opname "32RI8", "!0r,!1d" }, \
-{ kX86 ## opname ## 32MI8, kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1, false }, #opname "32MI8", "[!0r+!1d],!2d" }, \
-{ kX86 ## opname ## 32AI8, kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1, false }, #opname "32AI8", "[!0r+!1r<<!2d+!3d],!4d" }, \
-{ kX86 ## opname ## 32TI8, kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1, false }, #opname "32TI8", "fs:[!0d],!1d" }, \
-  \
-{ kX86 ## opname ## 64MR,  kMemReg,    mem_use | IS_TERTIARY_OP |           REG_USE02  | SETS_CCODES | uses_ccodes, { REX_W,             0, rm32_r32, 0, 0, 0,              0,        0, false }, #opname "64MR", "[!0r+!1d],!2r" }, \
-{ kX86 ## opname ## 64AR,  kArrayReg,  mem_use | IS_QUIN_OP     |           REG_USE014 | SETS_CCODES | uses_ccodes, { REX_W,             0, rm32_r32, 0, 0, 0,              0,        0, false }, #opname "64AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
-{ kX86 ## opname ## 64TR,  kThreadReg, mem_use | IS_BINARY_OP   |           REG_USE1   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, REX_W, rm32_r32, 0, 0, 0,              0,        0, false }, #opname "64TR", "fs:[!0d],!1r" }, \
-{ kX86 ## opname ## 64RR,  kRegReg,              IS_BINARY_OP   | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { REX_W,             0, r32_rm32, 0, 0, 0,              0,        0, false }, #opname "64RR", "!0r,!1r" }, \
-{ kX86 ## opname ## 64RM,  kRegMem,    IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { REX_W,             0, r32_rm32, 0, 0, 0,              0,        0, false }, #opname "64RM", "!0r,[!1r+!2d]" }, \
-{ kX86 ## opname ## 64RA,  kRegArray,  IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { REX_W,             0, r32_rm32, 0, 0, 0,              0,        0, false }, #opname "64RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \
-{ kX86 ## opname ## 64RT,  kRegThread, IS_LOAD | IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, REX_W, r32_rm32, 0, 0, 0,              0,        0, false }, #opname "64RT", "!0r,fs:[!1d]" }, \
-{ kX86 ## opname ## 64RI,  kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { REX_W,             0, rm32_i32, 0, 0, rm32_i32_modrm, ax32_i32, 4, false }, #opname "64RI", "!0r,!1d" }, \
-{ kX86 ## opname ## 64MI,  kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { REX_W,             0, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4, false }, #opname "64MI", "[!0r+!1d],!2d" }, \
-{ kX86 ## opname ## 64AI,  kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { REX_W,             0, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4, false }, #opname "64AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
-{ kX86 ## opname ## 64TI,  kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, REX_W, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4, false }, #opname "64TI", "fs:[!0d],!1d" }, \
-{ kX86 ## opname ## 64RI8, kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { REX_W,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1, false }, #opname "64RI8", "!0r,!1d" }, \
-{ kX86 ## opname ## 64MI8, kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { REX_W,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1, false }, #opname "64MI8", "[!0r+!1d],!2d" }, \
-{ kX86 ## opname ## 64AI8, kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { REX_W,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1, false }, #opname "64AI8", "[!0r+!1r<<!2d+!3d],!4d" }, \
-{ kX86 ## opname ## 64TI8, kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, REX_W, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1, false }, #opname "64TI8", "fs:[!0d],!1d" }
-
-ENCODING_MAP(Add, IS_LOAD | IS_STORE, REG_DEF0, 0,
-  0x00 /* RegMem8/Reg8 */,     0x01 /* RegMem32/Reg32 */,
-  0x02 /* Reg8/RegMem8 */,     0x03 /* Reg32/RegMem32 */,
-  0x04 /* Rax8/imm8 opcode */, 0x05 /* Rax32/imm32 */,
-  0x80, 0x0 /* RegMem8/imm8 */,
-  0x81, 0x0 /* RegMem32/imm32 */, 0x83, 0x0 /* RegMem32/imm8 */),
-ENCODING_MAP(Or, IS_LOAD | IS_STORE, REG_DEF0, 0,
-  0x08 /* RegMem8/Reg8 */,     0x09 /* RegMem32/Reg32 */,
-  0x0A /* Reg8/RegMem8 */,     0x0B /* Reg32/RegMem32 */,
-  0x0C /* Rax8/imm8 opcode */, 0x0D /* Rax32/imm32 */,
-  0x80, 0x1 /* RegMem8/imm8 */,
-  0x81, 0x1 /* RegMem32/imm32 */, 0x83, 0x1 /* RegMem32/imm8 */),
-ENCODING_MAP(Adc, IS_LOAD | IS_STORE, REG_DEF0, USES_CCODES,
-  0x10 /* RegMem8/Reg8 */,     0x11 /* RegMem32/Reg32 */,
-  0x12 /* Reg8/RegMem8 */,     0x13 /* Reg32/RegMem32 */,
-  0x14 /* Rax8/imm8 opcode */, 0x15 /* Rax32/imm32 */,
-  0x80, 0x2 /* RegMem8/imm8 */,
-  0x81, 0x2 /* RegMem32/imm32 */, 0x83, 0x2 /* RegMem32/imm8 */),
-ENCODING_MAP(Sbb, IS_LOAD | IS_STORE, REG_DEF0, USES_CCODES,
-  0x18 /* RegMem8/Reg8 */,     0x19 /* RegMem32/Reg32 */,
-  0x1A /* Reg8/RegMem8 */,     0x1B /* Reg32/RegMem32 */,
-  0x1C /* Rax8/imm8 opcode */, 0x1D /* Rax32/imm32 */,
-  0x80, 0x3 /* RegMem8/imm8 */,
-  0x81, 0x3 /* RegMem32/imm32 */, 0x83, 0x3 /* RegMem32/imm8 */),
-ENCODING_MAP(And, IS_LOAD | IS_STORE, REG_DEF0, 0,
-  0x20 /* RegMem8/Reg8 */,     0x21 /* RegMem32/Reg32 */,
-  0x22 /* Reg8/RegMem8 */,     0x23 /* Reg32/RegMem32 */,
-  0x24 /* Rax8/imm8 opcode */, 0x25 /* Rax32/imm32 */,
-  0x80, 0x4 /* RegMem8/imm8 */,
-  0x81, 0x4 /* RegMem32/imm32 */, 0x83, 0x4 /* RegMem32/imm8 */),
-ENCODING_MAP(Sub, IS_LOAD | IS_STORE, REG_DEF0, 0,
-  0x28 /* RegMem8/Reg8 */,     0x29 /* RegMem32/Reg32 */,
-  0x2A /* Reg8/RegMem8 */,     0x2B /* Reg32/RegMem32 */,
-  0x2C /* Rax8/imm8 opcode */, 0x2D /* Rax32/imm32 */,
-  0x80, 0x5 /* RegMem8/imm8 */,
-  0x81, 0x5 /* RegMem32/imm32 */, 0x83, 0x5 /* RegMem32/imm8 */),
-ENCODING_MAP(Xor, IS_LOAD | IS_STORE, REG_DEF0, 0,
-  0x30 /* RegMem8/Reg8 */,     0x31 /* RegMem32/Reg32 */,
-  0x32 /* Reg8/RegMem8 */,     0x33 /* Reg32/RegMem32 */,
-  0x34 /* Rax8/imm8 opcode */, 0x35 /* Rax32/imm32 */,
-  0x80, 0x6 /* RegMem8/imm8 */,
-  0x81, 0x6 /* RegMem32/imm32 */, 0x83, 0x6 /* RegMem32/imm8 */),
-ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
-  0x38 /* RegMem8/Reg8 */,     0x39 /* RegMem32/Reg32 */,
-  0x3A /* Reg8/RegMem8 */,     0x3B /* Reg32/RegMem32 */,
-  0x3C /* Rax8/imm8 opcode */, 0x3D /* Rax32/imm32 */,
-  0x80, 0x7 /* RegMem8/imm8 */,
-  0x81, 0x7 /* RegMem32/imm32 */, 0x83, 0x7 /* RegMem32/imm8 */),
-#undef ENCODING_MAP
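For orientation, the first row that ENCODING_MAP(Add, ...) above generates, expanded by hand (a reconstruction from the macro body, not verbatim preprocessor output):

    // 8-bit memory-destination add, "add [reg+disp], reg8", opcode 0x00;
    // mem_use = IS_LOAD | IS_STORE and uses_ccodes = 0 for Add.
    { kX86Add8MR, kMemReg,
      IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE02 | SETS_CCODES,
      { 0, 0, 0x00, 0, 0, 0, 0, 0, true }, "Add8MR", "[!0r+!1d],!2r" },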
-
-  { kX86Imul16RRI,   kRegRegImm,             IS_TERTIARY_OP | REG_DEF0_USE1  | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2, false }, "Imul16RRI", "!0r,!1r,!2d" },
-  { kX86Imul16RMI,   kRegMemImm,   IS_LOAD | IS_QUAD_OP     | REG_DEF0_USE1  | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2, false }, "Imul16RMI", "!0r,[!1r+!2d],!3d" },
-  { kX86Imul16RAI,   kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2, false }, "Imul16RAI", "!0r,[!1r+!2r<<!3d+!4d],!5d" },
-
-  { kX86Imul32RRI,   kRegRegImm,             IS_TERTIARY_OP | REG_DEF0_USE1  | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4, false }, "Imul32RRI", "!0r,!1r,!2d" },
-  { kX86Imul32RMI,   kRegMemImm,   IS_LOAD | IS_QUAD_OP     | REG_DEF0_USE1  | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4, false }, "Imul32RMI", "!0r,[!1r+!2d],!3d" },
-  { kX86Imul32RAI,   kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4, false }, "Imul32RAI", "!0r,[!1r+!2r<<!3d+!4d],!5d" },
-  { kX86Imul32RRI8,  kRegRegImm,             IS_TERTIARY_OP | REG_DEF0_USE1  | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1, false }, "Imul32RRI8", "!0r,!1r,!2d" },
-  { kX86Imul32RMI8,  kRegMemImm,   IS_LOAD | IS_QUAD_OP     | REG_DEF0_USE1  | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1, false }, "Imul32RMI8", "!0r,[!1r+!2d],!3d" },
-  { kX86Imul32RAI8,  kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1, false }, "Imul32RAI8", "!0r,[!1r+!2r<<!3d+!4d],!5d" },
-
-  { kX86Imul64RRI,   kRegRegImm,             IS_TERTIARY_OP | REG_DEF0_USE1  | SETS_CCODES, { REX_W, 0, 0x69, 0, 0, 0, 0, 4, false }, "Imul64RRI", "!0r,!1r,!2d" },
-  { kX86Imul64RMI,   kRegMemImm,   IS_LOAD | IS_QUAD_OP     | REG_DEF0_USE1  | SETS_CCODES, { REX_W, 0, 0x69, 0, 0, 0, 0, 4, false }, "Imul64RMI", "!0r,[!1r+!2d],!3d" },
-  { kX86Imul64RAI,   kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { REX_W, 0, 0x69, 0, 0, 0, 0, 4, false }, "Imul64RAI", "!0r,[!1r+!2r<<!3d+!4d],!5d" },
-  { kX86Imul64RRI8,  kRegRegImm,             IS_TERTIARY_OP | REG_DEF0_USE1  | SETS_CCODES, { REX_W, 0, 0x6B, 0, 0, 0, 0, 1, false }, "Imul64RRI8", "!0r,!1r,!2d" },
-  { kX86Imul64RMI8,  kRegMemImm,   IS_LOAD | IS_QUAD_OP     | REG_DEF0_USE1  | SETS_CCODES, { REX_W, 0, 0x6B, 0, 0, 0, 0, 1, false }, "Imul64RMI8", "!0r,[!1r+!2d],!3d" },
-  { kX86Imul64RAI8,  kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { REX_W, 0, 0x6B, 0, 0, 0, 0, 1, false }, "Imul64RAI8", "!0r,[!1r+!2r<<!3d+!4d],!5d" },
-
-  { kX86Mov8MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,      { 0,             0, 0x88, 0, 0, 0, 0, 0, true }, "Mov8MR", "[!0r+!1d],!2r" },
-  { kX86Mov8AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,     { 0,             0, 0x88, 0, 0, 0, 0, 0, true }, "Mov8AR", "[!0r+!1r<<!2d+!3d],!4r" },
-  { kX86Mov8TR, kThreadReg, IS_STORE | IS_BINARY_OP   | REG_USE1,       { THREAD_PREFIX, 0, 0x88, 0, 0, 0, 0, 0, true }, "Mov8TR", "fs:[!0d],!1r" },
-  { kX86Mov8RR, kRegReg,               IS_BINARY_OP   | REG_DEF0_USE1,  { 0,             0, 0x8A, 0, 0, 0, 0, 0, true }, "Mov8RR", "!0r,!1r" },
-  { kX86Mov8RM, kRegMem,    IS_LOAD  | IS_TERTIARY_OP | REG_DEF0_USE1,  { 0,             0, 0x8A, 0, 0, 0, 0, 0, true }, "Mov8RM", "!0r,[!1r+!2d]" },
-  { kX86Mov8RA, kRegArray,  IS_LOAD  | IS_QUIN_OP     | REG_DEF0_USE12, { 0,             0, 0x8A, 0, 0, 0, 0, 0, true }, "Mov8RA", "!0r,[!1r+!2r<<!3d+!4d]" },
-  { kX86Mov8RT, kRegThread, IS_LOAD  | IS_BINARY_OP   | REG_DEF0,       { THREAD_PREFIX, 0, 0x8A, 0, 0, 0, 0, 0, true }, "Mov8RT", "!0r,fs:[!1d]" },
-  { kX86Mov8RI, kMovRegImm,            IS_BINARY_OP   | REG_DEF0,       { 0,             0, 0xB0, 0, 0, 0, 0, 1, true }, "Mov8RI", "!0r,!1d" },
-  { kX86Mov8MI, kMemImm,    IS_STORE | IS_TERTIARY_OP | REG_USE0,       { 0,             0, 0xC6, 0, 0, 0, 0, 1, false}, "Mov8MI", "[!0r+!1d],!2d" },
-  { kX86Mov8AI, kArrayImm,  IS_STORE | IS_QUIN_OP     | REG_USE01,      { 0,             0, 0xC6, 0, 0, 0, 0, 1, false}, "Mov8AI", "[!0r+!1r<<!2d+!3d],!4d" },
-  { kX86Mov8TI, kThreadImm, IS_STORE | IS_BINARY_OP,                    { THREAD_PREFIX, 0, 0xC6, 0, 0, 0, 0, 1, false}, "Mov8TI", "fs:[!0d],!1d" },
-
-  { kX86Mov16MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,      { 0x66,          0,    0x89, 0, 0, 0, 0, 0, false }, "Mov16MR", "[!0r+!1d],!2r" },
-  { kX86Mov16AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,     { 0x66,          0,    0x89, 0, 0, 0, 0, 0, false }, "Mov16AR", "[!0r+!1r<<!2d+!3d],!4r" },
-  { kX86Mov16TR, kThreadReg, IS_STORE | IS_BINARY_OP   | REG_USE1,       { THREAD_PREFIX, 0x66, 0x89, 0, 0, 0, 0, 0, false }, "Mov16TR", "fs:[!0d],!1r" },
-  { kX86Mov16RR, kRegReg,               IS_BINARY_OP   | REG_DEF0_USE1,  { 0x66,          0,    0x8B, 0, 0, 0, 0, 0, false }, "Mov16RR", "!0r,!1r" },
-  { kX86Mov16RM, kRegMem,    IS_LOAD  | IS_TERTIARY_OP | REG_DEF0_USE1,  { 0x66,          0,    0x8B, 0, 0, 0, 0, 0, false }, "Mov16RM", "!0r,[!1r+!2d]" },
-  { kX86Mov16RA, kRegArray,  IS_LOAD  | IS_QUIN_OP     | REG_DEF0_USE12, { 0x66,          0,    0x8B, 0, 0, 0, 0, 0, false }, "Mov16RA", "!0r,[!1r+!2r<<!3d+!4d]" },
-  { kX86Mov16RT, kRegThread, IS_LOAD  | IS_BINARY_OP   | REG_DEF0,       { THREAD_PREFIX, 0x66, 0x8B, 0, 0, 0, 0, 0, false }, "Mov16RT", "!0r,fs:[!1d]" },
-  { kX86Mov16RI, kMovRegImm,            IS_BINARY_OP   | REG_DEF0,       { 0x66,          0,    0xB8, 0, 0, 0, 0, 2, false }, "Mov16RI", "!0r,!1d" },
-  { kX86Mov16MI, kMemImm,    IS_STORE | IS_TERTIARY_OP | REG_USE0,       { 0x66,          0,    0xC7, 0, 0, 0, 0, 2, false }, "Mov16MI", "[!0r+!1d],!2d" },
-  { kX86Mov16AI, kArrayImm,  IS_STORE | IS_QUIN_OP     | REG_USE01,      { 0x66,          0,    0xC7, 0, 0, 0, 0, 2, false }, "Mov16AI", "[!0r+!1r<<!2d+!3d],!4d" },
-  { kX86Mov16TI, kThreadImm, IS_STORE | IS_BINARY_OP,                    { THREAD_PREFIX, 0x66, 0xC7, 0, 0, 0, 0, 2, false }, "Mov16TI", "fs:[!0d],!1d" },
-
-  { kX86Mov32MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,      { 0,             0, 0x89, 0, 0, 0, 0, 0, false }, "Mov32MR", "[!0r+!1d],!2r" },
-  { kX86Mov32AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,     { 0,             0, 0x89, 0, 0, 0, 0, 0, false }, "Mov32AR", "[!0r+!1r<<!2d+!3d],!4r" },
-  { kX86Movnti32MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,   { 0,             0, 0x0F, 0xC3, 0, 0, 0, 0, false }, "Movnti32MR", "[!0r+!1d],!2r" },
-  { kX86Movnti32AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,  { 0,             0, 0x0F, 0xC3, 0, 0, 0, 0, false }, "Movnti32AR", "[!0r+!1r<<!2d+!3d],!4r" },
-  { kX86Mov32TR, kThreadReg, IS_STORE | IS_BINARY_OP   | REG_USE1,       { THREAD_PREFIX, 0, 0x89, 0, 0, 0, 0, 0, false }, "Mov32TR", "fs:[!0d],!1r" },
-  { kX86Mov32RR, kRegReg,    IS_MOVE  | IS_BINARY_OP   | REG_DEF0_USE1,  { 0,             0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov32RR", "!0r,!1r" },
-  { kX86Mov32RM, kRegMem,    IS_LOAD  | IS_TERTIARY_OP | REG_DEF0_USE1,  { 0,             0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov32RM", "!0r,[!1r+!2d]" },
-  { kX86Mov32RA, kRegArray,  IS_LOAD  | IS_QUIN_OP     | REG_DEF0_USE12, { 0,             0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov32RA", "!0r,[!1r+!2r<<!3d+!4d]" },
-  { kX86Mov32RT, kRegThread, IS_LOAD  | IS_BINARY_OP   | REG_DEF0,       { THREAD_PREFIX, 0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov32RT", "!0r,fs:[!1d]" },
-  { kX86Mov32RI, kMovRegImm,            IS_BINARY_OP   | REG_DEF0,       { 0,             0, 0xB8, 0, 0, 0, 0, 4, false }, "Mov32RI", "!0r,!1d" },
-  { kX86Mov32MI, kMemImm,    IS_STORE | IS_TERTIARY_OP | REG_USE0,       { 0,             0, 0xC7, 0, 0, 0, 0, 4, false }, "Mov32MI", "[!0r+!1d],!2d" },
-  { kX86Mov32AI, kArrayImm,  IS_STORE | IS_QUIN_OP     | REG_USE01,      { 0,             0, 0xC7, 0, 0, 0, 0, 4, false }, "Mov32AI", "[!0r+!1r<<!2d+!3d],!4d" },
-  { kX86Mov32TI, kThreadImm, IS_STORE | IS_BINARY_OP,                    { THREAD_PREFIX, 0, 0xC7, 0, 0, 0, 0, 4, false }, "Mov32TI", "fs:[!0d],!1d" },
-
-  { kX86Lea32RM, kRegMem,               IS_TERTIARY_OP | REG_DEF0_USE1,  { 0,             0, 0x8D, 0, 0, 0, 0, 0, false }, "Lea32RM", "!0r,[!1r+!2d]" },
-  { kX86Lea32RA, kRegArray,             IS_QUIN_OP | REG_DEF0_USE12,     { 0,             0, 0x8D, 0, 0, 0, 0, 0, false }, "Lea32RA", "!0r,[!1r+!2r<<!3d+!4d]" },
-
-  { kX86Mov64MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,      { REX_W,             0, 0x89, 0, 0, 0, 0, 0, false }, "Mov64MR", "[!0r+!1d],!2r" },
-  { kX86Mov64AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,     { REX_W,             0, 0x89, 0, 0, 0, 0, 0, false }, "Mov64AR", "[!0r+!1r<<!2d+!3d],!4r" },
-  { kX86Movnti64MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,   { REX_W,             0, 0x0F, 0xC3, 0, 0, 0, 0, false }, "Movnti64MR", "[!0r+!1d],!2r" },
-  { kX86Movnti64AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,  { REX_W,             0, 0x0F, 0xC3, 0, 0, 0, 0, false }, "Movnti64AR", "[!0r+!1r<<!2d+!3d],!4r" },
-  { kX86Mov64TR, kThreadReg, IS_STORE | IS_BINARY_OP   | REG_USE1,       { THREAD_PREFIX, REX_W, 0x89, 0, 0, 0, 0, 0, false }, "Mov64TR", "fs:[!0d],!1r" },
-  { kX86Mov64RR, kRegReg,    IS_MOVE  | IS_BINARY_OP   | REG_DEF0_USE1,  { REX_W,             0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov64RR", "!0r,!1r" },
-  { kX86Mov64RM, kRegMem,    IS_LOAD  | IS_TERTIARY_OP | REG_DEF0_USE1,  { REX_W,             0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov64RM", "!0r,[!1r+!2d]" },
-  { kX86Mov64RA, kRegArray,  IS_LOAD  | IS_QUIN_OP     | REG_DEF0_USE12, { REX_W,             0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov64RA", "!0r,[!1r+!2r<<!3d+!4d]" },
-  { kX86Mov64RT, kRegThread, IS_LOAD  | IS_BINARY_OP   | REG_DEF0,       { THREAD_PREFIX, REX_W, 0x8B, 0, 0, 0, 0, 0, false }, "Mov64RT", "!0r,fs:[!1d]" },
-  { kX86Mov64RI32, kRegImm,             IS_BINARY_OP   | REG_DEF0,       { REX_W,             0, 0xC7, 0, 0, 0, 0, 4, false }, "Mov64RI32", "!0r,!1d" },
-  { kX86Mov64RI64, kMovRegQuadImm,      IS_TERTIARY_OP | REG_DEF0,       { REX_W,             0, 0xB8, 0, 0, 0, 0, 8, false }, "Mov64RI64", "!0r,!1q" },
-  { kX86Mov64MI, kMemImm,    IS_STORE | IS_TERTIARY_OP | REG_USE0,       { REX_W,             0, 0xC7, 0, 0, 0, 0, 4, false }, "Mov64MI", "[!0r+!1d],!2d" },
-  { kX86Mov64AI, kArrayImm,  IS_STORE | IS_QUIN_OP     | REG_USE01,      { REX_W,             0, 0xC7, 0, 0, 0, 0, 4, false }, "Mov64AI", "[!0r+!1r<<!2d+!3d],!4d" },
-  { kX86Mov64TI, kThreadImm, IS_STORE | IS_BINARY_OP,                    { THREAD_PREFIX, REX_W, 0xC7, 0, 0, 0, 0, 4, false }, "Mov64TI", "fs:[!0d],!1d" },
-
-  { kX86Lea64RM, kRegMem,               IS_TERTIARY_OP | REG_DEF0_USE1,  { REX_W,             0, 0x8D, 0, 0, 0, 0, 0, false }, "Lea64RM", "!0r,[!1r+!2d]" },
-  { kX86Lea64RA, kRegArray,             IS_QUIN_OP | REG_DEF0_USE12,     { REX_W,             0, 0x8D, 0, 0, 0, 0, 0, false }, "Lea64RA", "!0r,[!1r+!2r<<!3d+!4d]" },
-
-  { kX86Cmov32RRC, kRegRegCond, IS_TERTIARY_OP | REG_DEF0_USE01 | USES_CCODES, { 0,     0, 0x0F, 0x40, 0, 0, 0, 0, false }, "Cmovcc32RR", "!2c !0r,!1r" },
-  { kX86Cmov64RRC, kRegRegCond, IS_TERTIARY_OP | REG_DEF0_USE01 | USES_CCODES, { REX_W, 0, 0x0F, 0x40, 0, 0, 0, 0, false }, "Cmovcc64RR", "!2c !0r,!1r" },
-
-  { kX86Cmov32RMC, kRegMemCond, IS_QUAD_OP | IS_LOAD | REG_DEF0_USE01 | USES_CCODES, { 0,     0, 0x0F, 0x40, 0, 0, 0, 0, false }, "Cmovcc32RM", "!3c !0r,[!1r+!2d]" },
-  { kX86Cmov64RMC, kRegMemCond, IS_QUAD_OP | IS_LOAD | REG_DEF0_USE01 | USES_CCODES, { REX_W, 0, 0x0F, 0x40, 0, 0, 0, 0, false }, "Cmovcc64RM", "!3c !0r,[!1r+!2d]" },
-
-#define SHIFT_ENCODING_MAP(opname, modrm_opcode) \
-{ kX86 ## opname ## 8RI, kShiftRegImm,                        IS_BINARY_OP   | REG_DEF0_USE0 |            SETS_CCODES, { 0,    0, 0xC0, 0, 0, modrm_opcode, 0xD1, 1, true }, #opname "8RI", "!0r,!1d" }, \
-{ kX86 ## opname ## 8MI, kShiftMemImm,   IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      |            SETS_CCODES, { 0,    0, 0xC0, 0, 0, modrm_opcode, 0xD1, 1, true }, #opname "8MI", "[!0r+!1d],!2d" }, \
-{ kX86 ## opname ## 8AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     |            SETS_CCODES, { 0,    0, 0xC0, 0, 0, modrm_opcode, 0xD1, 1, true }, #opname "8AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
-{ kX86 ## opname ## 8RC, kShiftRegCl,                         IS_BINARY_OP   | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0,    0, 0xD2, 0, 0, modrm_opcode, 0,    1, true }, #opname "8RC", "!0r,cl" }, \
-{ kX86 ## opname ## 8MC, kShiftMemCl,    IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      | REG_USEC | SETS_CCODES, { 0,    0, 0xD2, 0, 0, modrm_opcode, 0,    1, true }, #opname "8MC", "[!0r+!1d],cl" }, \
-{ kX86 ## opname ## 8AC, kShiftArrayCl,  IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     | REG_USEC | SETS_CCODES, { 0,    0, 0xD2, 0, 0, modrm_opcode, 0,    1, true }, #opname "8AC", "[!0r+!1r<<!2d+!3d],cl" }, \
-  \
-{ kX86 ## opname ## 16RI, kShiftRegImm,                        IS_BINARY_OP   | REG_DEF0_USE0 |            SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1, false }, #opname "16RI", "!0r,!1d" }, \
-{ kX86 ## opname ## 16MI, kShiftMemImm,   IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      |            SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1, false }, #opname "16MI", "[!0r+!1d],!2d" }, \
-{ kX86 ## opname ## 16AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     |            SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1, false }, #opname "16AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
-{ kX86 ## opname ## 16RC, kShiftRegCl,                         IS_BINARY_OP   | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0,    1, false }, #opname "16RC", "!0r,cl" }, \
-{ kX86 ## opname ## 16MC, kShiftMemCl,    IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0,    1, false }, #opname "16MC", "[!0r+!1d],cl" }, \
-{ kX86 ## opname ## 16AC, kShiftArrayCl,  IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0,    1, false }, #opname "16AC", "[!0r+!1r<<!2d+!3d],cl" }, \
-  \
-{ kX86 ## opname ## 32RI, kShiftRegImm,                        IS_BINARY_OP   | REG_DEF0_USE0 |            SETS_CCODES, { 0,    0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1, false }, #opname "32RI", "!0r,!1d" }, \
-{ kX86 ## opname ## 32MI, kShiftMemImm,   IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      |            SETS_CCODES, { 0,    0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1, false }, #opname "32MI", "[!0r+!1d],!2d" }, \
-{ kX86 ## opname ## 32AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     |            SETS_CCODES, { 0,    0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1, false }, #opname "32AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
-{ kX86 ## opname ## 32RC, kShiftRegCl,                         IS_BINARY_OP   | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0,    0, 0xD3, 0, 0, modrm_opcode, 0,    0, false }, #opname "32RC", "!0r,cl" }, \
-{ kX86 ## opname ## 32MC, kShiftMemCl,    IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      | REG_USEC | SETS_CCODES, { 0,    0, 0xD3, 0, 0, modrm_opcode, 0,    0, false }, #opname "32MC", "[!0r+!1d],cl" }, \
-{ kX86 ## opname ## 32AC, kShiftArrayCl,  IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     | REG_USEC | SETS_CCODES, { 0,    0, 0xD3, 0, 0, modrm_opcode, 0,    0, false }, #opname "32AC", "[!0r+!1r<<!2d+!3d],cl" }, \
-  \
-{ kX86 ## opname ## 64RI, kShiftRegImm,                        IS_BINARY_OP   | REG_DEF0_USE0 |            SETS_CCODES, { REX_W,    0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1, false }, #opname "64RI", "!0r,!1d" }, \
-{ kX86 ## opname ## 64MI, kShiftMemImm,   IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      |            SETS_CCODES, { REX_W,    0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1, false }, #opname "64MI", "[!0r+!1d],!2d" }, \
-{ kX86 ## opname ## 64AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     |            SETS_CCODES, { REX_W,    0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1, false }, #opname "64AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
-{ kX86 ## opname ## 64RC, kShiftRegCl,                         IS_BINARY_OP   | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { REX_W,    0, 0xD3, 0, 0, modrm_opcode, 0,    0, false }, #opname "64RC", "!0r,cl" }, \
-{ kX86 ## opname ## 64MC, kShiftMemCl,    IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      | REG_USEC | SETS_CCODES, { REX_W,    0, 0xD3, 0, 0, modrm_opcode, 0,    0, false }, #opname "64MC", "[!0r+!1d],cl" }, \
-{ kX86 ## opname ## 64AC, kShiftArrayCl,  IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     | REG_USEC | SETS_CCODES, { REX_W,    0, 0xD3, 0, 0, modrm_opcode, 0,    0, false }, #opname "64AC", "[!0r+!1r<<!2d+!3d],cl" }
-
-  SHIFT_ENCODING_MAP(Rol, 0x0),
-  SHIFT_ENCODING_MAP(Ror, 0x1),
-  SHIFT_ENCODING_MAP(Rcl, 0x2),
-  SHIFT_ENCODING_MAP(Rcr, 0x3),
-  SHIFT_ENCODING_MAP(Sal, 0x4),
-  SHIFT_ENCODING_MAP(Shr, 0x5),
-  SHIFT_ENCODING_MAP(Sar, 0x7),
-#undef SHIFT_ENCODING_MAP
-
-  { kX86Cmc, kNullary, NO_OPERAND, { 0, 0, 0xF5, 0, 0, 0, 0, 0, false }, "Cmc", "" },
-  { kX86Shld32RRI,  kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0_USE01  | SETS_CCODES,            { 0,    0, 0x0F, 0xA4, 0, 0, 0, 1, false }, "Shld32RRI", "!0r,!1r,!2d" },
-  { kX86Shld32RRC,  kShiftRegRegCl,  IS_TERTIARY_OP | REG_DEF0_USE01  | REG_USEC | SETS_CCODES, { 0,    0, 0x0F, 0xA5, 0, 0, 0, 0, false }, "Shld32RRC", "!0r,!1r,cl" },
-  { kX86Shld32MRI,  kMemRegImm,      IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { 0,    0, 0x0F, 0xA4, 0, 0, 0, 1, false }, "Shld32MRI", "[!0r+!1d],!2r,!3d" },
-  { kX86Shrd32RRI,  kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0_USE01  | SETS_CCODES,            { 0,    0, 0x0F, 0xAC, 0, 0, 0, 1, false }, "Shrd32RRI", "!0r,!1r,!2d" },
-  { kX86Shrd32RRC,  kShiftRegRegCl,  IS_TERTIARY_OP | REG_DEF0_USE01  | REG_USEC | SETS_CCODES, { 0,    0, 0x0F, 0xAD, 0, 0, 0, 0, false }, "Shrd32RRC", "!0r,!1r,cl" },
-  { kX86Shrd32MRI,  kMemRegImm,      IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { 0,    0, 0x0F, 0xAC, 0, 0, 0, 1, false }, "Shrd32MRI", "[!0r+!1d],!2r,!3d" },
-  { kX86Shld64RRI,  kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0_USE01  | SETS_CCODES,            { REX_W,    0, 0x0F, 0xA4, 0, 0, 0, 1, false }, "Shld64RRI", "!0r,!1r,!2d" },
-  { kX86Shld64MRI,  kMemRegImm,      IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { REX_W,    0, 0x0F, 0xA4, 0, 0, 0, 1, false }, "Shld64MRI", "[!0r+!1d],!2r,!3d" },
-  { kX86Shrd64RRI,  kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0_USE01  | SETS_CCODES,            { REX_W,    0, 0x0F, 0xAC, 0, 0, 0, 1, false }, "Shrd64RRI", "!0r,!1r,!2d" },
-  { kX86Shrd64MRI,  kMemRegImm,      IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { REX_W,    0, 0x0F, 0xAC, 0, 0, 0, 1, false }, "Shrd64MRI", "[!0r+!1d],!2r,!3d" },
-
-  { kX86Test8RI,  kRegImm,             IS_BINARY_OP   | REG_USE0  | SETS_CCODES, { 0,     0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8RI", "!0r,!1d" },
-  { kX86Test8MI,  kMemImm,   IS_LOAD | IS_TERTIARY_OP | REG_USE0  | SETS_CCODES, { 0,     0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8MI", "[!0r+!1d],!2d" },
-  { kX86Test8AI,  kArrayImm, IS_LOAD | IS_QUIN_OP     | REG_USE01 | SETS_CCODES, { 0,     0, 0xF6, 0, 0, 0, 0, 1, true }, "Test8AI", "[!0r+!1r<<!2d+!3d],!4d" },
-  { kX86Test16RI, kRegImm,             IS_BINARY_OP   | REG_USE0  | SETS_CCODES, { 0x66,  0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16RI", "!0r,!1d" },
-  { kX86Test16MI, kMemImm,   IS_LOAD | IS_TERTIARY_OP | REG_USE0  | SETS_CCODES, { 0x66,  0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16MI", "[!0r+!1d],!2d" },
-  { kX86Test16AI, kArrayImm, IS_LOAD | IS_QUIN_OP     | REG_USE01 | SETS_CCODES, { 0x66,  0, 0xF7, 0, 0, 0, 0, 2, false }, "Test16AI", "[!0r+!1r<<!2d+!3d],!4d" },
-  { kX86Test32RI, kRegImm,             IS_BINARY_OP   | REG_USE0  | SETS_CCODES, { 0,     0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32RI", "!0r,!1d" },
-  { kX86Test32MI, kMemImm,   IS_LOAD | IS_TERTIARY_OP | REG_USE0  | SETS_CCODES, { 0,     0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32MI", "[!0r+!1d],!2d" },
-  { kX86Test32AI, kArrayImm, IS_LOAD | IS_QUIN_OP     | REG_USE01 | SETS_CCODES, { 0,     0, 0xF7, 0, 0, 0, 0, 4, false }, "Test32AI", "[!0r+!1r<<!2d+!3d],!4d" },
-  { kX86Test64RI, kRegImm,             IS_BINARY_OP   | REG_USE0  | SETS_CCODES, { REX_W, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test64RI", "!0r,!1d" },
-  { kX86Test64MI, kMemImm,   IS_LOAD | IS_TERTIARY_OP | REG_USE0  | SETS_CCODES, { REX_W, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test64MI", "[!0r+!1d],!2d" },
-  { kX86Test64AI, kArrayImm, IS_LOAD | IS_QUIN_OP     | REG_USE01 | SETS_CCODES, { REX_W, 0, 0xF7, 0, 0, 0, 0, 4, false }, "Test64AI", "[!0r+!1r<<!2d+!3d],!4d" },
-
-  { kX86Test32RR, kRegReg,             IS_BINARY_OP   | REG_USE01 | SETS_CCODES, { 0,     0, 0x85, 0, 0, 0, 0, 0, false }, "Test32RR", "!0r,!1r" },
-  { kX86Test64RR, kRegReg,             IS_BINARY_OP   | REG_USE01 | SETS_CCODES, { REX_W, 0, 0x85, 0, 0, 0, 0, 0, false }, "Test64RR", "!0r,!1r" },
-  { kX86Test32RM, kRegMem,   IS_LOAD | IS_TERTIARY_OP | REG_USE01 | SETS_CCODES, { 0,     0, 0x85, 0, 0, 0, 0, 0, false }, "Test32RM", "!0r,[!1r+!2d]" },
-
-#define UNARY_ENCODING_MAP(opname, modrm, is_store, sets_ccodes, \
-                           reg, reg_kind, reg_flags, \
-                           mem, mem_kind, mem_flags, \
-                           arr, arr_kind, arr_flags, imm, \
-                           b_flags, hw_flags, w_flags, \
-                           b_format, hw_format, w_format) \
-{ kX86 ## opname ## 8 ## reg,  reg_kind,                      reg_flags | b_flags  | sets_ccodes, { 0,    0, 0xF6, 0, 0, modrm, 0, imm << 0, true }, #opname "8" #reg, b_format "!0r" }, \
-{ kX86 ## opname ## 8 ## mem,  mem_kind, IS_LOAD | is_store | mem_flags | b_flags  | sets_ccodes, { 0,    0, 0xF6, 0, 0, modrm, 0, imm << 0, true }, #opname "8" #mem, b_format "[!0r+!1d]" }, \
-{ kX86 ## opname ## 8 ## arr,  arr_kind, IS_LOAD | is_store | arr_flags | b_flags  | sets_ccodes, { 0,    0, 0xF6, 0, 0, modrm, 0, imm << 0, true }, #opname "8" #arr, b_format "[!0r+!1r<<!2d+!3d]" }, \
-{ kX86 ## opname ## 16 ## reg, reg_kind,                      reg_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1, false }, #opname "16" #reg, hw_format "!0r" }, \
-{ kX86 ## opname ## 16 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1, false }, #opname "16" #mem, hw_format "[!0r+!1d]" }, \
-{ kX86 ## opname ## 16 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1, false }, #opname "16" #arr, hw_format "[!0r+!1r<<!2d+!3d]" }, \
-{ kX86 ## opname ## 32 ## reg, reg_kind,                      reg_flags | w_flags  | sets_ccodes, { 0,    0, 0xF7, 0, 0, modrm, 0, imm << 2, false }, #opname "32" #reg, w_format "!0r" }, \
-{ kX86 ## opname ## 32 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | w_flags  | sets_ccodes, { 0,    0, 0xF7, 0, 0, modrm, 0, imm << 2, false }, #opname "32" #mem, w_format "[!0r+!1d]" }, \
-{ kX86 ## opname ## 32 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | w_flags  | sets_ccodes, { 0,    0, 0xF7, 0, 0, modrm, 0, imm << 2, false }, #opname "32" #arr, w_format "[!0r+!1r<<!2d+!3d]" }, \
-{ kX86 ## opname ## 64 ## reg, reg_kind,                      reg_flags | w_flags  | sets_ccodes, { REX_W, 0, 0xF7, 0, 0, modrm, 0, imm << 2, false }, #opname "64" #reg, w_format "!0r" }, \
-{ kX86 ## opname ## 64 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | w_flags  | sets_ccodes, { REX_W, 0, 0xF7, 0, 0, modrm, 0, imm << 2, false }, #opname "64" #mem, w_format "[!0r+!1d]" }, \
-{ kX86 ## opname ## 64 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | w_flags  | sets_ccodes, { REX_W, 0, 0xF7, 0, 0, modrm, 0, imm << 2, false }, #opname "64" #arr, w_format "[!0r+!1r<<!2d+!3d]" }
-
-  UNARY_ENCODING_MAP(Not, 0x2, IS_STORE, 0,           R, kReg, IS_UNARY_OP | REG_DEF0_USE0, M, kMem, IS_BINARY_OP | REG_USE0, A, kArray, IS_QUAD_OP | REG_USE01, 0, 0, 0, 0, "", "", ""),
-  UNARY_ENCODING_MAP(Neg, 0x3, IS_STORE, SETS_CCODES, R, kReg, IS_UNARY_OP | REG_DEF0_USE0, M, kMem, IS_BINARY_OP | REG_USE0, A, kArray, IS_QUAD_OP | REG_USE01, 0, 0, 0, 0, "", "", ""),
-
-  UNARY_ENCODING_MAP(Mul,     0x4, 0, SETS_CCODES, DaR, kReg, IS_UNARY_OP | REG_USE0, DaM, kMem, IS_BINARY_OP | REG_USE0, DaA, kArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEA,  REG_DEFAD_USEA,  "ax,al,", "dx:ax,ax,", "edx:eax,eax,"),
-  UNARY_ENCODING_MAP(Imul,    0x5, 0, SETS_CCODES, DaR, kReg, IS_UNARY_OP | REG_USE0, DaM, kMem, IS_BINARY_OP | REG_USE0, DaA, kArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEA,  REG_DEFAD_USEA,  "ax,al,", "dx:ax,ax,", "edx:eax,eax,"),
-  UNARY_ENCODING_MAP(Divmod,  0x6, 0, SETS_CCODES, DaR, kReg, IS_UNARY_OP | REG_USE0, DaM, kMem, IS_BINARY_OP | REG_USE0, DaA, kArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEAD, REG_DEFAD_USEAD, "ah:al,ax,", "dx:ax,dx:ax,", "edx:eax,edx:eax,"),
-  UNARY_ENCODING_MAP(Idivmod, 0x7, 0, SETS_CCODES, DaR, kReg, IS_UNARY_OP | REG_USE0, DaM, kMem, IS_BINARY_OP | REG_USE0, DaA, kArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEAD, REG_DEFAD_USEAD, "ah:al,ax,", "dx:ax,dx:ax,", "edx:eax,edx:eax,"),
-#undef UNARY_ENCODING_MAP
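-
-  // Illustrative encodings for the unary map above: "not ecx" is F7 /2, i.e. F7 D1,
-  // and "neg ecx" is F7 /3, i.e. F7 D9; the 8-bit rows use opcode F6 with the same
-  // modrm opcode extensions.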
-
-  { kx86Cdq32Da, kRegOpcode, NO_OPERAND | REG_DEFAD_USEA,                                  { 0,     0, 0x99, 0,    0, 0, 0, 0, false }, "Cdq", "" },
-  { kx86Cqo64Da, kRegOpcode, NO_OPERAND | REG_DEFAD_USEA,                                  { REX_W, 0, 0x99, 0,    0, 0, 0, 0, false }, "Cqo", "" },
-  { kX86Bswap32R, kRegOpcode, IS_UNARY_OP | REG_DEF0_USE0,                                 { 0,     0, 0x0F, 0xC8, 0, 0, 0, 0, false }, "Bswap32R", "!0r" },
-  { kX86Bswap64R, kRegOpcode, IS_UNARY_OP | REG_DEF0_USE0,                                 { REX_W, 0, 0x0F, 0xC8, 0, 0, 0, 0, false }, "Bswap64R", "!0r" },
-  { kX86Push32R,  kRegOpcode, IS_UNARY_OP | REG_USE0 | REG_USE_SP | REG_DEF_SP | IS_STORE, { 0,     0, 0x50, 0,    0, 0, 0, 0, false }, "Push32R",  "!0r" },
-  { kX86Pop32R,   kRegOpcode, IS_UNARY_OP | REG_DEF0 | REG_USE_SP | REG_DEF_SP | IS_LOAD,  { 0,     0, 0x58, 0,    0, 0, 0, 0, false }, "Pop32R",   "!0r" },
-
-#define EXT_0F_ENCODING_MAP(opname, prefix, opcode, reg_def) \
-{ kX86 ## opname ## RR, kRegReg,             IS_BINARY_OP   | reg_def | REG_USE1,  { prefix, 0, 0x0F, opcode, 0, 0, 0, 0, false }, #opname "RR", "!0r,!1r" }, \
-{ kX86 ## opname ## RM, kRegMem,   IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE1,  { prefix, 0, 0x0F, opcode, 0, 0, 0, 0, false }, #opname "RM", "!0r,[!1r+!2d]" }, \
-{ kX86 ## opname ## RA, kRegArray, IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE12, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0, false }, #opname "RA", "!0r,[!1r+!2r<<!3d+!4d]" }
-
-// This is a special encoding with r8_form on the second register only
-// for Movzx8 and Movsx8.
-#define EXT_0F_R8_FORM_ENCODING_MAP(opname, prefix, opcode, reg_def) \
-{ kX86 ## opname ## RR, kRegReg,             IS_BINARY_OP   | reg_def | REG_USE1,  { prefix, 0, 0x0F, opcode, 0, 0, 0, 0, true }, #opname "RR", "!0r,!1r" }, \
-{ kX86 ## opname ## RM, kRegMem,   IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE1,  { prefix, 0, 0x0F, opcode, 0, 0, 0, 0, false }, #opname "RM", "!0r,[!1r+!2d]" }, \
-{ kX86 ## opname ## RA, kRegArray, IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE12, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0, false }, #opname "RA", "!0r,[!1r+!2r<<!3d+!4d]" }
-
-#define EXT_0F_REX_W_ENCODING_MAP(opname, prefix, opcode, reg_def) \
-{ kX86 ## opname ## RR, kRegReg,             IS_BINARY_OP   | reg_def | REG_USE1,  { prefix, REX_W, 0x0F, opcode, 0, 0, 0, 0, false }, #opname "RR", "!0r,!1r" }, \
-{ kX86 ## opname ## RM, kRegMem,   IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE1,  { prefix, REX_W, 0x0F, opcode, 0, 0, 0, 0, false }, #opname "RM", "!0r,[!1r+!2d]" }, \
-{ kX86 ## opname ## RA, kRegArray, IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE12, { prefix, REX_W, 0x0F, opcode, 0, 0, 0, 0, false }, #opname "RA", "!0r,[!1r+!2r<<!3d+!4d]" }
-
-#define EXT_0F_ENCODING2_MAP(opname, prefix, opcode, opcode2, reg_def) \
-{ kX86 ## opname ## RR, kRegReg,             IS_BINARY_OP   | reg_def | REG_USE1,  { prefix, 0, 0x0F, opcode, opcode2, 0, 0, 0, false }, #opname "RR", "!0r,!1r" }, \
-{ kX86 ## opname ## RM, kRegMem,   IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE1,  { prefix, 0, 0x0F, opcode, opcode2, 0, 0, 0, false }, #opname "RM", "!0r,[!1r+!2d]" }, \
-{ kX86 ## opname ## RA, kRegArray, IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE12, { prefix, 0, 0x0F, opcode, opcode2, 0, 0, 0, false }, #opname "RA", "!0r,[!1r+!2r<<!3d+!4d]" }
-
-  EXT_0F_ENCODING_MAP(Movsd, 0xF2, 0x10, REG_DEF0),
-  { kX86MovsdMR, kMemReg,   IS_STORE | IS_TERTIARY_OP | REG_USE02,  { 0xF2, 0, 0x0F, 0x11, 0, 0, 0, 0, false }, "MovsdMR", "[!0r+!1d],!2r" },
-  { kX86MovsdAR, kArrayReg, IS_STORE | IS_QUIN_OP     | REG_USE014, { 0xF2, 0, 0x0F, 0x11, 0, 0, 0, 0, false }, "MovsdAR", "[!0r+!1r<<!2d+!3d],!4r" },
-
-  EXT_0F_ENCODING_MAP(Movss, 0xF3, 0x10, REG_DEF0),
-  { kX86MovssMR, kMemReg,   IS_STORE | IS_TERTIARY_OP | REG_USE02,  { 0xF3, 0, 0x0F, 0x11, 0, 0, 0, 0, false }, "MovssMR", "[!0r+!1d],!2r" },
-  { kX86MovssAR, kArrayReg, IS_STORE | IS_QUIN_OP     | REG_USE014, { 0xF3, 0, 0x0F, 0x11, 0, 0, 0, 0, false }, "MovssAR", "[!0r+!1r<<!2d+!3d],!4r" },
-
-  EXT_0F_ENCODING_MAP(Cvtsi2sd,  0xF2, 0x2A, REG_DEF0),
-  EXT_0F_ENCODING_MAP(Cvtsi2ss,  0xF3, 0x2A, REG_DEF0),
-  EXT_0F_REX_W_ENCODING_MAP(Cvtsqi2sd,  0xF2, 0x2A, REG_DEF0),
-  EXT_0F_REX_W_ENCODING_MAP(Cvtsqi2ss,  0xF3, 0x2A, REG_DEF0),
-  EXT_0F_ENCODING_MAP(Cvttsd2si, 0xF2, 0x2C, REG_DEF0),
-  EXT_0F_ENCODING_MAP(Cvttss2si, 0xF3, 0x2C, REG_DEF0),
-  EXT_0F_REX_W_ENCODING_MAP(Cvttsd2sqi, 0xF2, 0x2C, REG_DEF0),
-  EXT_0F_REX_W_ENCODING_MAP(Cvttss2sqi, 0xF3, 0x2C, REG_DEF0),
-  EXT_0F_ENCODING_MAP(Cvtsd2si,  0xF2, 0x2D, REG_DEF0),
-  EXT_0F_ENCODING_MAP(Cvtss2si,  0xF3, 0x2D, REG_DEF0),
-  EXT_0F_ENCODING_MAP(Ucomisd,   0x66, 0x2E, SETS_CCODES|REG_USE0),
-  EXT_0F_ENCODING_MAP(Ucomiss,   0x00, 0x2E, SETS_CCODES|REG_USE0),
-  EXT_0F_ENCODING_MAP(Comisd,    0x66, 0x2F, SETS_CCODES|REG_USE0),
-  EXT_0F_ENCODING_MAP(Comiss,    0x00, 0x2F, SETS_CCODES|REG_USE0),
-  EXT_0F_ENCODING_MAP(Orpd,      0x66, 0x56, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Orps,      0x00, 0x56, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Andpd,     0x66, 0x54, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Andps,     0x00, 0x54, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Xorpd,     0x66, 0x57, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Xorps,     0x00, 0x57, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Addsd,     0xF2, 0x58, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Addss,     0xF3, 0x58, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Mulsd,     0xF2, 0x59, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Mulss,     0xF3, 0x59, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Cvtsd2ss,  0xF2, 0x5A, REG_DEF0),
-  EXT_0F_ENCODING_MAP(Cvtss2sd,  0xF3, 0x5A, REG_DEF0),
-  EXT_0F_ENCODING_MAP(Subsd,     0xF2, 0x5C, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Subss,     0xF3, 0x5C, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Divsd,     0xF2, 0x5E, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Divss,     0xF3, 0x5E, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Punpcklbw, 0x66, 0x60, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Punpcklwd, 0x66, 0x61, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Punpckldq, 0x66, 0x62, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Punpcklqdq, 0x66, 0x6C, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Sqrtsd,    0xF2, 0x51, REG_DEF0_USE0),
-  EXT_0F_ENCODING2_MAP(Pmulld,   0x66, 0x38, 0x40, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Pmullw,    0x66, 0xD5, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Pmuludq,   0x66, 0xF4, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Mulps,     0x00, 0x59, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Mulpd,     0x66, 0x59, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Paddb,     0x66, 0xFC, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Paddw,     0x66, 0xFD, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Paddd,     0x66, 0xFE, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Paddq,     0x66, 0xD4, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Psadbw,    0x66, 0xF6, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Addps,     0x00, 0x58, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Addpd,     0x66, 0x58, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Psubb,     0x66, 0xF8, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Psubw,     0x66, 0xF9, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Psubd,     0x66, 0xFA, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Psubq,     0x66, 0xFB, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Subps,     0x00, 0x5C, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Subpd,     0x66, 0x5C, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Pand,      0x66, 0xDB, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Por,       0x66, 0xEB, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Pxor,      0x66, 0xEF, REG_DEF0_USE0),
-  EXT_0F_ENCODING2_MAP(Phaddw,   0x66, 0x38, 0x01, REG_DEF0_USE0),
-  EXT_0F_ENCODING2_MAP(Phaddd,   0x66, 0x38, 0x02, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Haddpd,    0x66, 0x7C, REG_DEF0_USE0),
-  EXT_0F_ENCODING_MAP(Haddps,    0xF2, 0x7C, REG_DEF0_USE0),
-
-  { kX86PextrbRRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0  | REG_USE1, { 0x66, 0, 0x0F, 0x3A, 0x14, 0, 0, 1, false }, "PextrbRRI", "!0r,!1r,!2d" },
-  { kX86PextrwRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0  | REG_USE1, { 0x66, 0, 0x0F, 0xC5, 0x00, 0, 0, 1, false }, "PextrwRRI", "!0r,!1r,!2d" },
-  { kX86PextrdRRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0  | REG_USE1, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextrdRRI", "!0r,!1r,!2d" },
-  { kX86PextrbMRI, kMemRegImm, IS_QUAD_OP     | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x14, 0, 0, 1, false }, "PextrbMRI", "[!0r+!1d],!2r,!3d" },
-  { kX86PextrwMRI, kMemRegImm, IS_QUAD_OP     | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x15, 0, 0, 1, false }, "PextrwMRI", "[!0r+!1d],!2r,!3d" },
-  { kX86PextrdMRI, kMemRegImm, IS_QUAD_OP     | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextrdMRI", "[!0r+!1d],!2r,!3d" },
-
-  { kX86PshuflwRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0xF2, 0, 0x0F, 0x70, 0, 0, 0, 1, false }, "PshuflwRRI", "!0r,!1r,!2d" },
-  { kX86PshufdRRI,  kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0x70, 0, 0, 0, 1, false }, "PshufdRRI", "!0r,!1r,!2d" },
-
-  { kX86ShufpsRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0_USE0 | REG_USE1, { 0x00, 0, 0x0F, 0xC6, 0, 0, 0, 1, false }, "ShufpsRRI", "!0r,!1r,!2d" },
-  { kX86ShufpdRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0_USE0 | REG_USE1, { 0x66, 0, 0x0F, 0xC6, 0, 0, 0, 1, false }, "ShufpdRRI", "!0r,!1r,!2d" },
-
-  { kX86PsrawRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x71, 0, 4, 0, 1, false }, "PsrawRI", "!0r,!1d" },
-  { kX86PsradRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x72, 0, 4, 0, 1, false }, "PsradRI", "!0r,!1d" },
-  { kX86PsrlwRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x71, 0, 2, 0, 1, false }, "PsrlwRI", "!0r,!1d" },
-  { kX86PsrldRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x72, 0, 2, 0, 1, false }, "PsrldRI", "!0r,!1d" },
-  { kX86PsrlqRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x73, 0, 2, 0, 1, false }, "PsrlqRI", "!0r,!1d" },
-  { kX86PsrldqRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x73, 0, 3, 0, 1, false }, "PsrldqRI", "!0r,!1d" },
-  { kX86PsllwRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x71, 0, 6, 0, 1, false }, "PsllwRI", "!0r,!1d" },
-  { kX86PslldRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x72, 0, 6, 0, 1, false }, "PslldRI", "!0r,!1d" },
-  { kX86PsllqRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x73, 0, 6, 0, 1, false }, "PsllqRI", "!0r,!1d" },
-
-  { kX86Fild32M,  kMem,     IS_LOAD    | IS_BINARY_OP | REG_USE0 | USE_FP_STACK, { 0x0,  0,    0xDB, 0x00, 0, 0, 0, 0, false }, "Fild32M",  "[!0r+!1d]" },
-  { kX86Fild64M,  kMem,     IS_LOAD    | IS_BINARY_OP | REG_USE0 | USE_FP_STACK, { 0x0,  0,    0xDF, 0x00, 0, 5, 0, 0, false }, "Fild64M",  "[!0r+!1d]" },
-  { kX86Fld32M,   kMem,     IS_LOAD    | IS_BINARY_OP | REG_USE0 | USE_FP_STACK, { 0x0,  0,    0xD9, 0x00, 0, 0, 0, 0, false }, "Fld32M",   "[!0r+!1d]" },
-  { kX86Fld64M,   kMem,     IS_LOAD    | IS_BINARY_OP | REG_USE0 | USE_FP_STACK, { 0x0,  0,    0xDD, 0x00, 0, 0, 0, 0, false }, "Fld64M",   "[!0r+!1d]" },
-  { kX86Fstp32M,  kMem,     IS_STORE   | IS_BINARY_OP | REG_USE0 | USE_FP_STACK, { 0x0,  0,    0xD9, 0x00, 0, 3, 0, 0, false }, "Fstps32M", "[!0r+!1d]" },
-  { kX86Fstp64M,  kMem,     IS_STORE   | IS_BINARY_OP | REG_USE0 | USE_FP_STACK, { 0x0,  0,    0xDD, 0x00, 0, 3, 0, 0, false }, "Fstpd64M", "[!0r+!1d]" },
-  { kX86Fst32M,   kMem,     IS_STORE   | IS_BINARY_OP | REG_USE0 | USE_FP_STACK, { 0x0,  0,    0xD9, 0x00, 0, 2, 0, 0, false }, "Fsts32M",  "[!0r+!1d]" },
-  { kX86Fst64M,   kMem,     IS_STORE   | IS_BINARY_OP | REG_USE0 | USE_FP_STACK, { 0x0,  0,    0xDD, 0x00, 0, 2, 0, 0, false }, "Fstd64M",  "[!0r+!1d]" },
-  { kX86Fprem,    kNullary, NO_OPERAND | USE_FP_STACK,                          { 0xD9, 0,    0xF8, 0,    0, 0, 0, 0, false }, "Fprem64",  "" },
-  { kX86Fucompp,  kNullary, NO_OPERAND | USE_FP_STACK,                          { 0xDA, 0,    0xE9, 0,    0, 0, 0, 0, false }, "Fucompp",  "" },
-  { kX86Fstsw16R, kNullary, NO_OPERAND | REG_DEFA | USE_FP_STACK,               { 0x9B, 0xDF, 0xE0, 0,    0, 0, 0, 0, false }, "Fstsw16R", "ax" },
-
-  EXT_0F_ENCODING_MAP(Movdqa,    0x66, 0x6F, REG_DEF0),
-  { kX86MovdqaMR, kMemReg,   IS_STORE | IS_TERTIARY_OP | REG_USE02,  { 0x66, 0, 0x0F, 0x6F, 0, 0, 0, 0, false }, "MovdqaMR", "[!0r+!1d],!2r" },
-  { kX86MovdqaAR, kArrayReg, IS_STORE | IS_QUIN_OP     | REG_USE014, { 0x66, 0, 0x0F, 0x6F, 0, 0, 0, 0, false }, "MovdqaAR", "[!0r+!1r<<!2d+!3d],!4r" },
-
-  EXT_0F_ENCODING_MAP(Movups,    0x0, 0x10, REG_DEF0),
-  { kX86MovupsMR, kMemReg,      IS_STORE | IS_TERTIARY_OP | REG_USE02,  { 0x0, 0, 0x0F, 0x11, 0, 0, 0, 0, false }, "MovupsMR", "[!0r+!1d],!2r" },
-  { kX86MovupsAR, kArrayReg,    IS_STORE | IS_QUIN_OP     | REG_USE014, { 0x0, 0, 0x0F, 0x11, 0, 0, 0, 0, false }, "MovupsAR", "[!0r+!1r<<!2d+!3d],!4r" },
-
-  EXT_0F_ENCODING_MAP(Movaps,    0x0, 0x28, REG_DEF0),
-  { kX86MovapsMR, kMemReg,      IS_STORE | IS_TERTIARY_OP | REG_USE02,  { 0x0, 0, 0x0F, 0x29, 0, 0, 0, 0, false }, "MovapsMR", "[!0r+!1d],!2r" },
-  { kX86MovapsAR, kArrayReg,    IS_STORE | IS_QUIN_OP     | REG_USE014, { 0x0, 0, 0x0F, 0x29, 0, 0, 0, 0, false }, "MovapsAR", "[!0r+!1r<<!2d+!3d],!4r" },
-
-  { kX86MovlpsRM, kRegMem,      IS_LOAD | IS_TERTIARY_OP | REG_DEF0 | REG_USE01,  { 0x0, 0, 0x0F, 0x12, 0, 0, 0, 0, false }, "MovlpsRM", "!0r,[!1r+!2d]" },
-  { kX86MovlpsRA, kRegArray,    IS_LOAD | IS_QUIN_OP     | REG_DEF0 | REG_USE012, { 0x0, 0, 0x0F, 0x12, 0, 0, 0, 0, false }, "MovlpsRA", "!0r,[!1r+!2r<<!3d+!4d]" },
-  { kX86MovlpsMR, kMemReg,      IS_STORE | IS_TERTIARY_OP | REG_USE02,            { 0x0, 0, 0x0F, 0x13, 0, 0, 0, 0, false }, "MovlpsMR", "[!0r+!1d],!2r" },
-  { kX86MovlpsAR, kArrayReg,    IS_STORE | IS_QUIN_OP     | REG_USE014,           { 0x0, 0, 0x0F, 0x13, 0, 0, 0, 0, false }, "MovlpsAR", "[!0r+!1r<<!2d+!3d],!4r" },
-
-  { kX86MovhpsRM, kRegMem,      IS_LOAD | IS_TERTIARY_OP | REG_DEF0 | REG_USE01,  { 0x0, 0, 0x0F, 0x16, 0, 0, 0, 0, false }, "MovhpsRM", "!0r,[!1r+!2d]" },
-  { kX86MovhpsRA, kRegArray,    IS_LOAD | IS_QUIN_OP     | REG_DEF0 | REG_USE012, { 0x0, 0, 0x0F, 0x16, 0, 0, 0, 0, false }, "MovhpsRA", "!0r,[!1r+!2r<<!3d+!4d]" },
-  { kX86MovhpsMR, kMemReg,      IS_STORE | IS_TERTIARY_OP | REG_USE02,            { 0x0, 0, 0x0F, 0x17, 0, 0, 0, 0, false }, "MovhpsMR", "[!0r+!1d],!2r" },
-  { kX86MovhpsAR, kArrayReg,    IS_STORE | IS_QUIN_OP     | REG_USE014,           { 0x0, 0, 0x0F, 0x17, 0, 0, 0, 0, false }, "MovhpsAR", "[!0r+!1r<<!2d+!3d],!4r" },
-
-  EXT_0F_ENCODING_MAP(Movdxr,    0x66, 0x6E, REG_DEF0),
-  EXT_0F_REX_W_ENCODING_MAP(Movqxr, 0x66, 0x6E, REG_DEF0),
-  { kX86MovqrxRR, kRegRegStore, IS_BINARY_OP | REG_DEF0   | REG_USE1,   { 0x66, REX_W, 0x0F, 0x7E, 0, 0, 0, 0, false }, "MovqrxRR", "!0r,!1r" },
-  { kX86MovqrxMR, kMemReg,      IS_STORE | IS_TERTIARY_OP | REG_USE02,  { 0x66, REX_W, 0x0F, 0x7E, 0, 0, 0, 0, false }, "MovqrxMR", "[!0r+!1d],!2r" },
-  { kX86MovqrxAR, kArrayReg,    IS_STORE | IS_QUIN_OP     | REG_USE014, { 0x66, REX_W, 0x0F, 0x7E, 0, 0, 0, 0, false }, "MovqrxAR", "[!0r+!1r<<!2d+!3d],!4r" },
-
-  { kX86MovdrxRR, kRegRegStore, IS_BINARY_OP | REG_DEF0   | REG_USE1,   { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0, false }, "MovdrxRR", "!0r,!1r" },
-  { kX86MovdrxMR, kMemReg,      IS_STORE | IS_TERTIARY_OP | REG_USE02,  { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0, false }, "MovdrxMR", "[!0r+!1d],!2r" },
-  { kX86MovdrxAR, kArrayReg,    IS_STORE | IS_QUIN_OP     | REG_USE014, { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0, false }, "MovdrxAR", "[!0r+!1r<<!2d+!3d],!4r" },
-
-  { kX86MovsxdRR, kRegReg,      IS_BINARY_OP | REG_DEF0 | REG_USE1,              { REX_W, 0, 0x63, 0, 0, 0, 0, 0, false }, "MovsxdRR", "!0r,!1r" },
-  { kX86MovsxdRM, kRegMem,      IS_LOAD | IS_TERTIARY_OP | REG_DEF0 | REG_USE1,  { REX_W, 0, 0x63, 0, 0, 0, 0, 0, false }, "MovsxdRM", "!0r,[!1r+!2d]" },
-  { kX86MovsxdRA, kRegArray,    IS_LOAD | IS_QUIN_OP     | REG_DEF0 | REG_USE12, { REX_W, 0, 0x63, 0, 0, 0, 0, 0, false }, "MovsxdRA", "!0r,[!1r+!2r<<!3d+!4d]" },
-
-  { kX86Set8R, kRegCond,   IS_BINARY_OP | REG_DEF0   | REG_USE0  | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0, true  }, "Set8R", "!1c !0r" },
-  { kX86Set8M, kMemCond,   IS_STORE | IS_TERTIARY_OP | REG_USE0  | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0, false }, "Set8M", "!2c [!0r+!1d]" },
-  { kX86Set8A, kArrayCond, IS_STORE | IS_QUIN_OP     | REG_USE01 | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0, false }, "Set8A", "!4c [!0r+!1r<<!2d+!3d]" },
-
-  // TODO: load/store?
-  // Encode the modrm opcode as an extra opcode byte to avoid computation during assembly.
-  { kX86Lfence, kReg,                 NO_OPERAND,     { 0, 0, 0x0F, 0xAE, 0, 5, 0, 0, false }, "Lfence", "" },
-  { kX86Mfence, kReg,                 NO_OPERAND,     { 0, 0, 0x0F, 0xAE, 0, 6, 0, 0, false }, "Mfence", "" },
-  { kX86Sfence, kReg,                 NO_OPERAND,     { 0, 0, 0x0F, 0xAE, 0, 7, 0, 0, false }, "Sfence", "" },
-  { kX86LockAdd32MI8, kMemImm,        IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0xF0, 0, 0x83, 0x0, 0x0, 0, 0, 1, false }, "LockAdd32MI8", "[!0r+!1d],!2d" },
-
-  EXT_0F_ENCODING_MAP(Imul16,  0x66, 0xAF, REG_USE0 | REG_DEF0 | SETS_CCODES),
-  EXT_0F_ENCODING_MAP(Imul32,  0x00, 0xAF, REG_USE0 | REG_DEF0 | SETS_CCODES),
-  EXT_0F_ENCODING_MAP(Imul64,  REX_W, 0xAF, REG_USE0 | REG_DEF0 | SETS_CCODES),
-
-  { kX86CmpxchgRR, kRegRegStore,  IS_BINARY_OP | REG_DEF0 | REG_USE01 | REG_DEFA_USEA | SETS_CCODES,   { 0,    0, 0x0F, 0xB1, 0, 0, 0, 0, false }, "Cmpxchg", "!0r,!1r" },
-  { kX86CmpxchgMR, kMemReg,       IS_STORE | IS_TERTIARY_OP | REG_USE02 | REG_DEFA_USEA | SETS_CCODES, { 0,    0, 0x0F, 0xB1, 0, 0, 0, 0, false }, "Cmpxchg", "[!0r+!1d],!2r" },
-  { kX86CmpxchgAR, kArrayReg,     IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES,    { 0,    0, 0x0F, 0xB1, 0, 0, 0, 0, false }, "Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" },
-  { kX86LockCmpxchgMR, kMemReg,   IS_STORE | IS_TERTIARY_OP | REG_USE02 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0, false }, "Lock Cmpxchg", "[!0r+!1d],!2r" },
-  { kX86LockCmpxchgAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES,    { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0, false }, "Lock Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" },
-  { kX86LockCmpxchg64AR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES,    { 0xF0, REX_W, 0x0F, 0xB1, 0, 0, 0, 0, false }, "Lock Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" },
-  { kX86LockCmpxchg64M, kMem,     IS_STORE | IS_BINARY_OP | REG_USE0 | REG_DEFAD_USEAD | REG_USEC | REG_USEB | SETS_CCODES, { 0xF0, 0, 0x0F, 0xC7, 0, 1, 0, 0, false }, "Lock Cmpxchg8b", "[!0r+!1d]" },
-  { kX86LockCmpxchg64A, kArray,   IS_STORE | IS_QUAD_OP | REG_USE01 | REG_DEFAD_USEAD | REG_USEC | REG_USEB | SETS_CCODES,  { 0xF0, 0, 0x0F, 0xC7, 0, 1, 0, 0, false }, "Lock Cmpxchg8b", "[!0r+!1r<<!2d+!3d]" },
-  { kX86XchgMR, kMemReg,          IS_STORE | IS_LOAD | IS_TERTIARY_OP | REG_DEF2 | REG_USE02,          { 0, 0, 0x87, 0, 0, 0, 0, 0, false }, "Xchg", "[!0r+!1d],!2r" },
-
-  EXT_0F_R8_FORM_ENCODING_MAP(Movzx8,  0x00, 0xB6, REG_DEF0),
-  EXT_0F_ENCODING_MAP(Movzx16, 0x00, 0xB7, REG_DEF0),
-  EXT_0F_R8_FORM_ENCODING_MAP(Movsx8,  0x00, 0xBE, REG_DEF0),
-  EXT_0F_ENCODING_MAP(Movsx16, 0x00, 0xBF, REG_DEF0),
-  EXT_0F_ENCODING_MAP(Movzx8q,  REX_W, 0xB6, REG_DEF0),
-  EXT_0F_ENCODING_MAP(Movzx16q, REX_W, 0xB7, REG_DEF0),
-  EXT_0F_ENCODING_MAP(Movsx8q,  REX, 0xBE, REG_DEF0),
-  EXT_0F_ENCODING_MAP(Movsx16q, REX_W, 0xBF, REG_DEF0),
-#undef EXT_0F_ENCODING_MAP
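-
-  // Illustrative encodings: Movzx8RR "movzx eax, cl" assembles to 0F B6 C1 and
-  // Movzx16RR "movzx eax, cx" to 0F B7 C1 (modrm C1 = mod 11, reg eax, rm ecx).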
-
-  { kX86Jcc8,  kJcc,  IS_BINARY_OP | IS_BRANCH | NEEDS_FIXUP | USES_CCODES, { 0,             0, 0x70, 0,    0, 0, 0, 0, false }, "Jcc8",  "!1c !0t" },
-  { kX86Jcc32, kJcc,  IS_BINARY_OP | IS_BRANCH | NEEDS_FIXUP | USES_CCODES, { 0,             0, 0x0F, 0x80, 0, 0, 0, 0, false }, "Jcc32", "!1c !0t" },
-  { kX86Jmp8,  kJmp,  IS_UNARY_OP  | IS_BRANCH | NEEDS_FIXUP,               { 0,             0, 0xEB, 0,    0, 0, 0, 0, false }, "Jmp8",  "!0t" },
-  { kX86Jmp32, kJmp,  IS_UNARY_OP  | IS_BRANCH | NEEDS_FIXUP,               { 0,             0, 0xE9, 0,    0, 0, 0, 0, false }, "Jmp32", "!0t" },
-  { kX86JmpR,  kJmp,  IS_UNARY_OP  | IS_BRANCH | REG_USE0,                  { 0,             0, 0xFF, 0,    0, 4, 0, 0, false }, "JmpR",  "!0r" },
-  { kX86Jecxz8, kJmp, NO_OPERAND   | IS_BRANCH | NEEDS_FIXUP | REG_USEC,    { 0,             0, 0xE3, 0,    0, 0, 0, 0, false }, "Jecxz", "!0t" },
-  { kX86JmpT,  kJmp,  IS_UNARY_OP  | IS_BRANCH | IS_LOAD,                   { THREAD_PREFIX, 0, 0xFF, 0,    0, 4, 0, 0, false }, "JmpT",  "fs:[!0d]" },
-  { kX86CallR, kCall, IS_UNARY_OP  | IS_BRANCH | REG_USE0,                  { 0,             0, 0xE8, 0,    0, 0, 0, 0, false }, "CallR", "!0r" },
-  { kX86CallM, kCall, IS_BINARY_OP | IS_BRANCH | IS_LOAD | REG_USE0,        { 0,             0, 0xFF, 0,    0, 2, 0, 0, false }, "CallM", "[!0r+!1d]" },
-  { kX86CallA, kCall, IS_QUAD_OP   | IS_BRANCH | IS_LOAD | REG_USE01,       { 0,             0, 0xFF, 0,    0, 2, 0, 0, false }, "CallA", "[!0r+!1r<<!2d+!3d]" },
-  { kX86CallT, kCall, IS_UNARY_OP  | IS_BRANCH | IS_LOAD,                   { THREAD_PREFIX, 0, 0xFF, 0,    0, 2, 0, 0, false }, "CallT", "fs:[!0d]" },
-  { kX86CallI, kCall, IS_UNARY_OP  | IS_BRANCH,                             { 0,             0, 0xE8, 0,    0, 0, 0, 4, false }, "CallI", "!0d" },
-  { kX86Ret,   kNullary, NO_OPERAND | IS_BRANCH,                            { 0,             0, 0xC3, 0,    0, 0, 0, 0, false }, "Ret", "" },
-
-  { kX86PcRelLoadRA,   kPcRel,  IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8B, 0, 0, 0, 0, 0, false }, "PcRelLoadRA",   "!0r,[!1r+!2r<<!3d+!4p]" },
-  { kX86PcRelAdr,      kPcRel,  IS_LOAD | IS_BINARY_OP | REG_DEF0,     { 0, 0, 0xB8, 0, 0, 0, 0, 4, false }, "PcRelAdr",      "!0r,!1p" },
-  { kX86RepneScasw,    kNullary, NO_OPERAND | REG_USEA | REG_USEC | SETS_CCODES, { 0x66, 0xF2, 0xAF, 0, 0, 0, 0, 0, false }, "RepNE ScasW", "" },
-};
-
-std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs) {
-  os << X86Mir2Lir::EncodingMap[rhs].name;
-  return os;
-}
-
-static bool NeedsRex(int32_t raw_reg) {
-  return raw_reg != kRIPReg && RegStorage::RegNum(raw_reg) > 7;
-}
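-
-// Illustration: register numbers 8-15 (r8-r15, xmm8-xmm15) only exist on x86-64 and
-// need a REX bit to encode; the RIP pseudo-register is special-cased and never does.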
-
-static uint8_t LowRegisterBits(int32_t raw_reg) {
-  uint8_t low_reg = RegStorage::RegNum(raw_reg) & kRegNumMask32;  // 3 bits
-  DCHECK_LT(low_reg, 8);
-  return low_reg;
-}
-
-static bool HasModrm(const X86EncodingMap* entry) {
-  switch (entry->kind) {
-    case kNullary: return false;
-    case kRegOpcode: return false;
-    default: return true;
-  }
-}
-
-static bool HasSib(const X86EncodingMap* entry) {
-  switch (entry->kind) {
-    case kArray: return true;
-    case kArrayReg: return true;
-    case kRegArray: return true;
-    case kArrayImm: return true;
-    case kRegArrayImm: return true;
-    case kShiftArrayImm: return true;
-    case kShiftArrayCl: return true;
-    case kArrayCond: return true;
-    case kCall:
-      switch (entry->opcode) {
-        case kX86CallA: return true;
-        default: return false;
-      }
-    case kPcRel:
-      switch (entry->opcode) {
-        case kX86PcRelLoadRA: return true;
-        default: return false;
-      }
-    default: return false;
-  }
-}
-
-static bool ModrmIsRegReg(const X86EncodingMap* entry) {
-  switch (entry->kind) {
-    // There is no modrm for this kind of instruction, therefore the reg doesn't form part of the
-    // modrm:
-    case kNullary: return true;
-    case kRegOpcode: return true;
-    case kMovRegImm: return true;
-    // Regular modrm with a mod value of 3 (register-direct). When only one register
-    // is present, the reg field holds an opcode extension, so the base register is special.
-    case kReg: return true;
-    case kRegReg: return true;
-    case kRegRegStore: return true;
-    case kRegImm: return true;
-    case kRegRegImm: return true;
-    case kRegRegImmStore: return true;
-    case kShiftRegImm: return true;
-    case kShiftRegCl: return true;
-    case kRegCond: return true;
-    case kRegRegCond: return true;
-    case kShiftRegRegCl: return true;
-    case kJmp:
-      switch (entry->opcode) {
-        case kX86JmpR: return true;
-        default: return false;
-      }
-    case kCall:
-      switch (entry->opcode) {
-        case kX86CallR: return true;
-        default: return false;
-      }
-    default: return false;
-  }
-}
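-
-// For reference, modrm is mod(2) | reg(3) | rm(3); the kinds above all emit mod == 3
-// (register-direct), e.g. modrm C1 decodes as mod 11, reg 000, rm 001.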
-
-static bool IsByteSecondOperand(const X86EncodingMap* entry) {
-  return StartsWith(entry->name, "Movzx8") || StartsWith(entry->name, "Movsx8");
-}
-
-size_t X86Mir2Lir::ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_index,
-                               int32_t raw_base, int32_t displacement) {
-  bool has_modrm = HasModrm(entry);
-  bool has_sib = HasSib(entry);
-  bool r8_form = entry->skeleton.r8_form;
-  bool modrm_is_reg_reg = ModrmIsRegReg(entry);
-  if (has_sib) {
-    DCHECK(!modrm_is_reg_reg);
-  }
-  size_t size = 0;
-  if (entry->skeleton.prefix1 > 0) {
-    ++size;
-    if (entry->skeleton.prefix2 > 0) {
-      ++size;
-    }
-  }
-  if (cu_->target64 || kIsDebugBuild) {
-    bool registers_need_rex_prefix = NeedsRex(raw_reg) || NeedsRex(raw_index) || NeedsRex(raw_base);
-    if (r8_form) {
-      // Do we need an empty REX prefix to normalize byte registers?
-      registers_need_rex_prefix = registers_need_rex_prefix ||
-          (RegStorage::RegNum(raw_reg) >= 4 && !IsByteSecondOperand(entry));
-      registers_need_rex_prefix = registers_need_rex_prefix ||
-          (modrm_is_reg_reg && (RegStorage::RegNum(raw_base) >= 4));
-    }
-    if (registers_need_rex_prefix) {
-      DCHECK(cu_->target64) << "Attempt to use a 64-bit only addressable register "
-          << RegStorage::RegNum(raw_reg) << " with instruction " << entry->name;
-      if (entry->skeleton.prefix1 != REX_W && entry->skeleton.prefix2 != REX_W
-         && entry->skeleton.prefix1 != REX && entry->skeleton.prefix2 != REX) {
-        ++size;  // rex
-      }
-    }
-  }
-  ++size;  // opcode
-  if (entry->skeleton.opcode == 0x0F) {
-    ++size;
-    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
-      ++size;
-    }
-  }
-  if (has_modrm) {
-    ++size;  // modrm
-  }
-  if (!modrm_is_reg_reg) {
-    if (has_sib || (LowRegisterBits(raw_base) == rs_rX86_SP_32.GetRegNum())
-        || (cu_->target64 && entry->skeleton.prefix1 == THREAD_PREFIX)) {
-      // SP requires a SIB byte.
-      // GS access also needs a SIB byte for absolute addressing in 64-bit mode.
-      ++size;
-    }
-    if (displacement != 0 || LowRegisterBits(raw_base) == rs_rBP.GetRegNum()) {
-      // BP requires an explicit displacement, even when it's 0.
-      if (entry->opcode != kX86Lea32RA && entry->opcode != kX86Lea64RA &&
-          entry->opcode != kX86Lea32RM && entry->opcode != kX86Lea64RM) {
-        DCHECK_NE(entry->flags & (IS_LOAD | IS_STORE), UINT64_C(0)) << entry->name;
-      }
-      if (raw_base == kRIPReg) {
-        DCHECK(cu_->target64) <<
-          "Attempt to use 64-bit RIP addressing with instruction " << entry->name;
-        size += 4;
-      } else {
-        size += IS_SIMM8(displacement) ? 1 : 4;
-      }
-    }
-  }
-  size += entry->skeleton.immediate_bytes;
-  return size;
-}
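-
-// Worked example of ComputeSize: "add [ebp + 0], eax" (a kMemReg form with base rBP
-// and displacement 0) is 3 bytes: no prefixes, one opcode byte, one modrm byte, plus
-// a disp8 that cannot be omitted because the base is rBP.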
-
-size_t X86Mir2Lir::GetInsnSize(LIR* lir) {
-  DCHECK(!IsPseudoLirOp(lir->opcode));
-  const X86EncodingMap* entry = &X86Mir2Lir::EncodingMap[lir->opcode];
-  DCHECK_EQ(entry->opcode, lir->opcode) << entry->name;
-
-  switch (entry->kind) {
-    case kData:
-      return 4;  // 4 bytes of data.
-    case kNop:
-      return lir->operands[0];  // Length of nop is sole operand.
-    case kNullary:
-      return ComputeSize(entry, NO_REG, NO_REG, NO_REG, 0);
-    case kRegOpcode:  // lir operands - 0: reg
-      return ComputeSize(entry, NO_REG, NO_REG, lir->operands[0], 0);
-    case kReg:  // lir operands - 0: reg
-      return ComputeSize(entry, NO_REG, NO_REG, lir->operands[0], 0);
-    case kMem:  // lir operands - 0: base, 1: disp
-      return ComputeSize(entry, NO_REG, NO_REG, lir->operands[0], lir->operands[1]);
-    case kArray:  // lir operands - 0: base, 1: index, 2: scale, 3: disp
-      return ComputeSize(entry, NO_REG, lir->operands[1], lir->operands[0], lir->operands[3]);
-    case kMemReg:  // lir operands - 0: base, 1: disp, 2: reg
-      return ComputeSize(entry, lir->operands[2], NO_REG, lir->operands[0], lir->operands[1]);
-    case kMemRegImm:  // lir operands - 0: base, 1: disp, 2: reg 3: immediate
-      return ComputeSize(entry, lir->operands[2], NO_REG, lir->operands[0], lir->operands[1]);
-    case kArrayReg:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
-      return ComputeSize(entry, lir->operands[4], lir->operands[1], lir->operands[0],
-                         lir->operands[3]);
-    case kThreadReg:  // lir operands - 0: disp, 1: reg
-      // Thread displacement size is always 32bit.
-      return ComputeSize(entry, lir->operands[1], NO_REG, NO_REG, 0x12345678);
-    case kRegReg:  // lir operands - 0: reg1, 1: reg2
-      return ComputeSize(entry, lir->operands[0], NO_REG, lir->operands[1], 0);
-    case kRegRegStore:  // lir operands - 0: reg2, 1: reg1
-      return ComputeSize(entry, lir->operands[1], NO_REG, lir->operands[0], 0);
-    case kRegMem:  // lir operands - 0: reg, 1: base, 2: disp
-      return ComputeSize(entry, lir->operands[0], NO_REG, lir->operands[1], lir->operands[2]);
-    case kRegArray:   // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
-      return ComputeSize(entry, lir->operands[0], lir->operands[2], lir->operands[1],
-                         lir->operands[4]);
-    case kRegThread:  // lir operands - 0: reg, 1: disp
-      // Thread displacement size is always 32bit.
-      return ComputeSize(entry, lir->operands[0], NO_REG, NO_REG, 0x12345678);
-    case kRegImm: {  // lir operands - 0: reg, 1: immediate
-      size_t size = ComputeSize(entry, lir->operands[0], NO_REG, NO_REG, 0);
-      // AX opcodes don't require the modrm byte.
-      if (entry->skeleton.ax_opcode == 0) {
-        return size;
-      } else {
-        return size - (RegStorage::RegNum(lir->operands[0]) == rs_rAX.GetRegNum() ? 1 : 0);
-      }
-    }
-    case kMemImm:  // lir operands - 0: base, 1: disp, 2: immediate
-      return ComputeSize(entry, NO_REG, NO_REG, lir->operands[0], lir->operands[1]);
-    case kArrayImm:  // lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
-      return ComputeSize(entry, NO_REG, lir->operands[1], lir->operands[0], lir->operands[3]);
-    case kThreadImm:  // lir operands - 0: disp, 1: imm
-      // Thread displacement size is always 32bit.
-      return ComputeSize(entry, NO_REG, NO_REG, NO_REG, 0x12345678);
-    case kRegRegImm:  // lir operands - 0: reg1, 1: reg2, 2: imm
-      // Note: RegRegImm form passes reg2 as index but encodes it using base.
-      return ComputeSize(entry, lir->operands[0], lir->operands[1], NO_REG, 0);
-    case kRegRegImmStore:  // lir operands - 0: reg2, 1: reg1, 2: imm
-      // Note: RegRegImmStore form passes reg1 as index but encodes it using base.
-      return ComputeSize(entry, lir->operands[1], lir->operands[0], NO_REG, 0);
-    case kRegMemImm:  // lir operands - 0: reg, 1: base, 2: disp, 3: imm
-      return ComputeSize(entry, lir->operands[0], NO_REG, lir->operands[1], lir->operands[2]);
-    case kRegArrayImm:  // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp, 5: imm
-      return ComputeSize(entry, lir->operands[0], lir->operands[2], lir->operands[1],
-                         lir->operands[4]);
-    case kMovRegImm:  // lir operands - 0: reg, 1: immediate
-    case kMovRegQuadImm:
-      return ((entry->skeleton.prefix1 != 0 || NeedsRex(lir->operands[0])) ? 1 : 0) + 1 +
-          entry->skeleton.immediate_bytes;
-    case kShiftRegImm:  // lir operands - 0: reg, 1: immediate
-      // Shift by immediate one has a shorter opcode.
-      return ComputeSize(entry, lir->operands[0], NO_REG, NO_REG, 0) -
-          (lir->operands[1] == 1 ? 1 : 0);
-    case kShiftMemImm:  // lir operands - 0: base, 1: disp, 2: immediate
-      // Shift by immediate one has a shorter opcode.
-      return ComputeSize(entry, NO_REG, NO_REG, lir->operands[0], lir->operands[1]) -
-          (lir->operands[2] == 1 ? 1 : 0);
-    case kShiftArrayImm:  // lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
-      // Shift by immediate one has a shorter opcode.
-      return ComputeSize(entry, NO_REG, lir->operands[1], lir->operands[0], lir->operands[3]) -
-          (lir->operands[4] == 1 ? 1 : 0);
-    case kShiftRegCl:  // lir operands - 0: reg, 1: cl
-      DCHECK_EQ(rs_rCX.GetRegNum(), RegStorage::RegNum(lir->operands[1]));
-      // Note: ShiftRegCl form passes reg as reg but encodes it using base.
-      return ComputeSize(entry, lir->operands[0], NO_REG, NO_REG, 0);
-    case kShiftMemCl:  // lir operands - 0: base, 1: disp, 2: cl
-      DCHECK_EQ(rs_rCX.GetRegNum(), RegStorage::RegNum(lir->operands[2]));
-      return ComputeSize(entry, NO_REG, NO_REG, lir->operands[0], lir->operands[1]);
-    case kShiftArrayCl:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: cl
-      DCHECK_EQ(rs_rCX.GetRegNum(), RegStorage::RegNum(lir->operands[4]));
-      return ComputeSize(entry, lir->operands[4], lir->operands[1], lir->operands[0],
-                         lir->operands[3]);
-    case kShiftRegRegCl:  // lir operands - 0: reg1, 1: reg2, 2: cl
-      DCHECK_EQ(rs_rCX.GetRegNum(), RegStorage::RegNum(lir->operands[2]));
-      return ComputeSize(entry, lir->operands[0], NO_REG, lir->operands[1], 0);
-    case kRegCond:  // lir operands - 0: reg, 1: cond
-      return ComputeSize(entry, NO_REG, NO_REG, lir->operands[0], 0);
-    case kMemCond:  // lir operands - 0: base, 1: disp, 2: cond
-      return ComputeSize(entry, NO_REG, NO_REG, lir->operands[0], lir->operands[1]);
-    case kArrayCond:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: cond
-      DCHECK_EQ(false, entry->skeleton.r8_form);
-      return ComputeSize(entry, NO_REG, lir->operands[1], lir->operands[0], lir->operands[3]);
-    case kRegRegCond:  // lir operands - 0: reg1, 1: reg2, 2: cond
-      DCHECK_EQ(false, entry->skeleton.r8_form);
-      return ComputeSize(entry, lir->operands[0], NO_REG, lir->operands[1], 0);
-    case kRegMemCond:  // lir operands - 0: reg, 1: base, 2: disp, 3:cond
-      DCHECK_EQ(false, entry->skeleton.r8_form);
-      return ComputeSize(entry, lir->operands[0], NO_REG, lir->operands[1], lir->operands[2]);
-    case kJcc:
-      if (lir->opcode == kX86Jcc8) {
-        return 2;  // opcode + rel8
-      } else {
-        DCHECK(lir->opcode == kX86Jcc32);
-        return 6;  // 2 byte opcode + rel32
-      }
-    case kJmp:
-      if (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jecxz8) {
-        return 2;  // opcode + rel8
-      } else if (lir->opcode == kX86Jmp32) {
-        return 5;  // opcode + rel32
-      } else if (lir->opcode == kX86JmpT) {
-        // Thread displacement size is always 32bit.
-        return ComputeSize(entry, NO_REG, NO_REG, NO_REG, 0x12345678);
-      } else {
-        DCHECK(lir->opcode == kX86JmpR);
-        if (NeedsRex(lir->operands[0])) {
-          return 3;  // REX.B + opcode + modrm
-        } else {
-          return 2;  // opcode + modrm
-        }
-      }
-    case kCall:
-      switch (lir->opcode) {
-        case kX86CallI: return 5;  // opcode 0:disp
-        case kX86CallR: return 2;  // opcode modrm
-        case kX86CallM:  // lir operands - 0: base, 1: disp
-          return ComputeSize(entry, NO_REG, NO_REG, lir->operands[0], lir->operands[1]);
-        case kX86CallA:  // lir operands - 0: base, 1: index, 2: scale, 3: disp
-          return ComputeSize(entry, NO_REG, lir->operands[1], lir->operands[0], lir->operands[3]);
-        case kX86CallT:  // lir operands - 0: disp
-          // Thread displacement size is always 32bit.
-          return ComputeSize(entry, NO_REG, NO_REG, NO_REG, 0x12345678);
-        default:
-          break;
-      }
-      break;
-    case kPcRel:
-      if (entry->opcode == kX86PcRelLoadRA) {
-        // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
-        // Force the displacement size to 32bit; it will hold a computed offset later.
-        return ComputeSize(entry, lir->operands[0], lir->operands[2], lir->operands[1],
-                           0x12345678);
-      } else {
-        DCHECK_EQ(entry->opcode, kX86PcRelAdr);
-        return 5;  // opcode with reg + 4 byte immediate
-      }
-    case kUnimplemented:
-      break;
-  }
-  UNIMPLEMENTED(FATAL) << "Unimplemented size encoding for: " << entry->name;
-  return 0;
-}
-
-static uint8_t ModrmForDisp(int base, int disp) {
-  // BP requires an explicit disp, so do not omit it in the 0 case
-  if (disp == 0 && RegStorage::RegNum(base) != rs_rBP.GetRegNum()) {
-    return 0;
-  } else if (IS_SIMM8(disp)) {
-    return 1;
-  } else {
-    return 2;
-  }
-}
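-
-// Illustration: mod 0 omits the displacement entirely ([eax]), mod 1 appends a disp8
-// (e.g. [eax-8] emits displacement byte F8), and mod 2 appends a full disp32.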
-
-void X86Mir2Lir::CheckValidByteRegister(const X86EncodingMap* entry, int32_t raw_reg) {
-  if (kIsDebugBuild) {
-    // Sanity check r8_form is correctly specified.
-    if (entry->skeleton.r8_form) {
-      CHECK(strchr(entry->name, '8') != nullptr) << entry->name;
-    } else {
-      if (entry->skeleton.immediate_bytes != 1) {  // Ignore ...I8 instructions.
-        if (!StartsWith(entry->name, "Movzx8") && !StartsWith(entry->name, "Movsx8")
-           && !StartsWith(entry->name, "Movzx8q") && !StartsWith(entry->name, "Movsx8q")) {
-          CHECK(strchr(entry->name, '8') == nullptr) << entry->name;
-        }
-      }
-    }
-    if (RegStorage::RegNum(raw_reg) >= 4) {
-      // In 32-bit mode, byte forms of register numbers 4-7 encode ah, ch, dh and bh
-      // rather than the low bytes of esp/ebp/esi/edi, so r8_form is 64-bit only here.
-      CHECK(cu_->target64 || !entry->skeleton.r8_form)
-               << "Invalid register " << static_cast<int>(RegStorage::RegNum(raw_reg))
-               << " for instruction " << entry->name << " in "
-               << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-    }
-  }
-}
-
-void X86Mir2Lir::EmitPrefix(const X86EncodingMap* entry,
-                            int32_t raw_reg_r, int32_t raw_reg_x, int32_t raw_reg_b) {
-  // REX.WRXB
-  // W - 64-bit operand
-  // R - MODRM.reg
-  // X - SIB.index
-  // B - MODRM.rm/SIB.base
-  bool w = (entry->skeleton.prefix1 == REX_W) || (entry->skeleton.prefix2 == REX_W);
-  bool r = NeedsRex(raw_reg_r);
-  bool x = NeedsRex(raw_reg_x);
-  bool b = NeedsRex(raw_reg_b);
-  bool r8_form = entry->skeleton.r8_form;
-  bool modrm_is_reg_reg = ModrmIsRegReg(entry);
-
-  uint8_t rex = 0;
-  if (r8_form) {
-    // Do we need an empty REX prefix to normalize byte register addressing?
-    if (RegStorage::RegNum(raw_reg_r) >= 4 && !IsByteSecondOperand(entry)) {
-      rex |= REX;  // REX.0000
-    } else if (modrm_is_reg_reg && RegStorage::RegNum(raw_reg_b) >= 4) {
-      rex |= REX;  // REX.0000
-    }
-  }
-  if (w) {
-    rex |= REX_W;  // REX.W000
-  }
-  if (r) {
-    rex |= REX_R;  // REX.0R00
-  }
-  if (x) {
-    rex |= REX_X;  // REX.00X0
-  }
-  if (b) {
-    rex |= REX_B;  // REX.000B
-  }
-  if (entry->skeleton.prefix1 != 0) {
-    if (cu_->target64 && entry->skeleton.prefix1 == THREAD_PREFIX) {
-      // 64-bit thread addresses use GS, not FS.
-      code_buffer_.push_back(THREAD_PREFIX_GS);
-    } else {
-      if (entry->skeleton.prefix1 == REX_W || entry->skeleton.prefix1 == REX) {
-        DCHECK(cu_->target64);
-        rex |= entry->skeleton.prefix1;
-        code_buffer_.push_back(rex);
-        rex = 0;
-      } else {
-        code_buffer_.push_back(entry->skeleton.prefix1);
-      }
-    }
-    if (entry->skeleton.prefix2 != 0) {
-      if (entry->skeleton.prefix2 == REX_W || entry->skeleton.prefix2 == REX) {
-        DCHECK(cu_->target64);
-        rex |= entry->skeleton.prefix2;
-        code_buffer_.push_back(rex);
-        rex = 0;
-      } else {
-        code_buffer_.push_back(entry->skeleton.prefix2);
-      }
-    }
-  } else {
-    DCHECK_EQ(0, entry->skeleton.prefix2);
-  }
-  if (rex != 0) {
-    DCHECK(cu_->target64);
-    code_buffer_.push_back(rex);
-  }
-}
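-
-// Example REX bytes from the logic above: REX is 0100WRXB, so REX.W alone is 48,
-// REX.W plus REX.B (64-bit operand with an extended base) is 49, and the "empty"
-// REX 40 only normalizes byte-register encodings to spl/bpl/sil/dil.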
-
-void X86Mir2Lir::EmitOpcode(const X86EncodingMap* entry) {
-  code_buffer_.push_back(entry->skeleton.opcode);
-  if (entry->skeleton.opcode == 0x0F) {
-    code_buffer_.push_back(entry->skeleton.extra_opcode1);
-    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
-      code_buffer_.push_back(entry->skeleton.extra_opcode2);
-    } else {
-      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
-    }
-  } else {
-    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
-    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
-  }
-}
-
-void X86Mir2Lir::EmitPrefixAndOpcode(const X86EncodingMap* entry,
-                                     int32_t raw_reg_r, int32_t raw_reg_x, int32_t raw_reg_b) {
-  EmitPrefix(entry, raw_reg_r, raw_reg_x, raw_reg_b);
-  EmitOpcode(entry);
-}
-
-void X86Mir2Lir::EmitDisp(uint8_t base, int32_t disp) {
-  // BP requires an explicit disp, so do not omit it in the 0 case
-  if (disp == 0 && RegStorage::RegNum(base) != rs_rBP.GetRegNum()) {
-    return;
-  } else if (IS_SIMM8(disp)) {
-    code_buffer_.push_back(disp & 0xFF);
-  } else {
-    code_buffer_.push_back(disp & 0xFF);
-    code_buffer_.push_back((disp >> 8) & 0xFF);
-    code_buffer_.push_back((disp >> 16) & 0xFF);
-    code_buffer_.push_back((disp >> 24) & 0xFF);
-  }
-}
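-
-// Example: displacements are emitted little-endian, so a disp32 of 0x12345678
-// becomes the byte sequence 78 56 34 12.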
-
-void X86Mir2Lir::EmitModrmThread(uint8_t reg_or_opcode) {
-  if (cu_->target64) {
-    // Absolute addressing for GS access.
-    uint8_t modrm = (0 << 6) | (reg_or_opcode << 3) | rs_rX86_SP_32.GetRegNum();
-    code_buffer_.push_back(modrm);
-    uint8_t sib = (0/*TIMES_1*/ << 6) | (rs_rX86_SP_32.GetRegNum() << 3) | rs_rBP.GetRegNum();
-    code_buffer_.push_back(sib);
-  } else {
-    uint8_t modrm = (0 << 6) | (reg_or_opcode << 3) | rs_rBP.GetRegNum();
-    code_buffer_.push_back(modrm);
-  }
-}
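-
-// Example: the 64-bit path emits modrm (reg << 3) | 0x04 (mod 00, rm 100: SIB
-// follows) and SIB 25 (index 100 = none, base 101), which with mod 00 selects a
-// bare disp32 absolute address for the gs-relative access.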
-
-void X86Mir2Lir::EmitModrmDisp(uint8_t reg_or_opcode, uint8_t base, int32_t disp) {
-  DCHECK_LT(reg_or_opcode, 8);
-  if (base == kRIPReg) {
-    // x86_64 RIP handling: always a 32-bit displacement.
-    uint8_t modrm = (0x0 << 6) | (reg_or_opcode << 3) | 0x5;
-    code_buffer_.push_back(modrm);
-    code_buffer_.push_back(disp & 0xFF);
-    code_buffer_.push_back((disp >> 8) & 0xFF);
-    code_buffer_.push_back((disp >> 16) & 0xFF);
-    code_buffer_.push_back((disp >> 24) & 0xFF);
-  } else {
-    DCHECK_LT(base, 8);
-    uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg_or_opcode << 3) | base;
-    code_buffer_.push_back(modrm);
-    if (base == rs_rX86_SP_32.GetRegNum()) {
-      // Special SIB for SP base
-      code_buffer_.push_back(0 << 6 | rs_rX86_SP_32.GetRegNum() << 3 | rs_rX86_SP_32.GetRegNum());
-    }
-    EmitDisp(base, disp);
-  }
-}
-
-void X86Mir2Lir::EmitModrmSibDisp(uint8_t reg_or_opcode, uint8_t base, uint8_t index,
-                                  int scale, int32_t disp) {
-  DCHECK_LT(RegStorage::RegNum(reg_or_opcode), 8);
-  uint8_t modrm = (ModrmForDisp(base, disp) << 6) | RegStorage::RegNum(reg_or_opcode) << 3 |
-      rs_rX86_SP_32.GetRegNum();
-  code_buffer_.push_back(modrm);
-  DCHECK_LT(scale, 4);
-  DCHECK_LT(RegStorage::RegNum(index), 8);
-  DCHECK_LT(RegStorage::RegNum(base), 8);
-  uint8_t sib = (scale << 6) | (RegStorage::RegNum(index) << 3) | RegStorage::RegNum(base);
-  code_buffer_.push_back(sib);
-  EmitDisp(base, disp);
-}
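-
-// Example: [ebx + esi*4 + 8] with reg = eax encodes as modrm 44 (mod 01, reg 000,
-// rm 100), SIB B3 (scale 10, index 110, base 011), then the disp8 byte 08.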
-
-void X86Mir2Lir::EmitImm(const X86EncodingMap* entry, int64_t imm) {
-  switch (entry->skeleton.immediate_bytes) {
-    case 1:
-      DCHECK(IS_SIMM8(imm));
-      code_buffer_.push_back(imm & 0xFF);
-      break;
-    case 2:
-      DCHECK(IS_SIMM16(imm));
-      code_buffer_.push_back(imm & 0xFF);
-      code_buffer_.push_back((imm >> 8) & 0xFF);
-      break;
-    case 4:
-      DCHECK(IS_SIMM32(imm));
-      code_buffer_.push_back(imm & 0xFF);
-      code_buffer_.push_back((imm >> 8) & 0xFF);
-      code_buffer_.push_back((imm >> 16) & 0xFF);
-      code_buffer_.push_back((imm >> 24) & 0xFF);
-      break;
-    case 8:
-      code_buffer_.push_back(imm & 0xFF);
-      code_buffer_.push_back((imm >> 8) & 0xFF);
-      code_buffer_.push_back((imm >> 16) & 0xFF);
-      code_buffer_.push_back((imm >> 24) & 0xFF);
-      code_buffer_.push_back((imm >> 32) & 0xFF);
-      code_buffer_.push_back((imm >> 40) & 0xFF);
-      code_buffer_.push_back((imm >> 48) & 0xFF);
-      code_buffer_.push_back((imm >> 56) & 0xFF);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
-                 << ") for instruction: " << entry->name;
-      break;
-  }
-}
-
-void X86Mir2Lir::EmitNullary(const X86EncodingMap* entry) {
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  EmitPrefixAndOpcode(entry, NO_REG, NO_REG, NO_REG);
-  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
-
-void X86Mir2Lir::EmitOpRegOpcode(const X86EncodingMap* entry, int32_t raw_reg) {
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  EmitPrefixAndOpcode(entry, NO_REG, NO_REG, raw_reg);
-  // There's no 3-byte instruction with +rd
-  DCHECK(entry->skeleton.opcode != 0x0F ||
-         (entry->skeleton.extra_opcode1 != 0x38 && entry->skeleton.extra_opcode1 != 0x3A));
-  DCHECK(!RegStorage::IsFloat(raw_reg));
-  uint8_t low_reg = LowRegisterBits(raw_reg);
-  code_buffer_.back() += low_reg;
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
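-
-// Example: "+rd" opcodes fold the low register bits into the final opcode byte,
-// e.g. Pop32R (58) with ebx (register number 3) assembles to the single byte 5B.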
-
-void X86Mir2Lir::EmitOpReg(const X86EncodingMap* entry, int32_t raw_reg) {
-  CheckValidByteRegister(entry, raw_reg);
-  EmitPrefixAndOpcode(entry, NO_REG, NO_REG, raw_reg);
-  uint8_t low_reg = LowRegisterBits(raw_reg);
-  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | low_reg;
-  code_buffer_.push_back(modrm);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
-
-void X86Mir2Lir::EmitOpMem(const X86EncodingMap* entry, int32_t raw_base, int32_t disp) {
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  EmitPrefix(entry, NO_REG, NO_REG, raw_base);
-  code_buffer_.push_back(entry->skeleton.opcode);
-  DCHECK_NE(0x0F, entry->skeleton.opcode);
-  DCHECK_EQ(0, entry->skeleton.extra_opcode1);
-  DCHECK_EQ(0, entry->skeleton.extra_opcode2);
-  uint8_t low_base = LowRegisterBits(raw_base);
-  EmitModrmDisp(entry->skeleton.modrm_opcode, low_base, disp);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
-
-void X86Mir2Lir::EmitOpArray(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index,
-                             int scale, int32_t disp) {
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  EmitPrefixAndOpcode(entry, NO_REG, raw_index, raw_base);
-  uint8_t low_index = LowRegisterBits(raw_index);
-  uint8_t low_base = LowRegisterBits(raw_base);
-  EmitModrmSibDisp(entry->skeleton.modrm_opcode, low_base, low_index, scale, disp);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
-
-void X86Mir2Lir::EmitMemReg(const X86EncodingMap* entry, int32_t raw_base, int32_t disp,
-                            int32_t raw_reg) {
-  CheckValidByteRegister(entry, raw_reg);
-  EmitPrefixAndOpcode(entry, raw_reg, NO_REG, raw_base);
-  uint8_t low_reg = LowRegisterBits(raw_reg);
-  uint8_t low_base = (raw_base == kRIPReg) ? raw_base : LowRegisterBits(raw_base);
-  EmitModrmDisp(low_reg, low_base, disp);
-  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
-
-void X86Mir2Lir::EmitRegMem(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base,
-                            int32_t disp) {
-  // Opcode will flip operands.
-  EmitMemReg(entry, raw_base, disp, raw_reg);
-}
-
-void X86Mir2Lir::EmitRegArray(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base,
-                              int32_t raw_index, int scale, int32_t disp) {
-  CheckValidByteRegister(entry, raw_reg);
-  EmitPrefixAndOpcode(entry, raw_reg, raw_index, raw_base);
-  uint8_t low_reg = LowRegisterBits(raw_reg);
-  uint8_t low_index = LowRegisterBits(raw_index);
-  uint8_t low_base = LowRegisterBits(raw_base);
-  EmitModrmSibDisp(low_reg, low_base, low_index, scale, disp);
-  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
-
-void X86Mir2Lir::EmitArrayReg(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index,
-                              int scale, int32_t disp, int32_t raw_reg) {
-  // Opcode will flip operands.
-  EmitRegArray(entry, raw_reg, raw_base, raw_index, scale, disp);
-}
-
-void X86Mir2Lir::EmitMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp,
-                            int32_t imm) {
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  EmitPrefixAndOpcode(entry, NO_REG, NO_REG, raw_base);
-  uint8_t low_base = LowRegisterBits(raw_base);
-  EmitModrmDisp(entry->skeleton.modrm_opcode, low_base, disp);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  EmitImm(entry, imm);
-}
-
-void X86Mir2Lir::EmitArrayImm(const X86EncodingMap* entry,
-                              int32_t raw_base, int32_t raw_index, int scale, int32_t disp,
-                              int32_t imm) {
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  EmitPrefixAndOpcode(entry, NO_REG, raw_index, raw_base);
-  uint8_t low_index = LowRegisterBits(raw_index);
-  uint8_t low_base = LowRegisterBits(raw_base);
-  EmitModrmSibDisp(entry->skeleton.modrm_opcode, low_base, low_index, scale, disp);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  EmitImm(entry, imm);
-}
-
-void X86Mir2Lir::EmitRegThread(const X86EncodingMap* entry, int32_t raw_reg, int32_t disp) {
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  DCHECK_NE(entry->skeleton.prefix1, 0);
-  EmitPrefixAndOpcode(entry, raw_reg, NO_REG, NO_REG);
-  uint8_t low_reg = LowRegisterBits(raw_reg);
-  EmitModrmThread(low_reg);
-  code_buffer_.push_back(disp & 0xFF);
-  code_buffer_.push_back((disp >> 8) & 0xFF);
-  code_buffer_.push_back((disp >> 16) & 0xFF);
-  code_buffer_.push_back((disp >> 24) & 0xFF);
-  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
-
-void X86Mir2Lir::EmitRegReg(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2) {
-  if (!IsByteSecondOperand(entry)) {
-    CheckValidByteRegister(entry, raw_reg1);
-  }
-  CheckValidByteRegister(entry, raw_reg2);
-  EmitPrefixAndOpcode(entry, raw_reg1, NO_REG, raw_reg2);
-  uint8_t low_reg1 = LowRegisterBits(raw_reg1);
-  uint8_t low_reg2 = LowRegisterBits(raw_reg2);
-  uint8_t modrm = (3 << 6) | (low_reg1 << 3) | low_reg2;
-  code_buffer_.push_back(modrm);
-  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
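The register-direct ModR/M byte built inline above recurs throughout these emitters. As a standalone sketch (helper name illustrative, not from the tree):

// Illustrative: pack a register-direct ModR/M byte. mod = 3 selects
// register operands; reg and rm each keep their low 3 bits.
static inline uint8_t ModrmForRegReg(uint8_t low_reg, uint8_t low_rm) {
  return static_cast<uint8_t>((3u << 6) | ((low_reg & 7u) << 3) | (low_rm & 7u));
}
// Example: low_reg = 0 (eax), low_rm = 1 (ecx) yields 0xC1.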
-
-void X86Mir2Lir::EmitRegRegImm(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2,
-                               int32_t imm) {
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  EmitPrefixAndOpcode(entry, raw_reg1, NO_REG, raw_reg2);
-  uint8_t low_reg1 = LowRegisterBits(raw_reg1);
-  uint8_t low_reg2 = LowRegisterBits(raw_reg2);
-  uint8_t modrm = (3 << 6) | (low_reg1 << 3) | low_reg2;
-  code_buffer_.push_back(modrm);
-  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  EmitImm(entry, imm);
-}
-
-void X86Mir2Lir::EmitRegMemImm(const X86EncodingMap* entry,
-                               int32_t raw_reg, int32_t raw_base, int disp, int32_t imm) {
-  DCHECK(!RegStorage::IsFloat(raw_reg));
-  CheckValidByteRegister(entry, raw_reg);
-  EmitPrefixAndOpcode(entry, raw_reg, NO_REG, raw_base);
-  uint8_t low_reg = LowRegisterBits(raw_reg);
-  uint8_t low_base = LowRegisterBits(raw_base);
-  EmitModrmDisp(low_reg, low_base, disp);
-  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  EmitImm(entry, imm);
-}
-
-void X86Mir2Lir::EmitMemRegImm(const X86EncodingMap* entry,
-                               int32_t raw_base, int32_t disp, int32_t raw_reg, int32_t imm) {
-  // Opcode will flip operands.
-  EmitRegMemImm(entry, raw_reg, raw_base, disp, imm);
-}
-
-void X86Mir2Lir::EmitRegImm(const X86EncodingMap* entry, int32_t raw_reg, int32_t imm) {
-  CheckValidByteRegister(entry, raw_reg);
-  EmitPrefix(entry, NO_REG, NO_REG, raw_reg);
-  if (RegStorage::RegNum(raw_reg) == rs_rAX.GetRegNum() && entry->skeleton.ax_opcode != 0) {
-    code_buffer_.push_back(entry->skeleton.ax_opcode);
-  } else {
-    uint8_t low_reg = LowRegisterBits(raw_reg);
-    EmitOpcode(entry);
-    uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | low_reg;
-    code_buffer_.push_back(modrm);
-  }
-  EmitImm(entry, imm);
-}
-
-void X86Mir2Lir::EmitThreadImm(const X86EncodingMap* entry, int32_t disp, int32_t imm) {
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  EmitPrefixAndOpcode(entry, NO_REG, NO_REG, NO_REG);
-  EmitModrmThread(entry->skeleton.modrm_opcode);
-  code_buffer_.push_back(disp & 0xFF);
-  code_buffer_.push_back((disp >> 8) & 0xFF);
-  code_buffer_.push_back((disp >> 16) & 0xFF);
-  code_buffer_.push_back((disp >> 24) & 0xFF);
-  EmitImm(entry, imm);
-  DCHECK_EQ(entry->skeleton.ax_opcode, 0);
-}
-
-void X86Mir2Lir::EmitMovRegImm(const X86EncodingMap* entry, int32_t raw_reg, int64_t imm) {
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  EmitPrefix(entry, NO_REG, NO_REG, raw_reg);
-  uint8_t low_reg = LowRegisterBits(raw_reg);
-  code_buffer_.push_back(0xB8 + low_reg);
-  switch (entry->skeleton.immediate_bytes) {
-    case 4:
-      code_buffer_.push_back(imm & 0xFF);
-      code_buffer_.push_back((imm >> 8) & 0xFF);
-      code_buffer_.push_back((imm >> 16) & 0xFF);
-      code_buffer_.push_back((imm >> 24) & 0xFF);
-      break;
-    case 8:
-      code_buffer_.push_back(imm & 0xFF);
-      code_buffer_.push_back((imm >> 8) & 0xFF);
-      code_buffer_.push_back((imm >> 16) & 0xFF);
-      code_buffer_.push_back((imm >> 24) & 0xFF);
-      code_buffer_.push_back((imm >> 32) & 0xFF);
-      code_buffer_.push_back((imm >> 40) & 0xFF);
-      code_buffer_.push_back((imm >> 48) & 0xFF);
-      code_buffer_.push_back((imm >> 56) & 0xFF);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported immediate size for EmitMovRegImm: "
-                 << static_cast<uint32_t>(entry->skeleton.immediate_bytes);
-  }
-}
-
-void X86Mir2Lir::EmitShiftRegImm(const X86EncodingMap* entry, int32_t raw_reg, int32_t imm) {
-  CheckValidByteRegister(entry, raw_reg);
-  EmitPrefix(entry, NO_REG, NO_REG, raw_reg);
-  if (imm != 1) {
-    code_buffer_.push_back(entry->skeleton.opcode);
-  } else {
-    // Shorter encoding for a 1-bit shift.
-    code_buffer_.push_back(entry->skeleton.ax_opcode);
-  }
-  DCHECK_NE(0x0F, entry->skeleton.opcode);
-  DCHECK_EQ(0, entry->skeleton.extra_opcode1);
-  DCHECK_EQ(0, entry->skeleton.extra_opcode2);
-  uint8_t low_reg = LowRegisterBits(raw_reg);
-  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | low_reg;
-  code_buffer_.push_back(modrm);
-  if (imm != 1) {
-    DCHECK_EQ(entry->skeleton.immediate_bytes, 1);
-    DCHECK(IS_SIMM8(imm));
-    code_buffer_.push_back(imm & 0xFF);
-  }
-}
-
-void X86Mir2Lir::EmitShiftRegCl(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_cl) {
-  CheckValidByteRegister(entry, raw_reg);
-  DCHECK_EQ(rs_rCX.GetRegNum(), RegStorage::RegNum(raw_cl));
-  EmitPrefix(entry, NO_REG, NO_REG, raw_reg);
-  code_buffer_.push_back(entry->skeleton.opcode);
-  DCHECK_NE(0x0F, entry->skeleton.opcode);
-  DCHECK_EQ(0, entry->skeleton.extra_opcode1);
-  DCHECK_EQ(0, entry->skeleton.extra_opcode2);
-  uint8_t low_reg = LowRegisterBits(raw_reg);
-  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | low_reg;
-  code_buffer_.push_back(modrm);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
-
-void X86Mir2Lir::EmitShiftMemCl(const X86EncodingMap* entry, int32_t raw_base,
-                                int32_t displacement, int32_t raw_cl) {
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  DCHECK_EQ(rs_rCX.GetRegNum(), RegStorage::RegNum(raw_cl));
-  EmitPrefix(entry, NO_REG, NO_REG, raw_base);
-  code_buffer_.push_back(entry->skeleton.opcode);
-  DCHECK_NE(0x0F, entry->skeleton.opcode);
-  DCHECK_EQ(0, entry->skeleton.extra_opcode1);
-  DCHECK_EQ(0, entry->skeleton.extra_opcode2);
-  uint8_t low_base = LowRegisterBits(raw_base);
-  EmitModrmDisp(entry->skeleton.modrm_opcode, low_base, displacement);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
-
-void X86Mir2Lir::EmitShiftRegRegCl(const X86EncodingMap* entry, int32_t raw_reg1,
-                                   int32_t raw_reg2, int32_t raw_cl) {
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  DCHECK_EQ(rs_rCX.GetRegNum(), RegStorage::RegNum(raw_cl));
-  EmitPrefixAndOpcode(entry, raw_reg1, NO_REG, raw_reg2);
-  uint8_t low_reg1 = LowRegisterBits(raw_reg1);
-  uint8_t low_reg2 = LowRegisterBits(raw_reg2);
-  uint8_t modrm = (3 << 6) | (low_reg1 << 3) | low_reg2;
-  code_buffer_.push_back(modrm);
-  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
-
-void X86Mir2Lir::EmitShiftMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp,
-                                 int32_t imm) {
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  EmitPrefix(entry, NO_REG, NO_REG, raw_base);
-  if (imm != 1) {
-    code_buffer_.push_back(entry->skeleton.opcode);
-  } else {
-    // Shorter encoding for a 1-bit shift.
-    code_buffer_.push_back(entry->skeleton.ax_opcode);
-  }
-  DCHECK_NE(0x0F, entry->skeleton.opcode);
-  DCHECK_EQ(0, entry->skeleton.extra_opcode1);
-  DCHECK_EQ(0, entry->skeleton.extra_opcode2);
-  uint8_t low_base = LowRegisterBits(raw_base);
-  EmitModrmDisp(entry->skeleton.modrm_opcode, low_base, disp);
-  if (imm != 1) {
-    DCHECK_EQ(entry->skeleton.immediate_bytes, 1);
-    DCHECK(IS_SIMM8(imm));
-    code_buffer_.push_back(imm & 0xFF);
-  }
-}
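Both shift emitters reuse the entry's ax_opcode slot for the by-1 form when the count is 1; for 32-bit shifts that is the 0xD1 family, which drops the trailing immediate byte of the 0xC1 form. A sketch of the resulting size difference (encodings per the x86 manual):

// shl eax, 5  ->  C1 E0 05   (opcode, ModR/M with /4, imm8)
// shl eax, 1  ->  D1 E0      (by-1 form, no immediate byte)
size_t ShiftRegImmSize(int32_t imm) {
  return (imm == 1) ? 2u : 3u;  // opcode + ModR/M, plus imm8 unless by-1 form
}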
-
-void X86Mir2Lir::EmitRegCond(const X86EncodingMap* entry, int32_t raw_reg, int32_t cc) {
-  CheckValidByteRegister(entry, raw_reg);
-  EmitPrefix(entry, NO_REG, NO_REG, raw_reg);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  DCHECK_EQ(0x0F, entry->skeleton.opcode);
-  code_buffer_.push_back(0x0F);
-  DCHECK_EQ(0x90, entry->skeleton.extra_opcode1);
-  DCHECK_GE(cc, 0);
-  DCHECK_LT(cc, 16);
-  code_buffer_.push_back(0x90 | cc);
-  DCHECK_EQ(0, entry->skeleton.extra_opcode2);
-  uint8_t low_reg = LowRegisterBits(raw_reg);
-  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | low_reg;
-  code_buffer_.push_back(modrm);
-  DCHECK_EQ(entry->skeleton.immediate_bytes, 0);
-}
-
-void X86Mir2Lir::EmitMemCond(const X86EncodingMap* entry, int32_t raw_base, int32_t disp,
-                             int32_t cc) {
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  if (entry->skeleton.prefix1 != 0) {
-    code_buffer_.push_back(entry->skeleton.prefix1);
-    if (entry->skeleton.prefix2 != 0) {
-      code_buffer_.push_back(entry->skeleton.prefix2);
-    }
-  } else {
-    DCHECK_EQ(0, entry->skeleton.prefix2);
-  }
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  DCHECK_EQ(0x0F, entry->skeleton.opcode);
-  code_buffer_.push_back(0x0F);
-  DCHECK_EQ(0x90, entry->skeleton.extra_opcode1);
-  DCHECK_GE(cc, 0);
-  DCHECK_LT(cc, 16);
-  code_buffer_.push_back(0x90 | cc);
-  DCHECK_EQ(0, entry->skeleton.extra_opcode2);
-  uint8_t low_base = LowRegisterBits(raw_base);
-  EmitModrmDisp(entry->skeleton.modrm_opcode, low_base, disp);
-  DCHECK_EQ(entry->skeleton.immediate_bytes, 0);
-}
-
-void X86Mir2Lir::EmitRegRegCond(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2,
-                                int32_t cc) {
-  // Generate prefix and opcode without the condition.
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  EmitPrefixAndOpcode(entry, raw_reg1, NO_REG, raw_reg2);
-
-  // Now add the condition. The last byte of opcode is the one that receives it.
-  DCHECK_GE(cc, 0);
-  DCHECK_LT(cc, 16);
-  code_buffer_.back() += cc;
-
-  // Not expecting to have to encode immediate or do anything special for ModR/M since there are
-  // two registers.
-  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
-
-  // For register to register encoding, the mod is 3.
-  const uint8_t mod = (3 << 6);
-
-  // Encode the ModR/M byte now.
-  uint8_t low_reg1 = LowRegisterBits(raw_reg1);
-  uint8_t low_reg2 = LowRegisterBits(raw_reg2);
-  const uint8_t modrm = mod | (low_reg1 << 3) | low_reg2;
-  code_buffer_.push_back(modrm);
-}
-
-void X86Mir2Lir::EmitRegMemCond(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_base,
-                                int32_t disp, int32_t cc) {
-  // Generate prefix and opcode without the condition.
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  EmitPrefixAndOpcode(entry, raw_reg1, NO_REG, raw_base);
-
-  // Now add the condition. The last byte of opcode is the one that receives it.
-  DCHECK_GE(cc, 0);
-  DCHECK_LT(cc, 16);
-  code_buffer_.back() += cc;
-
-  // Not expecting to have to encode an immediate or a modrm opcode extension,
-  // since the operands are a register and a memory location.
-  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
-
-  uint8_t low_reg1 = LowRegisterBits(raw_reg1);
-  uint8_t low_base = LowRegisterBits(raw_base);
-  EmitModrmDisp(low_reg1, low_base, disp);
-}
-
-void X86Mir2Lir::EmitJmp(const X86EncodingMap* entry, int32_t rel) {
-  if (entry->opcode == kX86Jmp8) {
-    DCHECK(IS_SIMM8(rel));
-    code_buffer_.push_back(0xEB);
-    code_buffer_.push_back(rel & 0xFF);
-  } else if (entry->opcode == kX86Jmp32) {
-    code_buffer_.push_back(0xE9);
-    code_buffer_.push_back(rel & 0xFF);
-    code_buffer_.push_back((rel >> 8) & 0xFF);
-    code_buffer_.push_back((rel >> 16) & 0xFF);
-    code_buffer_.push_back((rel >> 24) & 0xFF);
-  } else if (entry->opcode == kX86Jecxz8) {
-    DCHECK(IS_SIMM8(rel));
-    code_buffer_.push_back(0xE3);
-    code_buffer_.push_back(rel & 0xFF);
-  } else {
-    DCHECK(entry->opcode == kX86JmpR);
-    DCHECK_EQ(false, entry->skeleton.r8_form);
-    EmitPrefix(entry, NO_REG, NO_REG, rel);
-    code_buffer_.push_back(entry->skeleton.opcode);
-    uint8_t low_reg = LowRegisterBits(rel);
-    uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | low_reg;
-    code_buffer_.push_back(modrm);
-  }
-}
-
-void X86Mir2Lir::EmitJcc(const X86EncodingMap* entry, int32_t rel, int32_t cc) {
-  DCHECK_GE(cc, 0);
-  DCHECK_LT(cc, 16);
-  if (entry->opcode == kX86Jcc8) {
-    DCHECK(IS_SIMM8(rel));
-    code_buffer_.push_back(0x70 | cc);
-    code_buffer_.push_back(rel & 0xFF);
-  } else {
-    DCHECK(entry->opcode == kX86Jcc32);
-    code_buffer_.push_back(0x0F);
-    code_buffer_.push_back(0x80 | cc);
-    code_buffer_.push_back(rel & 0xFF);
-    code_buffer_.push_back((rel >> 8) & 0xFF);
-    code_buffer_.push_back((rel >> 16) & 0xFF);
-    code_buffer_.push_back((rel >> 24) & 0xFF);
-  }
-}
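Both branch emitters take rel as already relative to the end of the instruction; the fixup pass below computes it that way. A small sketch of the arithmetic (function name illustrative):

// Displacement for a short Jcc: opcode + rel8 occupy 2 bytes, so the target
// is measured from the offset just past those 2 bytes.
int32_t Rel8ForShortJcc(int32_t branch_offset, int32_t target_offset) {
  return target_offset - (branch_offset + 2);
}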
-
-void X86Mir2Lir::EmitCallMem(const X86EncodingMap* entry, int32_t raw_base, int32_t disp) {
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  EmitPrefixAndOpcode(entry, NO_REG, NO_REG, raw_base);
-  uint8_t low_base = LowRegisterBits(raw_base);
-  EmitModrmDisp(entry->skeleton.modrm_opcode, low_base, disp);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
-
-void X86Mir2Lir::EmitCallImmediate(const X86EncodingMap* entry, int32_t disp) {
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  EmitPrefixAndOpcode(entry, NO_REG, NO_REG, NO_REG);
-  DCHECK_EQ(4, entry->skeleton.immediate_bytes);
-  code_buffer_.push_back(disp & 0xFF);
-  code_buffer_.push_back((disp >> 8) & 0xFF);
-  code_buffer_.push_back((disp >> 16) & 0xFF);
-  code_buffer_.push_back((disp >> 24) & 0xFF);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-}
-
-void X86Mir2Lir::EmitCallThread(const X86EncodingMap* entry, int32_t disp) {
-  DCHECK_EQ(false, entry->skeleton.r8_form);
-  DCHECK_NE(entry->skeleton.prefix1, 0);
-  EmitPrefixAndOpcode(entry, NO_REG, NO_REG, NO_REG);
-  EmitModrmThread(entry->skeleton.modrm_opcode);
-  code_buffer_.push_back(disp & 0xFF);
-  code_buffer_.push_back((disp >> 8) & 0xFF);
-  code_buffer_.push_back((disp >> 16) & 0xFF);
-  code_buffer_.push_back((disp >> 24) & 0xFF);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-}
-
-void X86Mir2Lir::EmitPcRel(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base_or_table,
-                           int32_t raw_index, int scale, int32_t table_or_disp) {
-  int disp;
-  if (entry->opcode == kX86PcRelLoadRA) {
-    const SwitchTable* tab_rec = UnwrapPointer<SwitchTable>(table_or_disp);
-    disp = tab_rec->offset - tab_rec->anchor->offset;
-  } else {
-    DCHECK(entry->opcode == kX86PcRelAdr);
-    const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(raw_base_or_table);
-    disp = tab_rec->offset;
-  }
-  if (entry->opcode == kX86PcRelLoadRA) {
-    DCHECK_EQ(false, entry->skeleton.r8_form);
-    EmitPrefix(entry, raw_reg, raw_index, raw_base_or_table);
-    code_buffer_.push_back(entry->skeleton.opcode);
-    DCHECK_NE(0x0F, entry->skeleton.opcode);
-    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
-    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
-    uint8_t low_reg = LowRegisterBits(raw_reg);
-    uint8_t modrm = (2 << 6) | (low_reg << 3) | rs_rX86_SP_32.GetRegNum();
-    code_buffer_.push_back(modrm);
-    DCHECK_LT(scale, 4);
-    uint8_t low_base_or_table = LowRegisterBits(raw_base_or_table);
-    uint8_t low_index = LowRegisterBits(raw_index);
-    uint8_t sib = (scale << 6) | (low_index << 3) | low_base_or_table;
-    code_buffer_.push_back(sib);
-    DCHECK_EQ(0, entry->skeleton.immediate_bytes);
-  } else {
-    uint8_t low_reg = LowRegisterBits(raw_reg);
-    code_buffer_.push_back(entry->skeleton.opcode + low_reg);
-  }
-  code_buffer_.push_back(disp & 0xFF);
-  code_buffer_.push_back((disp >> 8) & 0xFF);
-  code_buffer_.push_back((disp >> 16) & 0xFF);
-  code_buffer_.push_back((disp >> 24) & 0xFF);
-  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
-  DCHECK_EQ(0, entry->skeleton.ax_opcode);
-}
-
-void X86Mir2Lir::EmitUnimplemented(const X86EncodingMap* entry, LIR* lir) {
-  UNIMPLEMENTED(WARNING) << "encoding kind for " << entry->name << " "
-                         << BuildInsnString(entry->fmt, lir, 0);
-  for (size_t i = 0; i < GetInsnSize(lir); ++i) {
-    code_buffer_.push_back(0xCC);  // emit breakpoint instruction - int 3
-  }
-}
-
-/*
- * Assemble the LIR into binary instruction format.  Note that we may
- * discover that a pc-relative displacement does not fit the selected
- * instruction.  In those cases we will try to substitute a new code
- * sequence or request that the trace be shortened and retried.
- */
-AssemblerStatus X86Mir2Lir::AssembleInstructions(LIR* first_lir_insn,
-                                                 CodeOffset start_addr ATTRIBUTE_UNUSED) {
-  LIR *lir;
-  AssemblerStatus res = kSuccess;  // Assume success
-
-  const bool kVerbosePcFixup = false;
-  for (lir = first_lir_insn; lir != nullptr; lir = NEXT_LIR(lir)) {
-    if (IsPseudoLirOp(lir->opcode)) {
-      continue;
-    }
-
-    if (lir->flags.is_nop) {
-      continue;
-    }
-
-    if (lir->flags.fixup != kFixupNone) {
-      switch (lir->opcode) {
-        case kX86Jcc8: {
-          LIR *target_lir = lir->target;
-          DCHECK(target_lir != nullptr);
-          int delta = 0;
-          CodeOffset pc;
-          if (IS_SIMM8(lir->operands[0])) {
-            pc = lir->offset + 2 /* opcode + rel8 */;
-          } else {
-            pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
-          }
-          CodeOffset target = target_lir->offset;
-          delta = target - pc;
-          if (IS_SIMM8(delta) != IS_SIMM8(lir->operands[0])) {
-            if (kVerbosePcFixup) {
-              LOG(INFO) << "Retry for JCC growth at " << lir->offset
-                  << " delta: " << delta << " old delta: " << lir->operands[0];
-            }
-            lir->opcode = kX86Jcc32;
-            lir->flags.size = GetInsnSize(lir);
-            DCHECK(lir->u.m.def_mask->Equals(kEncodeAll));
-            DCHECK(lir->u.m.use_mask->Equals(kEncodeAll));
-            res = kRetryAll;
-          }
-          if (kVerbosePcFixup) {
-            LOG(INFO) << "Source:";
-            DumpLIRInsn(lir, 0);
-            LOG(INFO) << "Target:";
-            DumpLIRInsn(target_lir, 0);
-            LOG(INFO) << "Delta " << delta;
-          }
-          lir->operands[0] = delta;
-          break;
-        }
-        case kX86Jcc32: {
-          LIR *target_lir = lir->target;
-          DCHECK(target_lir != nullptr);
-          CodeOffset pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
-          CodeOffset target = target_lir->offset;
-          int delta = target - pc;
-          if (kVerbosePcFixup) {
-            LOG(INFO) << "Source:";
-            DumpLIRInsn(lir, 0);
-            LOG(INFO) << "Target:";
-            DumpLIRInsn(target_lir, 0);
-            LOG(INFO) << "Delta " << delta;
-          }
-          lir->operands[0] = delta;
-          break;
-        }
-        case kX86Jecxz8: {
-          LIR *target_lir = lir->target;
-          DCHECK(target_lir != nullptr);
-          CodeOffset pc = lir->offset + 2;  // opcode + rel8
-          CodeOffset target = target_lir->offset;
-          int delta = target - pc;
-          lir->operands[0] = delta;
-          DCHECK(IS_SIMM8(delta));
-          break;
-        }
-        case kX86Jmp8: {
-          LIR *target_lir = lir->target;
-          DCHECK(target_lir != nullptr);
-          int delta = 0;
-          CodeOffset pc;
-          if (IS_SIMM8(lir->operands[0])) {
-            pc = lir->offset + 2 /* opcode + rel8 */;
-          } else {
-            pc = lir->offset + 5 /* opcode + rel32 */;
-          }
-          CodeOffset target = target_lir->offset;
-          delta = target - pc;
-          if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && delta == 0) {
-            // Useless branch
-            NopLIR(lir);
-            if (kVerbosePcFixup) {
-              LOG(INFO) << "Retry for useless branch at " << lir->offset;
-            }
-            res = kRetryAll;
-          } else if (IS_SIMM8(delta) != IS_SIMM8(lir->operands[0])) {
-            if (kVerbosePcFixup) {
-              LOG(INFO) << "Retry for JMP growth at " << lir->offset;
-            }
-            lir->opcode = kX86Jmp32;
-            lir->flags.size = GetInsnSize(lir);
-            DCHECK(lir->u.m.def_mask->Equals(kEncodeAll));
-            DCHECK(lir->u.m.use_mask->Equals(kEncodeAll));
-            res = kRetryAll;
-          }
-          lir->operands[0] = delta;
-          break;
-        }
-        case kX86Jmp32: {
-          LIR *target_lir = lir->target;
-          DCHECK(target_lir != nullptr);
-          CodeOffset pc = lir->offset + 5 /* opcode + rel32 */;
-          CodeOffset target = target_lir->offset;
-          int delta = target - pc;
-          lir->operands[0] = delta;
-          break;
-        }
-        default:
-          if (lir->flags.fixup == kFixupLoad) {
-            LIR *target_lir = lir->target;
-            DCHECK(target_lir != nullptr);
-            CodeOffset target = target_lir->offset;
-            // Handle 64 bit RIP addressing.
-            if (lir->operands[1] == kRIPReg) {
-              // Offset is relative to next instruction.
-              lir->operands[2] = target - (lir->offset + lir->flags.size);
-            } else {
-              const LIR* anchor = UnwrapPointer<LIR>(lir->operands[4]);
-              lir->operands[2] = target - anchor->offset;
-              int new_size = GetInsnSize(lir);
-              if (new_size != lir->flags.size) {
-                lir->flags.size = new_size;
-                res = kRetryAll;
-              }
-            }
-          } else if (lir->flags.fixup == kFixupSwitchTable) {
-            DCHECK(cu_->target64);
-            DCHECK_EQ(lir->opcode, kX86Lea64RM)
-                << "Unknown instruction: " << X86Mir2Lir::EncodingMap[lir->opcode].name;
-            DCHECK_EQ(lir->operands[1], static_cast<int>(kRIPReg));
-            // Grab the target offset from the saved data.
-            const EmbeddedData* tab_rec = UnwrapPointer<Mir2Lir::EmbeddedData>(lir->operands[4]);
-            CodeOffset target = tab_rec->offset;
-            // Handle 64 bit RIP addressing.
-            // Offset is relative to next instruction.
-            lir->operands[2] = target - (lir->offset + lir->flags.size);
-          }
-          break;
-      }
-    }
-
-    /*
-     * If one of the pc-relative instructions expanded we'll have
-     * to make another pass.  Don't bother to fully assemble the
-     * instruction.
-     */
-    if (res != kSuccess) {
-      continue;
-    }
-    CHECK_EQ(static_cast<size_t>(lir->offset), code_buffer_.size());
-    const X86EncodingMap *entry = &X86Mir2Lir::EncodingMap[lir->opcode];
-    size_t starting_cbuf_size = code_buffer_.size();
-    switch (entry->kind) {
-      case kData:  // 4 bytes of data
-        code_buffer_.push_back(lir->operands[0]);
-        break;
-      case kNullary:  // 1 byte of opcode and possible prefixes.
-        EmitNullary(entry);
-        break;
-      case kRegOpcode:  // lir operands - 0: reg
-        EmitOpRegOpcode(entry, lir->operands[0]);
-        break;
-      case kReg:  // lir operands - 0: reg
-        EmitOpReg(entry, lir->operands[0]);
-        break;
-      case kMem:  // lir operands - 0: base, 1: disp
-        EmitOpMem(entry, lir->operands[0], lir->operands[1]);
-        break;
-      case kArray:  // lir operands - 0: base, 1: index, 2: scale, 3: disp
-        EmitOpArray(entry, lir->operands[0], lir->operands[1], lir->operands[2], lir->operands[3]);
-        break;
-      case kMemReg:  // lir operands - 0: base, 1: disp, 2: reg
-        EmitMemReg(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
-        break;
-      case kMemImm:  // lir operands - 0: base, 1: disp, 2: immediate
-        EmitMemImm(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
-        break;
-      case kArrayImm:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: immediate
-        EmitArrayImm(entry, lir->operands[0], lir->operands[1], lir->operands[2],
-                     lir->operands[3], lir->operands[4]);
-        break;
-      case kArrayReg:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
-        EmitArrayReg(entry, lir->operands[0], lir->operands[1], lir->operands[2],
-                     lir->operands[3], lir->operands[4]);
-        break;
-      case kRegMem:  // lir operands - 0: reg, 1: base, 2: disp
-        EmitRegMem(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
-        break;
-      case kRegArray:  // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
-        EmitRegArray(entry, lir->operands[0], lir->operands[1], lir->operands[2],
-                     lir->operands[3], lir->operands[4]);
-        break;
-      case kRegThread:  // lir operands - 0: reg, 1: disp
-        EmitRegThread(entry, lir->operands[0], lir->operands[1]);
-        break;
-      case kRegReg:  // lir operands - 0: reg1, 1: reg2
-        EmitRegReg(entry, lir->operands[0], lir->operands[1]);
-        break;
-      case kRegRegStore:  // lir operands - 0: reg2, 1: reg1
-        EmitRegReg(entry, lir->operands[1], lir->operands[0]);
-        break;
-      case kMemRegImm:  // lir operands - 0: base, 1: disp, 2: reg 3: immediate
-        EmitMemRegImm(entry, lir->operands[0], lir->operands[1], lir->operands[2],
-                      lir->operands[3]);
-        break;
-      case kRegRegImm:  // lir operands - 0: reg1, 1: reg2, 2: imm
-        EmitRegRegImm(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
-        break;
-      case kRegRegImmStore:   // lir operands - 0: reg2, 1: reg1, 2: imm
-        EmitRegRegImm(entry, lir->operands[1], lir->operands[0], lir->operands[2]);
-        break;
-      case kRegMemImm:  // lir operands - 0: reg, 1: base, 2: disp, 3: imm
-        EmitRegMemImm(entry, lir->operands[0], lir->operands[1], lir->operands[2],
-                      lir->operands[3]);
-        break;
-      case kRegImm:  // lir operands - 0: reg, 1: immediate
-        EmitRegImm(entry, lir->operands[0], lir->operands[1]);
-        break;
-      case kThreadImm:  // lir operands - 0: disp, 1: immediate
-        EmitThreadImm(entry, lir->operands[0], lir->operands[1]);
-        break;
-      case kMovRegImm:  // lir operands - 0: reg, 1: immediate
-        EmitMovRegImm(entry, lir->operands[0], lir->operands[1]);
-        break;
-      case kMovRegQuadImm: {
-          int64_t value = (static_cast<int64_t>(lir->operands[1]) << 32) |
-                          static_cast<uint32_t>(lir->operands[2]);
-          EmitMovRegImm(entry, lir->operands[0], value);
-        }
-        break;
-      case kShiftRegImm:  // lir operands - 0: reg, 1: immediate
-        EmitShiftRegImm(entry, lir->operands[0], lir->operands[1]);
-        break;
-      case kShiftMemImm:  // lir operands - 0: base, 1: disp, 2: immediate
-        EmitShiftMemImm(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
-        break;
-      case kShiftRegCl:  // lir operands - 0: reg, 1: cl
-        EmitShiftRegCl(entry, lir->operands[0], lir->operands[1]);
-        break;
-      case kShiftMemCl:  // lir operands - 0: base, 1: displacement, 2: cl
-        EmitShiftMemCl(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
-        break;
-      case kShiftRegRegCl:  // lir operands - 0: reg1, 1: reg2, 2: cl
-        EmitShiftRegRegCl(entry, lir->operands[1], lir->operands[0], lir->operands[2]);
-        break;
-      case kRegCond:  // lir operands - 0: reg, 1: condition
-        EmitRegCond(entry, lir->operands[0], lir->operands[1]);
-        break;
-      case kMemCond:  // lir operands - 0: base, 1: displacement, 2: condition
-        EmitMemCond(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
-        break;
-      case kRegRegCond:  // lir operands - 0: reg, 1: reg, 2: condition
-        EmitRegRegCond(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
-        break;
-      case kRegMemCond:  // lir operands - 0: reg, 1: base, 2: displacement, 3: condition
-        EmitRegMemCond(entry, lir->operands[0], lir->operands[1], lir->operands[2],
-                       lir->operands[3]);
-        break;
-      case kJmp:  // lir operands - 0: rel
-        if (entry->opcode == kX86JmpT) {
-          // This works since the instruction format for jmp and call is basically the same and
-          // EmitCallThread loads opcode info.
-          EmitCallThread(entry, lir->operands[0]);
-        } else {
-          EmitJmp(entry, lir->operands[0]);
-        }
-        break;
-      case kJcc:  // lir operands - 0: rel, 1: CC, target assigned
-        EmitJcc(entry, lir->operands[0], lir->operands[1]);
-        break;
-      case kCall:
-        switch (entry->opcode) {
-          case kX86CallI:  // lir operands - 0: disp
-            EmitCallImmediate(entry, lir->operands[0]);
-            break;
-          case kX86CallM:  // lir operands - 0: base, 1: disp
-            EmitCallMem(entry, lir->operands[0], lir->operands[1]);
-            break;
-          case kX86CallT:  // lir operands - 0: disp
-            EmitCallThread(entry, lir->operands[0]);
-            break;
-          default:
-            EmitUnimplemented(entry, lir);
-            break;
-        }
-        break;
-      case kPcRel:  // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
-        EmitPcRel(entry, lir->operands[0], lir->operands[1], lir->operands[2],
-                  lir->operands[3], lir->operands[4]);
-        break;
-      case kNop:  // TODO: these instruction kinds are missing implementations.
-      case kThreadReg:
-      case kRegArrayImm:
-      case kShiftArrayImm:
-      case kShiftArrayCl:
-      case kArrayCond:
-      case kUnimplemented:
-        EmitUnimplemented(entry, lir);
-        break;
-    }
-    DCHECK_EQ(lir->flags.size, GetInsnSize(lir));
-    CHECK_EQ(lir->flags.size, code_buffer_.size() - starting_cbuf_size)
-        << "Instruction size mismatch for entry: " << X86Mir2Lir::EncodingMap[lir->opcode].name;
-  }
-  return res;
-}
-
-// LIR offset assignment.
-// TODO: consolidate w/ Arm assembly mechanism.
-int X86Mir2Lir::AssignInsnOffsets() {
-  LIR* lir;
-  int offset = 0;
-
-  for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
-    lir->offset = offset;
-    if (LIKELY(!IsPseudoLirOp(lir->opcode))) {
-      if (!lir->flags.is_nop) {
-        offset += lir->flags.size;
-      }
-    } else if (UNLIKELY(lir->opcode == kPseudoPseudoAlign4)) {
-      if (offset & 0x2) {
-        offset += 2;
-        lir->operands[0] = 1;
-      } else {
-        lir->operands[0] = 0;
-      }
-    }
-    /* Pseudo opcodes don't consume space */
-  }
-  return offset;
-}
-
-/*
- * Walk the compilation unit and assign offsets to instructions
- * and literals and compute the total size of the compiled unit.
- * TODO: consolidate w/ Arm assembly mechanism.
- */
-void X86Mir2Lir::AssignOffsets() {
-  int offset = AssignInsnOffsets();
-
-  if (const_vectors_ != nullptr) {
-    // Vector literals must be 16-byte aligned. The header that is placed
-    // in the code section causes misalignment so we take it into account.
-    // Otherwise, we are sure that on x86 the method is aligned to 16.
-    DCHECK_EQ(GetInstructionSetAlignment(cu_->instruction_set), 16u);
-    uint32_t bytes_to_fill = (0x10 - ((offset + sizeof(OatQuickMethodHeader)) & 0xF)) & 0xF;
-    offset += bytes_to_fill;
-
-    // Now assign each literal the right offset.
-    for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
-      p->offset = offset;
-      offset += 16;
-    }
-  }
-
-  /* Const values have to be word aligned */
-  offset = RoundUp(offset, 4);
-
-  /* Set up offsets for literals */
-  data_offset_ = offset;
-
-  offset = AssignLiteralOffset(offset);
-
-  offset = AssignSwitchTablesOffset(offset);
-
-  offset = AssignFillArrayDataOffset(offset);
-
-  total_size_ = offset;
-}
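The fill computation above pads so that the first vector literal, placed after the method header, lands on a 16-byte boundary. A worked example (the header size used here is illustrative):

// With header_size = 24 and offset = 100: (0x10 - ((100 + 24) & 0xF)) & 0xF
// = (16 - 12) & 15 = 4 bytes of fill before the first 16-byte literal.
uint32_t VectorLiteralFill(uint32_t offset, uint32_t header_size) {
  return (0x10u - ((offset + header_size) & 0xFu)) & 0xFu;
}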
-
-/*
- * Go over each instruction in the list and calculate the offset from the top
- * before sending them off to the assembler. If an out-of-range branch distance
- * is seen, rearrange the instructions a bit to correct it.
- * TODO: consolidate w/ Arm assembly mechanism.
- */
-void X86Mir2Lir::AssembleLIR() {
-  cu_->NewTimingSplit("Assemble");
-
-  // We will remove the method address if we never ended up using it.
-  if (pc_rel_base_reg_.Valid() && !pc_rel_base_reg_used_) {
-    if (kIsDebugBuild) {
-      LOG(WARNING) << "PC-relative addressing base promoted but unused in "
-          << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-    }
-    setup_pc_rel_base_reg_->flags.is_nop = true;
-    NEXT_LIR(setup_pc_rel_base_reg_)->flags.is_nop = true;
-  }
-
-  AssignOffsets();
-  int assembler_retries = 0;
-  /*
-   * Assemble here.  Note that we generate code with optimistic assumptions
-   * and if found not to work, we'll have to redo the sequence and retry.
-   */
-
-  while (true) {
-    AssemblerStatus res = AssembleInstructions(first_lir_insn_, 0);
-    if (res == kSuccess) {
-      break;
-    } else {
-      assembler_retries++;
-      if (assembler_retries > MAX_ASSEMBLER_RETRIES) {
-        CodegenDump();
-        LOG(FATAL) << "Assembler error - too many retries";
-      }
-      // Redo offsets and try again
-      AssignOffsets();
-      code_buffer_.clear();
-    }
-  }
-
-  // Install literals
-  InstallLiteralPools();
-
-  // Install switch tables
-  InstallSwitchTables();
-
-  // Install fill array data
-  InstallFillArrayData();
-
-  // Create the mapping table and native offset to reference map.
-  cu_->NewTimingSplit("PcMappingTable");
-  CreateMappingTables();
-
-  cu_->NewTimingSplit("GcMap");
-  CreateNativeGcMap();
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/x86/backend_x86.h b/compiler/dex/quick/x86/backend_x86.h
deleted file mode 100644
index f73db94..0000000
--- a/compiler/dex/quick/x86/backend_x86.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_X86_BACKEND_X86_H_
-#define ART_COMPILER_DEX_QUICK_X86_BACKEND_X86_H_
-
-namespace art {
-
-struct CompilationUnit;
-class Mir2Lir;
-class MIRGraph;
-class ArenaAllocator;
-
-Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
-                          ArenaAllocator* const arena);
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_X86_BACKEND_X86_H_
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
deleted file mode 100644
index 9cb45a4..0000000
--- a/compiler/dex/quick/x86/call_x86.cc
+++ /dev/null
@@ -1,424 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This file contains codegen for the X86 ISA */
-
-#include "codegen_x86.h"
-
-#include "art_method.h"
-#include "base/logging.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
-#include "gc/accounting/card_table.h"
-#include "mirror/object_array-inl.h"
-#include "utils/dex_cache_arrays_layout-inl.h"
-#include "x86_lir.h"
-
-namespace art {
-
-/*
- * The sparse table in the literal pool is an array of <key,displacement>
- * pairs.
- */
-void X86Mir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  GenSmallSparseSwitch(mir, table_offset, rl_src);
-}
-
-/*
- * Code pattern will look something like:
- *
- * mov  r_val, ..
- * call 0
- * pop  r_start_of_method
- * sub  r_start_of_method, ..
- * mov  r_key_reg, r_val
- * sub  r_key_reg, low_key
- * cmp  r_key_reg, size-1  ; bound check
- * ja   done
- * mov  r_disp, [r_start_of_method + r_key_reg * 4 + table_offset]
- * add  r_start_of_method, r_disp
- * jmp  r_start_of_method
- * done:
- */
-void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
-  // Add the table to the list - we'll process it later
-  SwitchTable* tab_rec =
-      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
-  tab_rec->switch_mir = mir;
-  tab_rec->table = table;
-  tab_rec->vaddr = current_dalvik_offset_;
-  int size = table[1];
-  switch_tables_.push_back(tab_rec);
-
-  // Get the switch value
-  rl_src = LoadValue(rl_src, kCoreReg);
-
-  int low_key = s4FromSwitchData(&table[2]);
-  RegStorage keyReg;
-  // Remove the bias, if necessary
-  if (low_key == 0) {
-    keyReg = rl_src.reg;
-  } else {
-    keyReg = AllocTemp();
-    OpRegRegImm(kOpSub, keyReg, rl_src.reg, low_key);
-  }
-
-  // Bounds check - if < 0 or >= size, continue past the switch.
-  OpRegImm(kOpCmp, keyReg, size - 1);
-  LIR* branch_over = OpCondBranch(kCondHi, nullptr);
-
-  RegStorage addr_for_jump;
-  if (cu_->target64) {
-    RegStorage table_base = AllocTempWide();
-    // Load the address of the table into table_base.
-    LIR* lea = RawLIR(current_dalvik_offset_, kX86Lea64RM, table_base.GetReg(), kRIPReg,
-                      256, 0, WrapPointer(tab_rec));
-    lea->flags.fixup = kFixupSwitchTable;
-    AppendLIR(lea);
-
-    // Load the offset from the table out of the table.
-    addr_for_jump = AllocTempWide();
-    NewLIR5(kX86MovsxdRA, addr_for_jump.GetReg(), table_base.GetReg(), keyReg.GetReg(), 2, 0);
-
-    // Add the offset from the table to the table base.
-    OpRegReg(kOpAdd, addr_for_jump, table_base);
-    tab_rec->anchor = nullptr;  // Unused for x86-64.
-  } else {
-    // Get the PC to a register and get the anchor.
-    LIR* anchor;
-    RegStorage r_pc = GetPcAndAnchor(&anchor);
-
-    // Load the displacement from the switch table.
-    addr_for_jump = AllocTemp();
-    NewLIR5(kX86PcRelLoadRA, addr_for_jump.GetReg(), r_pc.GetReg(), keyReg.GetReg(),
-            2, WrapPointer(tab_rec));
-    // Add displacement and r_pc to get the address.
-    OpRegReg(kOpAdd, addr_for_jump, r_pc);
-    tab_rec->anchor = anchor;
-  }
-
-  // ..and go!
-  NewLIR1(kX86JmpR, addr_for_jump.GetReg());
-
-  /* branch_over target here */
-  LIR* target = NewLIR0(kPseudoTargetLabel);
-  branch_over->target = target;
-}
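The table reads above follow the Dex packed-switch payload layout, which stores 32-bit fields as pairs of 16-bit code units. A sketch of that view (the struct is illustrative, not from the tree):

// table[0]: ident (0x0100), table[1]: entry count,
// table[2..3]: low key as two 16-bit halves, then the branch targets.
struct PackedSwitchPayloadView {
  const uint16_t* table;
  uint16_t Size() const { return table[1]; }
  int32_t LowKey() const {
    return static_cast<int32_t>(table[2] | (static_cast<uint32_t>(table[3]) << 16));
  }
};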
-
-void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
-  int ex_offset = cu_->target64 ?
-      Thread::ExceptionOffset<8>().Int32Value() :
-      Thread::ExceptionOffset<4>().Int32Value();
-  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
-  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
-  NewLIR2(cu_->target64 ? kX86Mov64TI : kX86Mov32TI, ex_offset, 0);
-  StoreValue(rl_dest, rl_result);
-}
-
-void X86Mir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
-  DCHECK_EQ(tgt_addr_reg.Is64Bit(), cu_->target64);
-  RegStorage reg_card_base = AllocTempRef();
-  RegStorage reg_card_no = AllocTempRef();
-  int ct_offset = cu_->target64 ?
-      Thread::CardTableOffset<8>().Int32Value() :
-      Thread::CardTableOffset<4>().Int32Value();
-  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, reg_card_base.GetReg(), ct_offset);
-  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
-  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
-  FreeTemp(reg_card_base);
-  FreeTemp(reg_card_no);
-}
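The three emitted instructions above form the usual card-table write barrier: index the table by the target address shifted down by the card shift, then store one byte. In plain C++ terms (illustrative, mirroring the trick of reusing the table base's low byte as the dirty value):

// Dirty the card covering 'target_addr'. StoreBaseIndexed above stores
// reg_card_base's low byte at card_table[target_addr >> card_shift].
void MarkCard(uint8_t* card_table, uintptr_t target_addr, unsigned card_shift) {
  card_table[target_addr >> card_shift] =
      static_cast<uint8_t>(reinterpret_cast<uintptr_t>(card_table));
}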
-
-static dwarf::Reg DwarfCoreReg(bool is_x86_64, int num) {
-  return is_x86_64 ? dwarf::Reg::X86_64Core(num) : dwarf::Reg::X86Core(num);
-}
-
-void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
-  /*
-   * On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live.  Let the register
-   * allocation mechanism know so it doesn't try to use any of them when
-   * expanding the frame or flushing.  This leaves the utility
-   * code with no spare temps.
-   */
-  const RegStorage arg0 = TargetReg32(kArg0);
-  const RegStorage arg1 = TargetReg32(kArg1);
-  const RegStorage arg2 = TargetReg32(kArg2);
-  LockTemp(arg0);
-  LockTemp(arg1);
-  LockTemp(arg2);
-
-  /*
-   * We can safely skip the stack overflow check if we're
-   * a leaf *and* our frame size < fudge factor.
-   */
-  const InstructionSet isa = cu_->target64 ? kX86_64 : kX86;
-  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, isa);
-  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
-
-  // If we are doing an implicit stack overflow check, perform the load immediately
-  // before the stack pointer is decremented and anything is saved.
-  if (!skip_overflow_check &&
-      cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
-    // Implicit stack overflow check.
-    // test eax,[esp + -overflow]
-    int overflow = GetStackOverflowReservedBytes(isa);
-    NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rSP.GetReg(), -overflow);
-    MarkPossibleStackOverflowException();
-  }
-
-  /* Build frame, return address already on stack */
-  cfi_.SetCurrentCFAOffset(GetInstructionSetPointerSize(cu_->instruction_set));
-  OpRegImm(kOpSub, rs_rSP, frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));
-  cfi_.DefCFAOffset(frame_size_);
-
-  /* Spill core callee saves */
-  SpillCoreRegs();
-  SpillFPRegs();
-  if (!skip_overflow_check) {
-    class StackOverflowSlowPath : public LIRSlowPath {
-     public:
-      StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
-          : LIRSlowPath(m2l, branch), sp_displace_(sp_displace) {
-      }
-      void Compile() OVERRIDE {
-        m2l_->ResetRegPool();
-        m2l_->ResetDefTracking();
-        GenerateTargetLabel(kPseudoThrowTarget);
-        const RegStorage local_rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
-        m2l_->OpRegImm(kOpAdd, local_rs_rSP, sp_displace_);
-        m2l_->cfi().AdjustCFAOffset(-sp_displace_);
-        m2l_->ClobberCallerSave();
-        // Call the throw entrypoint directly; do not mark a safepoint PC.
-        m2l_->CallHelper(RegStorage::InvalidReg(), kQuickThrowStackOverflow,
-                         false /* MarkSafepointPC */, false /* UseLink */);
-        m2l_->cfi().AdjustCFAOffset(sp_displace_);
-      }
-
-     private:
-      const size_t sp_displace_;
-    };
-    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
-      // TODO: for large frames we should do something like:
-      // spill ebp
-      // lea ebp, [esp + frame_size]
-      // cmp ebp, fs:[stack_end_]
-      // jcc stack_overflow_exception
-      // mov esp, ebp
-      // in case a signal comes in that's not using an alternate signal stack and the large frame
-      // may have moved us outside of the reserved area at the end of the stack.
-      // cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath
-      if (cu_->target64) {
-        OpRegThreadMem(kOpCmp, rs_rX86_SP_64, Thread::StackEndOffset<8>());
-      } else {
-        OpRegThreadMem(kOpCmp, rs_rX86_SP_32, Thread::StackEndOffset<4>());
-      }
-      LIR* branch = OpCondBranch(kCondUlt, nullptr);
-      AddSlowPath(
-        new(arena_)StackOverflowSlowPath(this, branch,
-                                         frame_size_ -
-                                         GetInstructionSetPointerSize(cu_->instruction_set)));
-    }
-  }
-
-  FlushIns(ArgLocs, rl_method);
-
-  // We can promote the PC of an anchor for PC-relative addressing to a register
-  // if it's used at least twice. Without investigating where we should lazily
-  // load the reference, we conveniently load it after flushing inputs.
-  if (pc_rel_base_reg_.Valid()) {
-    DCHECK(!cu_->target64);
-    setup_pc_rel_base_reg_ = OpLoadPc(pc_rel_base_reg_);
-  }
-
-  FreeTemp(arg0);
-  FreeTemp(arg1);
-  FreeTemp(arg2);
-}
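The implicit overflow check above is a single faulting load: if [esp - overflow] lies in the protected zone below the stack limit, the access traps and the fault handler raises StackOverflowError. Schematically (illustrative, not the runtime's handler):

// Touch the lowest address the new frame could reach; a fault here is
// translated into a StackOverflowError by the signal handler.
inline void ProbeStackFrame(const uint8_t* sp, size_t reserved_bytes) {
  volatile uint8_t probe = *(sp - reserved_bytes);
  (void)probe;
}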
-
-void X86Mir2Lir::GenExitSequence() {
-  cfi_.RememberState();
-  /*
-   * In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
-   * allocated by the register utilities as temps.
-   */
-  LockTemp(rs_rX86_RET0);
-  LockTemp(rs_rX86_RET1);
-
-  UnSpillCoreRegs();
-  UnSpillFPRegs();
-  /* Remove frame except for return address */
-  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
-  int adjust = frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set);
-  OpRegImm(kOpAdd, rs_rSP, adjust);
-  cfi_.AdjustCFAOffset(-adjust);
-  // There is only the return PC on the stack now.
-  NewLIR0(kX86Ret);
-  // The CFI should be restored for any code that follows the exit block.
-  cfi_.RestoreState();
-  cfi_.DefCFAOffset(frame_size_);
-}
-
-void X86Mir2Lir::GenSpecialExitSequence() {
-  NewLIR0(kX86Ret);
-}
-
-void X86Mir2Lir::GenSpecialEntryForSuspend() {
-  // Keep 16-byte stack alignment, there's already the return address, so
-  //   - for 32-bit push EAX, i.e. ArtMethod*, ESI, EDI,
-  //   - for 64-bit push RAX, i.e. ArtMethod*.
-  const int kRegSize = cu_->target64 ? 8 : 4;
-  cfi_.SetCurrentCFAOffset(kRegSize);  // Return address.
-  if (!cu_->target64) {
-    DCHECK(!IsTemp(rs_rSI));
-    DCHECK(!IsTemp(rs_rDI));
-    core_spill_mask_ =
-        (1u << rs_rDI.GetRegNum()) | (1u << rs_rSI.GetRegNum()) | (1u << rs_rRET.GetRegNum());
-    num_core_spills_ = 3u;
-  } else {
-    core_spill_mask_ = (1u << rs_rRET.GetRegNum());
-    num_core_spills_ = 1u;
-  }
-  fp_spill_mask_ = 0u;
-  num_fp_spills_ = 0u;
-  frame_size_ = 16u;
-  core_vmap_table_.clear();
-  fp_vmap_table_.clear();
-  if (!cu_->target64) {
-    NewLIR1(kX86Push32R, rs_rDI.GetReg());
-    cfi_.AdjustCFAOffset(kRegSize);
-    cfi_.RelOffset(DwarfCoreReg(cu_->target64, rs_rDI.GetRegNum()), 0);
-    NewLIR1(kX86Push32R, rs_rSI.GetReg());
-    cfi_.AdjustCFAOffset(kRegSize);
-    cfi_.RelOffset(DwarfCoreReg(cu_->target64, rs_rSI.GetRegNum()), 0);
-  }
-  NewLIR1(kX86Push32R, TargetReg(kArg0, kRef).GetReg());  // ArtMethod*
-  cfi_.AdjustCFAOffset(kRegSize);
-  // Do not generate CFI for scratch register.
-}
-
-void X86Mir2Lir::GenSpecialExitForSuspend() {
-  const int kRegSize = cu_->target64 ? 8 : 4;
-  // Pop the frame. (ArtMethod* no longer needed but restore it anyway.)
-  NewLIR1(kX86Pop32R, TargetReg(kArg0, kRef).GetReg());  // ArtMethod*
-  cfi_.AdjustCFAOffset(-kRegSize);
-  if (!cu_->target64) {
-    NewLIR1(kX86Pop32R, rs_rSI.GetReg());
-    cfi_.AdjustCFAOffset(-kRegSize);
-    cfi_.Restore(DwarfCoreReg(cu_->target64, rs_rSI.GetRegNum()));
-    NewLIR1(kX86Pop32R, rs_rDI.GetReg());
-    cfi_.AdjustCFAOffset(-kRegSize);
-    cfi_.Restore(DwarfCoreReg(cu_->target64, rs_rDI.GetRegNum()));
-  }
-}
-
-void X86Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
-  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
-    return;
-  }
-  // Implicit null pointer check.
-  // test eax,[arg1+0]
-  NewLIR3(kX86Test32RM, rs_rAX.GetReg(), reg.GetReg(), 0);
-  MarkPossibleNullPointerException(opt_flags);
-}
-
-/*
- * Bit of a hack here - in the absence of a real scheduling pass,
- * emit the next instruction in static & direct invoke sequences.
- */
-int X86Mir2Lir::X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
-                                  int state, const MethodReference& target_method,
-                                  uint32_t,
-                                  uintptr_t direct_code ATTRIBUTE_UNUSED, uintptr_t direct_method,
-                                  InvokeType type) {
-  X86Mir2Lir* cg = static_cast<X86Mir2Lir*>(cu->cg.get());
-  if (info->string_init_offset != 0) {
-    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
-    switch (state) {
-    case 0: {  // Grab target method* from thread pointer
-      cg->NewLIR2(kX86Mov32RT, arg0_ref.GetReg(), info->string_init_offset);
-      break;
-    }
-    default:
-      return -1;
-    }
-  } else if (direct_method != 0) {
-    switch (state) {
-    case 0:  // Get the current Method* [sets kArg0]
-      if (direct_method != static_cast<uintptr_t>(-1)) {
-        auto target_reg = cg->TargetReg(kArg0, kRef);
-        if (target_reg.Is64Bit()) {
-          cg->LoadConstantWide(target_reg, direct_method);
-        } else {
-          cg->LoadConstant(target_reg, direct_method);
-        }
-      } else {
-        cg->LoadMethodAddress(target_method, type, kArg0);
-      }
-      break;
-    default:
-      return -1;
-    }
-  } else if (cg->CanUseOpPcRelDexCacheArrayLoad()) {
-    switch (state) {
-      case 0: {
-        CHECK_EQ(cu->dex_file, target_method.dex_file);
-        size_t offset = cg->dex_cache_arrays_layout_.MethodOffset(target_method.dex_method_index);
-        cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, cg->TargetReg(kArg0, kRef),
-                                     cu->target64);
-        break;
-      }
-      default:
-        return -1;
-    }
-  } else {
-    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
-    switch (state) {
-    case 0:  // Get the current Method* [sets kArg0]
-      // TUNING: we can save a reg copy if Method* has been promoted.
-      cg->LoadCurrMethodDirect(arg0_ref);
-      break;
-    case 1:  // Get method->dex_cache_resolved_methods_
-      cg->LoadBaseDisp(arg0_ref,
-                       ArtMethod::DexCacheResolvedMethodsOffset(
-                           cu->target64 ? kX86_64PointerSize : kX86PointerSize).Int32Value(),
-                       arg0_ref,
-                       cu->target64 ? k64 : k32,
-                       kNotVolatile);
-      break;
-    case 2: {
-      // Grab target method*
-      CHECK_EQ(cu->dex_file, target_method.dex_file);
-      const size_t pointer_size = GetInstructionSetPointerSize(cu->instruction_set);
-      cg->LoadWordDisp(arg0_ref,
-                       cg->GetCachePointerOffset(target_method.dex_method_index, pointer_size),
-                       arg0_ref);
-      break;
-    }
-    default:
-      return -1;
-    }
-  }
-  return state + 1;
-}
-
-NextCallInsn X86Mir2Lir::GetNextSDCallInsn() {
-  return X86NextSDCallInsn;
-}
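GetNextSDCallInsn hands the invoke lowering a resumable state machine: the driver calls it repeatedly, interleaved with argument setup, until it returns -1. A simplified driver shape (illustrative only; the real caller interleaves argument moves between steps):

// Advance one emission step per call; -1 signals the sequence is complete.
void DriveCallSequence(NextCallInsn next_call_insn, CompilationUnit* cu,
                       CallInfo* info, const MethodReference& target_method,
                       uintptr_t direct_code, uintptr_t direct_method,
                       InvokeType type) {
  int state = 0;
  while (state >= 0) {
    state = next_call_insn(cu, info, state, target_method, 0u,
                           direct_code, direct_method, type);
  }
}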
-
-}  // namespace art
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
deleted file mode 100644
index 11d9d4a..0000000
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ /dev/null
@@ -1,985 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
-#define ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
-
-#include "base/logging.h"
-#include "dex/compiler_ir.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir.h"
-#include "x86_lir.h"
-
-#include <map>
-#include <vector>
-
-namespace art {
-
-class X86Mir2Lir FINAL : public Mir2Lir {
- protected:
-  class InToRegStorageX86_64Mapper : public InToRegStorageMapper {
-   public:
-    explicit InToRegStorageX86_64Mapper(Mir2Lir* m2l)
-        : m2l_(m2l), cur_core_reg_(0), cur_fp_reg_(0) {}
-    virtual RegStorage GetNextReg(ShortyArg arg);
-    virtual void Reset() OVERRIDE {
-      cur_core_reg_ = 0;
-      cur_fp_reg_ = 0;
-    }
-   protected:
-    Mir2Lir* m2l_;
-    size_t cur_core_reg_;
-    size_t cur_fp_reg_;
-  };
-
-  class InToRegStorageX86Mapper : public InToRegStorageX86_64Mapper {
-   public:
-    explicit InToRegStorageX86Mapper(Mir2Lir* m2l)
-        : InToRegStorageX86_64Mapper(m2l) { }
-    virtual RegStorage GetNextReg(ShortyArg arg);
-  };
-
-  InToRegStorageX86_64Mapper in_to_reg_storage_x86_64_mapper_;
-  InToRegStorageX86Mapper in_to_reg_storage_x86_mapper_;
-  InToRegStorageMapper* GetResetedInToRegStorageMapper() OVERRIDE {
-    InToRegStorageMapper* res;
-    if (cu_->target64) {
-      res = &in_to_reg_storage_x86_64_mapper_;
-    } else {
-      res = &in_to_reg_storage_x86_mapper_;
-    }
-    res->Reset();
-    return res;
-  }
-
-  class ExplicitTempRegisterLock {
-  public:
-    ExplicitTempRegisterLock(X86Mir2Lir* mir_to_lir, int n_regs, ...);
-    ~ExplicitTempRegisterLock();
-  protected:
-    std::vector<RegStorage> temp_regs_;
-    X86Mir2Lir* const mir_to_lir_;
-  };
-
-  virtual int GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) OVERRIDE;
-
- public:
-  X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
-
-  // Required for target - codegen helpers.
-  bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
-                          RegLocation rl_dest, int lit) OVERRIDE;
-  bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
-  void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
-                                  int32_t constant) OVERRIDE;
-  void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
-                                   int64_t constant) OVERRIDE;
-  LIR* CheckSuspendUsingLoad() OVERRIDE;
-  RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
-  LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
-                    OpSize size, VolatileKind is_volatile) OVERRIDE;
-  LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
-                       OpSize size) OVERRIDE;
-  LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
-  LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
-  void GenLongToInt(RegLocation rl_dest, RegLocation rl_src);
-  LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
-                     OpSize size, VolatileKind is_volatile) OVERRIDE;
-  LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
-                        OpSize size) OVERRIDE;
-
-  /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
-  void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
-
-  bool CanUseOpPcRelDexCacheArrayLoad() const OVERRIDE;
-  void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest, bool wide)
-      OVERRIDE;
-
-  void GenImplicitNullCheck(RegStorage reg, int opt_flags) OVERRIDE;
-
-  // Required for target - register utilities.
-  RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
-  RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
-    if (wide_kind == kWide) {
-      if (cu_->target64) {
-        return As64BitReg(TargetReg32(symbolic_reg));
-      } else {
-        if (symbolic_reg >= kFArg0 && symbolic_reg <= kFArg3) {
-          // We want an XMM, not a pair.
-          return As64BitReg(TargetReg32(symbolic_reg));
-        }
-        // x86: construct a pair.
-        DCHECK((kArg0 <= symbolic_reg && symbolic_reg < kArg3) ||
-               (kRet0 == symbolic_reg));
-        return RegStorage::MakeRegPair(TargetReg32(symbolic_reg),
-                                 TargetReg32(static_cast<SpecialTargetRegister>(symbolic_reg + 1)));
-      }
-    } else if (wide_kind == kRef && cu_->target64) {
-      return As64BitReg(TargetReg32(symbolic_reg));
-    } else {
-      return TargetReg32(symbolic_reg);
-    }
-  }
-  RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
-    return TargetReg(symbolic_reg, cu_->target64 ? kWide : kNotWide);
-  }
-
-  RegLocation GetReturnAlt() OVERRIDE;
-  RegLocation GetReturnWideAlt() OVERRIDE;
-  RegLocation LocCReturn() OVERRIDE;
-  RegLocation LocCReturnRef() OVERRIDE;
-  RegLocation LocCReturnDouble() OVERRIDE;
-  RegLocation LocCReturnFloat() OVERRIDE;
-  RegLocation LocCReturnWide() OVERRIDE;
-
-  ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
-  void AdjustSpillMask() OVERRIDE;
-  void ClobberCallerSave() OVERRIDE;
-  void FreeCallTemps() OVERRIDE;
-  void LockCallTemps() OVERRIDE;
-
-  void CompilerInitializeRegAlloc() OVERRIDE;
-  int VectorRegisterSize() OVERRIDE;
-  int NumReservableVectorRegisters(bool long_or_fp) OVERRIDE;
-
-  // Required for target - miscellaneous.
-  void AssembleLIR() OVERRIDE;
-  void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
-  void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
-                                ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
-  const char* GetTargetInstFmt(int opcode) OVERRIDE;
-  const char* GetTargetInstName(int opcode) OVERRIDE;
-  std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) OVERRIDE;
-  ResourceMask GetPCUseDefEncoding() const OVERRIDE;
-  uint64_t GetTargetInstFlags(int opcode) OVERRIDE;
-  size_t GetInsnSize(LIR* lir) OVERRIDE;
-  bool IsUnconditionalBranch(LIR* lir) OVERRIDE;
-
-  // Get the register class for load/store of a field.
-  RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
-
-  // Required for target - Dalvik-level generators.
-  void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
-                   RegLocation rl_dest, int scale) OVERRIDE;
-  void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
-                   RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) OVERRIDE;
-
-  void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                        RegLocation rl_src2) OVERRIDE;
-  void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                       RegLocation rl_src2) OVERRIDE;
-  void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                RegLocation rl_src2) OVERRIDE;
-  void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
-
-  bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) OVERRIDE;
-  bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) OVERRIDE;
-  bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) OVERRIDE;
-  bool GenInlinedReverseBits(CallInfo* info, OpSize size) OVERRIDE;
-  bool GenInlinedSqrt(CallInfo* info) OVERRIDE;
-  bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
-  bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
-  bool GenInlinedPeek(CallInfo* info, OpSize size) OVERRIDE;
-  bool GenInlinedPoke(CallInfo* info, OpSize size) OVERRIDE;
-  bool GenInlinedCharAt(CallInfo* info) OVERRIDE;
-
-  // Long instructions.
-  void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                      RegLocation rl_src2, int flags) OVERRIDE;
-  void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                         RegLocation rl_src2, int flags) OVERRIDE;
-  void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                         RegLocation rl_src1, RegLocation rl_shift, int flags) OVERRIDE;
-  void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) OVERRIDE;
-  void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
-  void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                      RegLocation rl_src1, RegLocation rl_shift) OVERRIDE;
-
-  /*
-   * @brief Generate a two-address long operation with a constant value.
-   * @param rl_dest location of result
-   * @param rl_src constant source operand
-   * @param op Opcode to be generated
-   * @return 'true' on success, 'false' otherwise
-   */
-  bool GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
-
-  /*
-   * @brief Generate a three-address long operation with a constant value.
-   * @param rl_dest location of result
-   * @param rl_src1 source operand
-   * @param rl_src2 constant source operand
-   * @param op Opcode to be generated
-   * @return 'true' on success, 'false' otherwise
-   */
-  bool GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
-                      Instruction::Code op);
-  /**
-   * @brief Generate a long arithmetic operation.
-   * @param rl_dest The destination.
-   * @param rl_src1 First operand.
-   * @param rl_src2 Second operand.
-   * @param op The DEX opcode for the operation.
-   * @param is_commutative The sources can be swapped if needed.
-   */
-  virtual void GenLongArith(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
-                            Instruction::Code op, bool is_commutative);
-
-  /**
-   * @brief Generate a two operand long arithmetic operation.
-   * @param rl_dest The destination.
-   * @param rl_src Second operand.
-   * @param op The DEX opcode for the operation.
-   */
-  void GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
-
-  /**
-   * @brief Generate a long operation.
-   * @param rl_dest The destination.  Must be in a register
-   * @param rl_src The other operand.  May be in a register or in memory.
-   * @param op The DEX opcode for the operation.
-   */
-  virtual void GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
-
-  // TODO: collapse reg_lo, reg_hi
-  RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div)
-      OVERRIDE;
-  RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) OVERRIDE;
-  void GenDivZeroCheckWide(RegStorage reg) OVERRIDE;
-  void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
-  void GenExitSequence() OVERRIDE;
-  void GenSpecialExitSequence() OVERRIDE;
-  void GenSpecialEntryForSuspend() OVERRIDE;
-  void GenSpecialExitForSuspend() OVERRIDE;
-  void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
-  void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
-  void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
-  void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
-                        int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                        RegisterClass dest_reg_class) OVERRIDE;
-  bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
-  void GenMoveException(RegLocation rl_dest) OVERRIDE;
-  void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
-                                     int first_bit, int second_bit) OVERRIDE;
-  void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
-  void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
-  void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
-  void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
-
-  /**
-   * @brief Implement instanceof for a final class with x86-specific code.
-   * @param use_declaring_class 'true' if we can use the class itself.
-   * @param type_idx Type index to use if use_declaring_class is 'false'.
-   * @param rl_dest Result to be set to 0 or 1.
-   * @param rl_src Object to be tested.
-   */
-  void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
-                          RegLocation rl_src) OVERRIDE;
-
-  // Single operation generators.
-  LIR* OpUnconditionalBranch(LIR* target) OVERRIDE;
-  LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) OVERRIDE;
-  LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) OVERRIDE;
-  LIR* OpCondBranch(ConditionCode cc, LIR* target) OVERRIDE;
-  LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) OVERRIDE;
-  LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
-  LIR* OpIT(ConditionCode cond, const char* guide) OVERRIDE;
-  void OpEndIT(LIR* it) OVERRIDE;
-  LIR* OpMem(OpKind op, RegStorage r_base, int disp) OVERRIDE;
-  void OpPcRelLoad(RegStorage reg, LIR* target) OVERRIDE;
-  LIR* OpReg(OpKind op, RegStorage r_dest_src) OVERRIDE;
-  void OpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
-  LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) OVERRIDE;
-  LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) OVERRIDE;
-  LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) OVERRIDE;
-  LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) OVERRIDE;
-  LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) OVERRIDE;
-  LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) OVERRIDE;
-  LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) OVERRIDE;
-  LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) OVERRIDE;
-  LIR* OpTestSuspend(LIR* target) OVERRIDE;
-  LIR* OpVldm(RegStorage r_base, int count) OVERRIDE;
-  LIR* OpVstm(RegStorage r_base, int count) OVERRIDE;
-  void OpRegCopyWide(RegStorage dest, RegStorage src) OVERRIDE;
-  bool GenInlinedCurrentThread(CallInfo* info) OVERRIDE;
-
-  bool InexpensiveConstantInt(int32_t value) OVERRIDE;
-  bool InexpensiveConstantFloat(int32_t value) OVERRIDE;
-  bool InexpensiveConstantLong(int64_t value) OVERRIDE;
-  bool InexpensiveConstantDouble(int64_t value) OVERRIDE;
-
-  /*
-   * @brief Should we try to optimize for two-address instructions?
-   * @return true if we try to avoid generating three-operand instructions.
-   */
-  virtual bool GenerateTwoOperandInstructions() const { return true; }
-
-  /*
-   * @brief x86 specific codegen for int operations.
-   * @param opcode Operation to perform.
-   * @param rl_dest Destination for the result.
-   * @param rl_lhs Left hand operand.
-   * @param rl_rhs Right hand operand.
-   * @param flags The instruction optimization flags.
-   */
-  void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_lhs,
-                     RegLocation rl_rhs, int flags) OVERRIDE;
-
-  /*
-   * @brief Load the Method* of a dex method into the register.
-   * @param target_method The MethodReference of the method to be invoked.
-   * @param type How the method will be invoked.
-   * @param symbolic_reg Symbolic register that will contain the method address.
-   * @note symbolic_reg will be passed to TargetReg to get the physical register.
-   */
-  void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
-                         SpecialTargetRegister symbolic_reg) OVERRIDE;
-
-  /*
-   * @brief Load the Class* of a Dex Class type into the register.
-   * @param dex_file DexFile that contains the class type.
-   * @param type_idx Index of the class type within the DexFile.
-   * @param symbolic_reg Symbolic register that will contain the class address.
-   * @note symbolic_reg will be passed to TargetReg to get the physical register.
-   */
-  void LoadClassType(const DexFile& dex_file, uint32_t type_idx,
-                     SpecialTargetRegister symbolic_reg) OVERRIDE;
-
-  NextCallInsn GetNextSDCallInsn() OVERRIDE;
-
-  /*
-   * @brief Generate a relative call to the method that will be patched at link time.
-   * @param target_method The MethodReference of the method to be invoked.
-   * @param type How the method will be invoked.
-   * @returns Call instruction
-   */
-  LIR* CallWithLinkerFixup(const MethodReference& target_method, InvokeType type);
-
-  /*
-   * @brief Generate the actual call insn based on the method info.
-   * @param method_info the lowering info for the method call.
-   * @returns Call instruction
-   */
-  LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
-
-  void AnalyzeMIR(RefCounts* core_counts, MIR* mir, uint32_t weight) OVERRIDE;
-  void CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs) OVERRIDE;
-  void DoPromotion() OVERRIDE;
-
-  /*
-   * @brief Handle x86 specific literals
-   */
-  void InstallLiteralPools() OVERRIDE;
-
-  LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
-
- protected:
-  RegStorage TargetReg32(SpecialTargetRegister reg) const;
-  // Casting of RegStorage
-  RegStorage As32BitReg(RegStorage reg) {
-    DCHECK(!reg.IsPair());
-    if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
-      if (kFailOnSizeError) {
-        LOG(FATAL) << "Expected 64b register " << reg.GetReg();
-      } else {
-        LOG(WARNING) << "Expected 64b register " << reg.GetReg();
-        return reg;
-      }
-    }
-    RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
-                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
-    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
-                             ->GetReg().GetReg(),
-              ret_val.GetReg());
-    return ret_val;
-  }
-
-  RegStorage As64BitReg(RegStorage reg) {
-    DCHECK(!reg.IsPair());
-    if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
-      if (kFailOnSizeError) {
-        LOG(FATAL) << "Expected 32b register " << reg.GetReg();
-      } else {
-        LOG(WARNING) << "Expected 32b register " << reg.GetReg();
-        return reg;
-      }
-    }
-    RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
-                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
-    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
-                             ->GetReg().GetReg(),
-              ret_val.GetReg());
-    return ret_val;
-  }
-
-  LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                           RegStorage r_dest, OpSize size);
-  LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                            RegStorage r_src, OpSize size, int opt_flags = 0);
-
-  int AssignInsnOffsets();
-  void AssignOffsets();
-  AssemblerStatus AssembleInstructions(LIR* first_lir_insn, CodeOffset start_addr);
-
-  size_t ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_index,
-                     int32_t raw_base, int32_t displacement);
-  void CheckValidByteRegister(const X86EncodingMap* entry, int32_t raw_reg);
-  void EmitPrefix(const X86EncodingMap* entry,
-                  int32_t raw_reg_r, int32_t raw_reg_x, int32_t raw_reg_b);
-  void EmitOpcode(const X86EncodingMap* entry);
-  void EmitPrefixAndOpcode(const X86EncodingMap* entry,
-                           int32_t reg_r, int32_t reg_x, int32_t reg_b);
-  void EmitDisp(uint8_t base, int32_t disp);
-  void EmitModrmThread(uint8_t reg_or_opcode);
-  void EmitModrmDisp(uint8_t reg_or_opcode, uint8_t base, int32_t disp);
-  void EmitModrmSibDisp(uint8_t reg_or_opcode, uint8_t base, uint8_t index, int scale,
-                        int32_t disp);
-  void EmitImm(const X86EncodingMap* entry, int64_t imm);
-  void EmitNullary(const X86EncodingMap* entry);
-  void EmitOpRegOpcode(const X86EncodingMap* entry, int32_t raw_reg);
-  void EmitOpReg(const X86EncodingMap* entry, int32_t raw_reg);
-  void EmitOpMem(const X86EncodingMap* entry, int32_t raw_base, int32_t disp);
-  void EmitOpArray(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
-                   int32_t disp);
-  void EmitMemReg(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t raw_reg);
-  void EmitRegMem(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base, int32_t disp);
-  void EmitRegArray(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base,
-                    int32_t raw_index, int scale, int32_t disp);
-  void EmitArrayReg(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
-                    int32_t disp, int32_t raw_reg);
-  void EmitMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t imm);
-  void EmitArrayImm(const X86EncodingMap* entry, int32_t raw_base, int32_t raw_index, int scale,
-                    int32_t raw_disp, int32_t imm);
-  void EmitRegThread(const X86EncodingMap* entry, int32_t raw_reg, int32_t disp);
-  void EmitRegReg(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2);
-  void EmitRegRegImm(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2, int32_t imm);
-  void EmitRegMemImm(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_base, int32_t disp,
-                     int32_t imm);
-  void EmitMemRegImm(const X86EncodingMap* entry, int32_t base, int32_t disp, int32_t raw_reg1,
-                     int32_t imm);
-  void EmitRegImm(const X86EncodingMap* entry, int32_t raw_reg, int32_t imm);
-  void EmitThreadImm(const X86EncodingMap* entry, int32_t disp, int32_t imm);
-  void EmitMovRegImm(const X86EncodingMap* entry, int32_t raw_reg, int64_t imm);
-  void EmitShiftRegImm(const X86EncodingMap* entry, int32_t raw_reg, int32_t imm);
-  void EmitShiftRegCl(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_cl);
-  void EmitShiftMemCl(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t raw_cl);
-  void EmitShiftRegRegCl(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2,
-                         int32_t raw_cl);
-  void EmitShiftMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t imm);
-  void EmitRegCond(const X86EncodingMap* entry, int32_t raw_reg, int32_t cc);
-  void EmitMemCond(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t cc);
-  void EmitRegRegCond(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2, int32_t cc);
-  void EmitRegMemCond(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_base, int32_t disp,
-                      int32_t cc);
-
-  void EmitJmp(const X86EncodingMap* entry, int32_t rel);
-  void EmitJcc(const X86EncodingMap* entry, int32_t rel, int32_t cc);
-  void EmitCallMem(const X86EncodingMap* entry, int32_t raw_base, int32_t disp);
-  void EmitCallImmediate(const X86EncodingMap* entry, int32_t disp);
-  void EmitCallThread(const X86EncodingMap* entry, int32_t disp);
-  void EmitPcRel(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_base_or_table,
-                 int32_t raw_index, int scale, int32_t table_or_disp);
-  void EmitUnimplemented(const X86EncodingMap* entry, LIR* lir);
-  void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
-                                int64_t val, ConditionCode ccode);
-  void GenConstWide(RegLocation rl_dest, int64_t value);
-  void GenMultiplyVectorSignedByte(RegStorage rs_dest_src1, RegStorage rs_src2);
-  void GenMultiplyVectorLong(RegStorage rs_dest_src1, RegStorage rs_src2);
-  void GenShiftByteVector(MIR* mir);
-  void AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3,
-                             uint32_t m4);
-  void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2,
-                          uint32_t m3, uint32_t m4);
-  void AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir);
-  virtual void LoadVectorRegister(RegStorage rs_dest, RegStorage rs_src, OpSize opsize,
-                                  int op_mov);
-
-  static bool ProvidesFullMemoryBarrier(X86OpCode opcode);
-
-  /*
-   * @brief Ensure that a temporary register is byte addressable.
-   * @returns a temporary guaranteed to be byte addressable.
-   */
-  virtual RegStorage AllocateByteRegister();
-
-  /*
-   * @brief Use a wide temporary as a 128-bit register
-   * @returns a 128-bit temporary register.
-   */
-  virtual RegStorage Get128BitRegister(RegStorage reg);
-
-  /*
-   * @brief Check if a register is byte addressable.
-   * @returns true if a register is byte addressable.
-   */
-  bool IsByteRegister(RegStorage reg) const;
-
-  void GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src, int64_t imm, bool is_div);
-
-  bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
-
-  /*
-   * @brief Generate inline code for the fast case of String.indexOf.
-   * @param info Call parameters
-   * @param zero_based 'true' if the search starts at index 0.
-   * @returns 'true' if the call was inlined, 'false' if a regular call needs to be
-   * generated.
-   */
-  bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
-
-  /**
-   * @brief Used to reserve a range of vector registers.
-   * @see kMirOpReserveVectorRegisters
-   * @param mir The extended MIR for reservation.
-   */
-  void ReserveVectorRegisters(MIR* mir);
-
-  /**
-   * @brief Used to return a range of vector registers.
-   * @see kMirOpReturnVectorRegisters
-   * @param mir The extended MIR for returning vector regs.
-   */
-  void ReturnVectorRegisters(MIR* mir);
-
-  /*
-   * @brief Load 128 bit constant into vector register.
-   * @param mir The MIR whose opcode is kMirConstVector
-   * @note vA is the TypeSize for the register.
-   * @note vB is the destination XMM register. arg[0..3] are 32 bit constant values.
-   */
-  void GenConst128(MIR* mir);
-
-  /*
-   * @brief MIR to move a vectorized register to another.
-   * @param mir The MIR whose opcode is kMirConstVector.
-   * @note vA: TypeSize
-   * @note vB: destination
-   * @note vC: source
-   */
-  void GenMoveVector(MIR* mir);
-
-  /*
-   * @brief Packed multiply of units in two vector registers: vB = vB .* vC using vA to know
-   * the type of the vector.
-   * @param mir The MIR whose opcode is kMirConstVector.
-   * @note vA: TypeSize
-   * @note vB: destination and source
-   * @note vC: source
-   */
-  void GenMultiplyVector(MIR* mir);
-
-  /*
-   * @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the
-   * type of the vector.
-   * @param mir The MIR whose opcode is kMirConstVector.
-   * @note vA: TypeSize
-   * @note vB: destination and source
-   * @note vC: source
-   */
-  void GenAddVector(MIR* mir);
-
-  /*
-   * @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the
-   * type of the vector.
-   * @param mir The MIR whose opcode is kMirConstVector.
-   * @note vA: TypeSize
-   * @note vB: destination and source
-   * @note vC: source
-   */
-  void GenSubtractVector(MIR* mir);
-
-  /*
-   * @brief Packed shift left of units in a vector register: vB = vB .<< vC using vA to know the
-   * type of the vector.
-   * @param mir The MIR whose opcode is kMirConstVector.
-   * @note vA: TypeSize
-   * @note vB: destination and source
-   * @note vC: immediate
-   */
-  void GenShiftLeftVector(MIR* mir);
-
-  /*
-   * @brief Packed signed shift right of units in a vector register: vB = vB .>> vC using vA to
-   * know the type of the vector.
-   * @param mir The MIR whose opcode is kMirConstVector.
-   * @note vA: TypeSize
-   * @note vB: destination and source
-   * @note vC: immediate
-   */
-  void GenSignedShiftRightVector(MIR* mir);
-
-  /*
-   * @brief Packed unsigned shift right of units in a vector register: vB = vB .>>> vC using vA
-   * to know the type of the vector.
-   * @param mir The MIR whose opcode is kMirConstVector.
-   * @note vA: TypeSize
-   * @note vB: destination and source
-   * @note vC: immediate
-   */
-  void GenUnsignedShiftRightVector(MIR* mir);
-
-  /*
-   * @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the
-   * type of the vector.
-   * @param mir The MIR whose opcode is kMirConstVector.
-   * @note vA: TypeSize
-   * @note vB: destination and source
-   * @note vC: source
-   */
-  void GenAndVector(MIR* mir);
-
-  /*
-   * @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the
-   * type of the vector.
-   * @param mir The MIR whose opcode is kMirConstVector.
-   * @note vA: TypeSize
-   * @note vB: destination and source
-   * @note vC: source
-   */
-  void GenOrVector(MIR* mir);
-
-  /*
-   * @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the
-   * type of the vector.
-   * @param mir The MIR whose opcode is kMirConstVector.
-   * @note vA: TypeSize
-   * @note vB: destination and source
-   * @note vC: source
-   */
-  void GenXorVector(MIR* mir);
-
-  /*
-   * @brief Reduce a 128-bit packed element into a single VR by taking lower bits
-   * @param mir The MIR whose opcode is kMirConstVector.
-   * @details Instruction does a horizontal addition of the packed elements and then adds it to VR.
-   * @note vA: TypeSize
-   * @note vB: destination and source VR (not vector register)
-   * @note vC: source (vector register)
-   */
-  void GenAddReduceVector(MIR* mir);
-
-  /*
-   * @brief Extract a packed element into a single VR.
-   * @param mir The MIR whose opcode is kMirConstVector.
-   * @note vA: TypeSize
-   * @note vB: destination VR (not vector register)
-   * @note vC: source (vector register)
-   * @note arg[0]: The index to use for extraction from vector register (which packed element).
-   */
-  void GenReduceVector(MIR* mir);
-
-  /*
-   * @brief Create a vector value, with all TypeSize values equal to vC
-   * @param mir The MIR whose opcode is kMirConstVector.
-   * @note vA: TypeSize.
-   * @note vB: destination vector register.
-   * @note vC: source VR (not vector register).
-   */
-  void GenSetVector(MIR* mir);
-
-  /**
-   * @brief Used to generate code for kMirOpPackedArrayGet.
-   * @param bb The basic block of MIR.
-   * @param mir The mir whose opcode is kMirOpPackedArrayGet.
-   */
-  void GenPackedArrayGet(BasicBlock* bb, MIR* mir);
-
-  /**
-   * @brief Used to generate code for kMirOpPackedArrayPut.
-   * @param bb The basic block of MIR.
-   * @param mir The mir whose opcode is kMirOpPackedArrayPut.
-   */
-  void GenPackedArrayPut(BasicBlock* bb, MIR* mir);
-
-  /*
-   * @brief Generate code for a vector opcode.
-   * @param bb The basic block in which the MIR is from.
-   * @param mir The MIR whose opcode is a non-standard opcode.
-   */
-  void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir);
-
-  /*
-   * @brief Return the correct x86 opcode for the Dex operation
-   * @param op Dex opcode for the operation
-   * @param loc Register location of the operand
-   * @param is_high_op 'true' if this is an operation on the high word
-   * @param value Immediate value for the operation.  Used for byte variants
-   * @returns the correct x86 opcode to perform the operation
-   */
-  X86OpCode GetOpcode(Instruction::Code op, RegLocation loc, bool is_high_op, int32_t value);
-
-  /*
-   * @brief Return the correct x86 opcode for the Dex operation
-   * @param op Dex opcode for the operation
-   * @param dest location of the destination.  May be register or memory.
-   * @param rhs Location for the rhs of the operation.  May be in register or memory.
-   * @param is_high_op 'true' if this is an operation on the high word
-   * @returns the correct x86 opcode to perform the operation
-   * @note at most one location may refer to memory
-   */
-  X86OpCode GetOpcode(Instruction::Code op, RegLocation dest, RegLocation rhs,
-                      bool is_high_op);
-
-  /*
-   * @brief Is this operation a no-op for this opcode and value
-   * @param op Dex opcode for the operation
-   * @param value Immediate value for the operation.
-   * @returns 'true' if the operation will have no effect
-   */
-  bool IsNoOp(Instruction::Code op, int32_t value);
-
-  /**
-   * @brief Calculate magic number and shift for a given divisor
-   * @param divisor divisor number for calculation
-   * @param magic holds the calculated magic number
-   * @param shift holds the calculated shift
-   * @param is_long 'true' if divisor is jlong, 'false' for jint.
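-   * @note Uses the standard magic-number division technique (see Warren,
-   * "Hacker's Delight", chapter 10).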
-   */
-  void CalculateMagicAndShift(int64_t divisor, int64_t& magic, int& shift, bool is_long);
-
-  /*
-   * @brief Generate an integer div or rem operation.
-   * @param rl_dest Destination Location.
-   * @param rl_src1 Numerator Location.
-   * @param rl_src2 Divisor Location.
-   * @param is_div 'true' if this is a division, 'false' for a remainder.
-   * @param flags The instruction optimization flags. They can indicate whether
-   * the exception check can be elided.
-   */
-  RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
-                        bool is_div, int flags);
-
-  /*
-   * @brief Generate an integer div or rem operation by a literal.
-   * @param rl_dest Destination Location.
-   * @param rl_src Numerator Location.
-   * @param lit Divisor.
-   * @param is_div 'true' if this is a division, 'false' for a remainder.
-   */
-  RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src, int lit, bool is_div);
-
-  /*
-   * Generate code to implement long shift operations.
-   * @param opcode The DEX opcode to specify the shift type.
-   * @param rl_dest The destination.
-   * @param rl_src The value to be shifted.
-   * @param shift_amount How much to shift.
-   * @param flags The instruction optimization flags.
-   * @returns the RegLocation of the result.
-   */
-  RegLocation GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                                RegLocation rl_src, int shift_amount, int flags);
-  /*
-   * Generate an imul of a register by a constant or a better sequence.
-   * @param dest Destination Register.
-   * @param src Source Register.
-   * @param val Constant multiplier.
-   */
-  void GenImulRegImm(RegStorage dest, RegStorage src, int val);
-
-  /*
-   * Generate an imul of a memory location by a constant or a better sequence.
-   * @param dest Destination Register.
-   * @param sreg Symbolic register.
-   * @param displacement Displacement on stack of Symbolic Register.
-   * @param val Constant multiplier.
-   */
-  void GenImulMemImm(RegStorage dest, int sreg, int displacement, int val);
-
-  /*
-   * @brief Compare memory to immediate, and branch if condition true.
-   * @param cond The condition code that when true will branch to the target.
-   * @param temp_reg A temporary register that can be used if compare memory is not
-   * supported by the architecture.
-   * @param base_reg The register holding the base address.
-   * @param offset The offset from the base.
-   * @param check_value The immediate to compare to.
-   * @param target branch target (or nullptr)
-   * @param compare output for getting LIR for comparison (or nullptr)
-   */
-  LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
-                         int offset, int check_value, LIR* target, LIR** compare);
-
-  void GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_double);
-
-  /*
-   * Can this operation use core registers without temporaries?
-   * @param rl_lhs Left hand operand.
-   * @param rl_rhs Right hand operand.
-   * @returns 'true' if the operation can proceed without needing temporary regs.
-   */
-  bool IsOperationSafeWithoutTemps(RegLocation rl_lhs, RegLocation rl_rhs);
-
-  /**
-   * @brief Generates inline code for conversion of long to FP by using the x87 unit.
-   * @param rl_dest The destination of the FP.
-   * @param rl_src The source of the long.
-   * @param is_double 'true' if dealing with double, 'false' for float.
-   */
-  virtual void GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_double);
-
-  void GenArrayBoundsCheck(RegStorage index, RegStorage array_base, int32_t len_offset);
-  void GenArrayBoundsCheck(int32_t index, RegStorage array_base, int32_t len_offset);
-
-  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
-  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegLocation value);
-  LIR* OpMemReg(OpKind op, RegLocation rl_dest, int value);
-  LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset);
-  LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset);
-  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset);
-  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset);
-  void OpTlsCmp(ThreadOffset<4> offset, int val);
-  void OpTlsCmp(ThreadOffset<8> offset, int val);
-
-  void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
-
-  // Try to do a long multiplication where rl_src2 is a constant. This simplified setup might fail,
-  // in which case false will be returned.
-  bool GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64_t val, int flags);
-  void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                  RegLocation rl_src2, int flags);
-  void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
-  void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
-  void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                     RegLocation rl_src2, bool is_div, int flags);
-
-  void SpillCoreRegs();
-  void UnSpillCoreRegs();
-  void UnSpillFPRegs();
-  void SpillFPRegs();
-
-  /*
-   * Mir2Lir's UpdateLoc() looks to see if the Dalvik value is currently live in any temp register
-   * without regard to data type.  In practice, this can result in UpdateLoc returning a
-   * location record for a Dalvik float value in a core register, and vice versa.  For targets
-   * which can inexpensively move data between core and float registers, this can often be a win.
-   * However, for x86 this is generally not a win.  These variants of UpdateLoc()
-   * take a register class argument - and will return an in-register location record only if
-   * the value is live in a temp register of the correct class.  Additionally, if the value is in
-   * a temp register of the wrong register class, it will be clobbered.
-   */
-  RegLocation UpdateLocTyped(RegLocation loc);
-  RegLocation UpdateLocWideTyped(RegLocation loc);
-
-  /*
-   * @brief Analyze one MIR float/double instruction
-   * @param opcode MIR instruction opcode.
-   * @param mir Instruction to analyze.
-   * @return true iff the instruction needs to load a literal using PC-relative addressing.
-   */
-  bool AnalyzeFPInstruction(int opcode, MIR* mir);
-
-  /*
-   * @brief Analyze one use of a double operand.
-   * @param rl_use Double RegLocation for the operand.
-   * @return true iff the instruction needs to load a literal using PC-relative addressing.
-   */
-  bool AnalyzeDoubleUse(RegLocation rl_use);
-
-  /*
-   * @brief Analyze one invoke-static MIR instruction
-   * @param mir Instruction to analyze.
-   * @return true iff the instruction needs to load a literal using PC-relative addressing.
-   */
-  bool AnalyzeInvokeStaticIntrinsic(MIR* mir);
-
-  // Information derived from analysis of MIR
-
-  // The base register for PC-relative addressing if promoted (32-bit only).
-  RegStorage pc_rel_base_reg_;
-
-  // Have we actually used the pc_rel_base_reg_?
-  bool pc_rel_base_reg_used_;
-
-  // Pointer to the "call +0" insn that sets up the promoted register for PC-relative addressing.
-  // The anchor "pop" insn is NEXT_LIR(setup_pc_rel_base_reg_). The whole "call +0; pop <reg>"
-  // sequence will be removed in AssembleLIR() if we do not actually use PC-relative addressing.
-  LIR* setup_pc_rel_base_reg_;  // There are 2 chained insns (no reordering allowed).
-
-  // Instructions needing patching with Method* values.
-  ArenaVector<LIR*> method_address_insns_;
-
-  // Instructions needing patching with Class Type* values.
-  ArenaVector<LIR*> class_type_address_insns_;
-
-  // Instructions needing patching with PC relative code addresses.
-  ArenaVector<LIR*> call_method_insns_;
-
-  // Instructions needing patching with PC relative code addresses.
-  ArenaVector<LIR*> dex_cache_access_insns_;
-
-  // The list of const vector literals.
-  LIR* const_vectors_;
-
-  /*
-   * @brief Search for a matching vector literal
-   * @param constants An array of size 4 containing the 32-bit constants.
-   * @returns pointer to matching LIR constant, or nullptr if not found.
-   */
-  LIR* ScanVectorLiteral(int32_t* constants);
-
-  /*
-   * @brief Add a constant vector literal
-   * @param constants An array of size 4 containing the 32-bit constants.
-   */
-  LIR* AddVectorLiteral(int32_t* constants);
-
-  bool WideGPRsAreAliases() const OVERRIDE {
-    return cu_->target64;  // On 64b, we have 64b GPRs.
-  }
-
-  bool WideFPRsAreAliases() const OVERRIDE {
-    return true;  // xmm registers have 64b views even on x86.
-  }
-
-  /*
-   * @brief Dump a RegLocation using printf
-   * @param loc Register location to dump
-   */
-  static void DumpRegLocation(RegLocation loc);
-
- private:
-  void SwapBits(RegStorage result_reg, int shift, int32_t value);
-  void SwapBits64(RegStorage result_reg, int shift, int64_t value);
-
-  static int X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
-                               int state, const MethodReference& target_method,
-                               uint32_t,
-                               uintptr_t direct_code, uintptr_t direct_method,
-                               InvokeType type);
-
-  LIR* OpLoadPc(RegStorage r_dest);
-  RegStorage GetPcAndAnchor(LIR** anchor, RegStorage r_tmp = RegStorage::InvalidReg());
-
-  // When we don't know the proper offset for the value, pick one that will force
-  // 4 byte offset.  We will fix this up in the assembler or linker later to have
-  // the right value.
-  static constexpr int kDummy32BitOffset = 256;
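-  // 256 cannot be encoded as a signed 8-bit displacement, so the assembler is
-  // forced to emit the 4-byte (disp32) form, which leaves room for patching.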
-
-  static const X86EncodingMap EncodingMap[kX86Last];
-
-  friend std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);
-  friend class QuickAssembleX86Test;
-  friend class QuickAssembleX86MacroTest;
-  friend class QuickAssembleX86LowLevelTest;
-
-  DISALLOW_COPY_AND_ASSIGN(X86Mir2Lir);
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
deleted file mode 100755
index b11d41c..0000000
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ /dev/null
@@ -1,813 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_x86.h"
-
-#include "base/logging.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "dex/reg_storage_eq.h"
-#include "x86_lir.h"
-
-namespace art {
-
-void X86Mir2Lir::GenArithOpFloat(Instruction::Code opcode,
-                                 RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
-  X86OpCode op = kX86Nop;
-  RegLocation rl_result;
-
-  /*
-   * Don't attempt to optimize register usage since these opcodes call out to
-   * the handlers.
-   */
-  switch (opcode) {
-    case Instruction::ADD_FLOAT_2ADDR:
-    case Instruction::ADD_FLOAT:
-      op = kX86AddssRR;
-      break;
-    case Instruction::SUB_FLOAT_2ADDR:
-    case Instruction::SUB_FLOAT:
-      op = kX86SubssRR;
-      break;
-    case Instruction::DIV_FLOAT_2ADDR:
-    case Instruction::DIV_FLOAT:
-      op = kX86DivssRR;
-      break;
-    case Instruction::MUL_FLOAT_2ADDR:
-    case Instruction::MUL_FLOAT:
-      op = kX86MulssRR;
-      break;
-    case Instruction::REM_FLOAT_2ADDR:
-    case Instruction::REM_FLOAT:
-      GenRemFP(rl_dest, rl_src1, rl_src2, false /* is_double */);
-      return;
-    case Instruction::NEG_FLOAT:
-      GenNegFloat(rl_dest, rl_src1);
-      return;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
-  }
-  rl_src1 = LoadValue(rl_src1, kFPReg);
-  rl_src2 = LoadValue(rl_src2, kFPReg);
-  rl_result = EvalLoc(rl_dest, kFPReg, true);
-  RegStorage r_dest = rl_result.reg;
-  RegStorage r_src1 = rl_src1.reg;
-  RegStorage r_src2 = rl_src2.reg;
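-  // The two-operand SSE encodings overwrite their first operand, so if the
-  // destination register aliases src2, preserve src2 in a temp first.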
-  if (r_dest == r_src2) {
-    r_src2 = AllocTempSingle();
-    OpRegCopy(r_src2, r_dest);
-  }
-  OpRegCopy(r_dest, r_src1);
-  NewLIR2(op, r_dest.GetReg(), r_src2.GetReg());
-  StoreValue(rl_dest, rl_result);
-}
-
-void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode,
-                                  RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
-  DCHECK(rl_dest.wide);
-  DCHECK(rl_dest.fp);
-  DCHECK(rl_src1.wide);
-  DCHECK(rl_src1.fp);
-  DCHECK(rl_src2.wide);
-  DCHECK(rl_src2.fp);
-  X86OpCode op = kX86Nop;
-  RegLocation rl_result;
-
-  switch (opcode) {
-    case Instruction::ADD_DOUBLE_2ADDR:
-    case Instruction::ADD_DOUBLE:
-      op = kX86AddsdRR;
-      break;
-    case Instruction::SUB_DOUBLE_2ADDR:
-    case Instruction::SUB_DOUBLE:
-      op = kX86SubsdRR;
-      break;
-    case Instruction::DIV_DOUBLE_2ADDR:
-    case Instruction::DIV_DOUBLE:
-      op = kX86DivsdRR;
-      break;
-    case Instruction::MUL_DOUBLE_2ADDR:
-    case Instruction::MUL_DOUBLE:
-      op = kX86MulsdRR;
-      break;
-    case Instruction::REM_DOUBLE_2ADDR:
-    case Instruction::REM_DOUBLE:
-      GenRemFP(rl_dest, rl_src1, rl_src2, true /* is_double */);
-      return;
-    case Instruction::NEG_DOUBLE:
-      GenNegDouble(rl_dest, rl_src1);
-      return;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
-  }
-  rl_src1 = LoadValueWide(rl_src1, kFPReg);
-  rl_src2 = LoadValueWide(rl_src2, kFPReg);
-  rl_result = EvalLoc(rl_dest, kFPReg, true);
-  if (rl_result.reg == rl_src2.reg) {
-    rl_src2.reg = AllocTempDouble();
-    OpRegCopy(rl_src2.reg, rl_result.reg);
-  }
-  OpRegCopy(rl_result.reg, rl_src1.reg);
-  NewLIR2(op, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void X86Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest ATTRIBUTE_UNUSED,
-                                            RegLocation rl_src1 ATTRIBUTE_UNUSED,
-                                            int32_t constant ATTRIBUTE_UNUSED) {
-  // TODO: need x86 implementation.
-  LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in x86";
-}
-
-void X86Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest ATTRIBUTE_UNUSED,
-                                             RegLocation rl_src1 ATTRIBUTE_UNUSED,
-                                             int64_t constant ATTRIBUTE_UNUSED) {
-  // TODO: need x86 implementation.
-  LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in x86";
-}
-
-void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_double) {
-  // Compute offsets to the source and destination VRs on stack
-  int src_v_reg_offset = SRegOffset(rl_src.s_reg_low);
-  int dest_v_reg_offset = SRegOffset(rl_dest.s_reg_low);
-
-  // Update the in-register state of source.
-  rl_src = UpdateLocWide(rl_src);
-
-  // All memory accesses below reference dalvik regs.
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-
-  // If the source is in a physical register, then put it in its location on the stack.
-  if (rl_src.location == kLocPhysReg) {
-    RegisterInfo* reg_info = GetRegInfo(rl_src.reg);
-
-    if (reg_info != nullptr && reg_info->IsTemp()) {
-      // Calling FlushSpecificReg because it will only write back VR if it is dirty.
-      FlushSpecificReg(reg_info);
-      // ResetDef to prevent NullifyRange from removing stores.
-      ResetDef(rl_src.reg);
-    } else {
-      // It must have been register promoted if it is not a temp but is still in a physical
-      // register. Since we need it in memory to convert, we place it there now.
-      const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
-      StoreBaseDisp(rs_rSP, src_v_reg_offset, rl_src.reg, k64, kNotVolatile);
-    }
-  }
-
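-  // 32-bit x86 has no SSE instruction that converts a 64-bit integer to
-  // floating point, so go through the x87 unit: fild loads the long from
-  // memory and fstp stores the converted result back to memory.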
-  // Push the source virtual register onto the x87 stack.
-  LIR *fild64 = NewLIR2NoDest(kX86Fild64M, rs_rX86_SP_32.GetReg(),
-                              src_v_reg_offset + LOWORD_OFFSET);
-  AnnotateDalvikRegAccess(fild64, (src_v_reg_offset + LOWORD_OFFSET) >> 2,
-                          true /* is_load */, true /* is64bit */);
-
-  // Now pop off x87 stack and store it in the destination VR's stack location.
-  int opcode = is_double ? kX86Fstp64M : kX86Fstp32M;
-  int displacement = is_double ? dest_v_reg_offset + LOWORD_OFFSET : dest_v_reg_offset;
-  LIR *fstp = NewLIR2NoDest(opcode, rs_rX86_SP_32.GetReg(), displacement);
-  AnnotateDalvikRegAccess(fstp, displacement >> 2, false /* is_load */, is_double);
-
-  /*
-   * The result is in a physical register if it was in a temp or was register
-   * promoted. For that reason it is enough to check if it is in physical
-   * register. If it is, then we must do all of the bookkeeping necessary to
-   * invalidate temp (if needed) and load in promoted register (if needed).
-   * If the result's location is in memory, then we do not need to do anything
-   * more since the fstp has already placed the correct value in memory.
-   */
-  RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest) : UpdateLocTyped(rl_dest);
-  if (rl_result.location == kLocPhysReg) {
-    /*
-     * We already know that the result is in a physical register but do not know if it is the
-     * right class. So we call EvalLoc(Wide) first which will ensure that it will get moved to the
-     * correct register class.
-     */
-    rl_result = EvalLoc(rl_dest, kFPReg, true);
-    const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
-    if (is_double) {
-      LoadBaseDisp(rs_rSP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
-
-      StoreFinalValueWide(rl_dest, rl_result);
-    } else {
-      Load32Disp(rs_rSP, dest_v_reg_offset, rl_result.reg);
-
-      StoreFinalValue(rl_dest, rl_result);
-    }
-  }
-}
-
-void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
-                               RegLocation rl_src) {
-  RegisterClass rcSrc = kFPReg;
-  X86OpCode op = kX86Nop;
-  RegLocation rl_result;
-  switch (opcode) {
-    case Instruction::INT_TO_FLOAT:
-      rcSrc = kCoreReg;
-      op = kX86Cvtsi2ssRR;
-      break;
-    case Instruction::DOUBLE_TO_FLOAT:
-      rcSrc = kFPReg;
-      op = kX86Cvtsd2ssRR;
-      break;
-    case Instruction::FLOAT_TO_DOUBLE:
-      rcSrc = kFPReg;
-      op = kX86Cvtss2sdRR;
-      break;
-    case Instruction::INT_TO_DOUBLE:
-      rcSrc = kCoreReg;
-      op = kX86Cvtsi2sdRR;
-      break;
-    case Instruction::FLOAT_TO_INT: {
-      rl_src = LoadValue(rl_src, kFPReg);
-      // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
-      ClobberSReg(rl_dest.s_reg_low);
-      rl_result = EvalLoc(rl_dest, kCoreReg, true);
-      RegStorage temp_reg = AllocTempSingle();
-
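-      // cvttss2si returns 0x80000000 on NaN or overflow, but Java requires
-      // NaN -> 0 and positive overflow -> INT_MAX, so compare the source
-      // against (float)INT_MAX and handle those cases explicitly.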
-      LoadConstant(rl_result.reg, 0x7fffffff);
-      NewLIR2(kX86Cvtsi2ssRR, temp_reg.GetReg(), rl_result.reg.GetReg());
-      NewLIR2(kX86ComissRR, rl_src.reg.GetReg(), temp_reg.GetReg());
-      LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
-      LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
-      NewLIR2(kX86Cvttss2siRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-      LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
-      branch_na_n->target = NewLIR0(kPseudoTargetLabel);
-      NewLIR2(kX86Xor32RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
-      branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
-      branch_normal->target = NewLIR0(kPseudoTargetLabel);
-      StoreValue(rl_dest, rl_result);
-      return;
-    }
-    case Instruction::DOUBLE_TO_INT: {
-      rl_src = LoadValueWide(rl_src, kFPReg);
-      // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
-      ClobberSReg(rl_dest.s_reg_low);
-      rl_result = EvalLoc(rl_dest, kCoreReg, true);
-      RegStorage temp_reg = AllocTempDouble();
-
-      LoadConstant(rl_result.reg, 0x7fffffff);
-      NewLIR2(kX86Cvtsi2sdRR, temp_reg.GetReg(), rl_result.reg.GetReg());
-      NewLIR2(kX86ComisdRR, rl_src.reg.GetReg(), temp_reg.GetReg());
-      LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
-      LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
-      NewLIR2(kX86Cvttsd2siRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-      LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
-      branch_na_n->target = NewLIR0(kPseudoTargetLabel);
-      NewLIR2(kX86Xor32RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
-      branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
-      branch_normal->target = NewLIR0(kPseudoTargetLabel);
-      StoreValue(rl_dest, rl_result);
-      return;
-    }
-    case Instruction::LONG_TO_DOUBLE:
-      if (cu_->target64) {
-        rcSrc = kCoreReg;
-        op = kX86Cvtsqi2sdRR;
-        break;
-      }
-      GenLongToFP(rl_dest, rl_src, true /* is_double */);
-      return;
-    case Instruction::LONG_TO_FLOAT:
-      if (cu_->target64) {
-        rcSrc = kCoreReg;
-        op = kX86Cvtsqi2ssRR;
-        break;
-      }
-      GenLongToFP(rl_dest, rl_src, false /* is_double */);
-      return;
-    case Instruction::FLOAT_TO_LONG:
-      if (cu_->target64) {
-        rl_src = LoadValue(rl_src, kFPReg);
-        // If result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
-        ClobberSReg(rl_dest.s_reg_low);
-        rl_result = EvalLoc(rl_dest, kCoreReg, true);
-        RegStorage temp_reg = AllocTempSingle();
-
-        // Set rl_result to 0x7fffffffffffffff (LONG_MAX).
-        LoadConstantWide(rl_result.reg, 0x7fffffffffffffff);
-        NewLIR2(kX86Cvtsqi2ssRR, temp_reg.GetReg(), rl_result.reg.GetReg());
-        NewLIR2(kX86ComissRR, rl_src.reg.GetReg(), temp_reg.GetReg());
-        LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
-        LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
-        NewLIR2(kX86Cvttss2sqiRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-        LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
-        branch_na_n->target = NewLIR0(kPseudoTargetLabel);
-        NewLIR2(kX86Xor64RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
-        branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
-        branch_normal->target = NewLIR0(kPseudoTargetLabel);
-        StoreValueWide(rl_dest, rl_result);
-      } else {
-        CheckEntrypointTypes<kQuickF2l, int64_t, float>();  // int64_t -> kCoreReg
-        GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
-      }
-      return;
-    case Instruction::DOUBLE_TO_LONG:
-      if (cu_->target64) {
-        rl_src = LoadValueWide(rl_src, kFPReg);
-        // If result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
-        ClobberSReg(rl_dest.s_reg_low);
-        rl_result = EvalLoc(rl_dest, kCoreReg, true);
-        RegStorage temp_reg = AllocTempDouble();
-
-        // Set rl_result to 0x7fffffffffffffff (LONG_MAX).
-        LoadConstantWide(rl_result.reg, 0x7fffffffffffffff);
-        NewLIR2(kX86Cvtsqi2sdRR, temp_reg.GetReg(), rl_result.reg.GetReg());
-        NewLIR2(kX86ComisdRR, rl_src.reg.GetReg(), temp_reg.GetReg());
-        LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
-        LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
-        NewLIR2(kX86Cvttsd2sqiRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-        LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
-        branch_na_n->target = NewLIR0(kPseudoTargetLabel);
-        NewLIR2(kX86Xor64RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
-        branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
-        branch_normal->target = NewLIR0(kPseudoTargetLabel);
-        StoreValueWide(rl_dest, rl_result);
-      } else {
-        CheckEntrypointTypes<kQuickD2l, int64_t, double>();  // int64_t -> kCoreReg
-        GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
-      }
-      return;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << opcode;
-  }
-  // At this point, target will be either float or double.
-  DCHECK(rl_dest.fp);
-  if (rl_src.wide) {
-    rl_src = LoadValueWide(rl_src, rcSrc);
-  } else {
-    rl_src = LoadValue(rl_src, rcSrc);
-  }
-  rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  if (rl_dest.wide) {
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    StoreValue(rl_dest, rl_result);
-  }
-}
-
-void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_double) {
-  // Compute offsets to the source and destination VRs on stack.
-  int src1_v_reg_offset = SRegOffset(rl_src1.s_reg_low);
-  int src2_v_reg_offset = SRegOffset(rl_src2.s_reg_low);
-  int dest_v_reg_offset = SRegOffset(rl_dest.s_reg_low);
-
-  // Update the in-register state of sources.
-  rl_src1 = is_double ? UpdateLocWide(rl_src1) : UpdateLoc(rl_src1);
-  rl_src2 = is_double ? UpdateLocWide(rl_src2) : UpdateLoc(rl_src2);
-
-  // All memory accesses below reference dalvik regs.
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-
-  // If the source is in a physical register, then put it in its location on the stack.
-  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
-  if (rl_src1.location == kLocPhysReg) {
-    RegisterInfo* reg_info = GetRegInfo(rl_src1.reg);
-
-    if (reg_info != nullptr && reg_info->IsTemp()) {
-      // Calling FlushSpecificReg because it will only write back VR if it is dirty.
-      FlushSpecificReg(reg_info);
-      // ResetDef to prevent NullifyRange from removing stores.
-      ResetDef(rl_src1.reg);
-    } else {
-      // It must have been register promoted if it is not a temp but is still in a physical
-      // register. Since we need it in memory to convert, we place it there now.
-      StoreBaseDisp(rs_rSP, src1_v_reg_offset, rl_src1.reg, is_double ? k64 : k32,
-                    kNotVolatile);
-    }
-  }
-
-  if (rl_src2.location == kLocPhysReg) {
-    RegisterInfo* reg_info = GetRegInfo(rl_src2.reg);
-    if (reg_info != nullptr && reg_info->IsTemp()) {
-      FlushSpecificReg(reg_info);
-      ResetDef(rl_src2.reg);
-    } else {
-      StoreBaseDisp(rs_rSP, src2_v_reg_offset, rl_src2.reg, is_double ? k64 : k32,
-                    kNotVolatile);
-    }
-  }
-
-  int fld_opcode = is_double ? kX86Fld64M : kX86Fld32M;
-
-  // Push the source virtual registers onto the x87 stack.
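-  // Push src2 first so that, after both loads, src1 sits in ST(0) and the
-  // divisor src2 in ST(1), which is the operand order fprem expects.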
-  LIR *fld_2 = NewLIR2NoDest(fld_opcode, rs_rSP.GetReg(),
-                             src2_v_reg_offset + LOWORD_OFFSET);
-  AnnotateDalvikRegAccess(fld_2, (src2_v_reg_offset + LOWORD_OFFSET) >> 2,
-                          true /* is_load */, is_double /* is64bit */);
-
-  LIR *fld_1 = NewLIR2NoDest(fld_opcode, rs_rSP.GetReg(),
-                             src1_v_reg_offset + LOWORD_OFFSET);
-  AnnotateDalvikRegAccess(fld_1, (src1_v_reg_offset + LOWORD_OFFSET) >> 2,
-                          true /* is_load */, is_double /* is64bit */);
-
-  FlushReg(rs_rAX);
-  Clobber(rs_rAX);
-  LockTemp(rs_rAX);
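-  // fstsw writes the FPU status word into AX, so rAX must be claimed first.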
-
-  LIR* retry = NewLIR0(kPseudoTargetLabel);
-
-  // Compute the partial remainder of ST(0) / ST(1) and leave it in ST(0).
-  NewLIR0(kX86Fprem);
-
-  // Move FPU status word to AX.
-  NewLIR0(kX86Fstsw16R);
-
-  // Check if reduction is complete.
-  OpRegImm(kOpAnd, rs_rAX, 0x400);
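-  // 0x400 is the FPU C2 condition flag; fprem leaves it set while the
-  // partial remainder still requires further reduction iterations.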
-
-  // If not, continue reducing the remainder.
-  LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
-  branch->target = retry;
-
-  FreeTemp(rs_rAX);
-
-  // Now store result in the destination VR's stack location.
-  int displacement = dest_v_reg_offset + LOWORD_OFFSET;
-  int opcode = is_double ? kX86Fst64M : kX86Fst32M;
-  LIR *fst = NewLIR2NoDest(opcode, rs_rSP.GetReg(), displacement);
-  AnnotateDalvikRegAccess(fst, displacement >> 2, false /* is_load */, is_double /* is64bit */);
-
-  // Pop ST(1) and ST(0).
-  NewLIR0(kX86Fucompp);
-
-  /*
-   * The result is in a physical register if it was in a temp or was register
-   * promoted. For that reason it is enough to check if it is in physical
-   * register. If it is, then we must do all of the bookkeeping necessary to
-   * invalidate temp (if needed) and load in promoted register (if needed).
-   * If the result's location is in memory, then we do not need to do anything
-   * more since the fstp has already placed the correct value in memory.
-   */
-  RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest) : UpdateLocTyped(rl_dest);
-  if (rl_result.location == kLocPhysReg) {
-    rl_result = EvalLoc(rl_dest, kFPReg, true);
-    if (is_double) {
-      LoadBaseDisp(rs_rSP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
-      StoreFinalValueWide(rl_dest, rl_result);
-    } else {
-      Load32Disp(rs_rSP, dest_v_reg_offset, rl_result.reg);
-      StoreFinalValue(rl_dest, rl_result);
-    }
-  }
-}
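-
-/*
- * An illustrative sketch (not verbatim output) of the x87 sequence emitted
- * above, assuming the usual x87 semantics where status-word bit 10 (C2,
- * mask 0x400) signals an incomplete reduction:
- *   fld    [esp + src2_off]   ; divisor  -> ST(1) after the next load
- *   fld    [esp + src1_off]   ; dividend -> ST(0)
- * retry:
- *   fprem                     ; ST(0) = partial remainder of ST(0) / ST(1)
- *   fnstsw ax                 ; copy FPU status word to AX
- *   and    eax, 0x400         ; test C2
- *   jnz    retry              ; reduction incomplete -> iterate
- *   fst    [esp + dest_off]   ; store the remainder to the dest VR slot
- *   fucompp                   ; pop ST(0) and ST(1)
- */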
-
-void X86Mir2Lir::GenCmpFP(Instruction::Code code, RegLocation rl_dest,
-                          RegLocation rl_src1, RegLocation rl_src2) {
-  bool single = (code == Instruction::CMPL_FLOAT) || (code == Instruction::CMPG_FLOAT);
-  bool unordered_gt = (code == Instruction::CMPG_DOUBLE) || (code == Instruction::CMPG_FLOAT);
-  if (single) {
-    rl_src1 = LoadValue(rl_src1, kFPReg);
-    rl_src2 = LoadValue(rl_src2, kFPReg);
-  } else {
-    rl_src1 = LoadValueWide(rl_src1, kFPReg);
-    rl_src2 = LoadValueWide(rl_src2, kFPReg);
-  }
-  // In case the result vreg is also a source vreg, break the association to avoid a useless copy by EvalLoc().
-  ClobberSReg(rl_dest.s_reg_low);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  LoadConstantNoClobber(rl_result.reg, unordered_gt ? 1 : 0);
-  if (single) {
-    NewLIR2(kX86UcomissRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  } else {
-    NewLIR2(kX86UcomisdRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  }
-  LIR* branch = nullptr;
-  if (unordered_gt) {
-    branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
-  }
-  // If the result reg can't be byte accessed, use a jump and move instead of a set.
-  if (!IsByteRegister(rl_result.reg)) {
-    LIR* branch2 = nullptr;
-    if (unordered_gt) {
-      branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
-      NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x0);
-    } else {
-      branch2 = NewLIR2(kX86Jcc8, 0, kX86CondBe);
-      NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x1);
-    }
-    branch2->target = NewLIR0(kPseudoTargetLabel);
-  } else {
-    NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondA /* above - unsigned > */);
-  }
-  NewLIR2(kX86Sbb32RI, rl_result.reg.GetReg(), 0);
-  if (unordered_gt) {
-    branch->target = NewLIR0(kPseudoTargetLabel);
-  }
-  StoreValue(rl_dest, rl_result);
-}
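-
-/*
- * A sketch of why the sequence above yields -1/0/1, assuming the documented
- * ucomiss/ucomisd flag results:
- *   src1 > src2:   ZF=0 CF=0  ->  set8 gives 1, sbb 0 keeps 1
- *   src1 == src2:  ZF=1 CF=0  ->  set8 gives 0, sbb 0 keeps 0
- *   src1 < src2:   CF=1       ->  set8 gives 0, sbb yields -1
- *   unordered:     ZF=CF=PF=1 ->  gt-bias: jp keeps the preloaded 1;
- *                                 lt-bias: set8 gives 0, sbb yields -1
- */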
-
-void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
-                                     bool is_double) {
-  LIR* taken = &block_label_list_[bb->taken];
-  LIR* not_taken = &block_label_list_[bb->fall_through];
-  LIR* branch = nullptr;
-  RegLocation rl_src1;
-  RegLocation rl_src2;
-  if (is_double) {
-    rl_src1 = mir_graph_->GetSrcWide(mir, 0);
-    rl_src2 = mir_graph_->GetSrcWide(mir, 2);
-    rl_src1 = LoadValueWide(rl_src1, kFPReg);
-    rl_src2 = LoadValueWide(rl_src2, kFPReg);
-    NewLIR2(kX86UcomisdRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  } else {
-    rl_src1 = mir_graph_->GetSrc(mir, 0);
-    rl_src2 = mir_graph_->GetSrc(mir, 1);
-    rl_src1 = LoadValue(rl_src1, kFPReg);
-    rl_src2 = LoadValue(rl_src2, kFPReg);
-    NewLIR2(kX86UcomissRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
-  }
-  ConditionCode ccode = mir->meta.ccode;
-  switch (ccode) {
-    case kCondEq:
-      if (!gt_bias) {
-        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
-        branch->target = not_taken;
-      }
-      break;
-    case kCondNe:
-      if (!gt_bias) {
-        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
-        branch->target = taken;
-      }
-      break;
-    case kCondLt:
-      if (gt_bias) {
-        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
-        branch->target = not_taken;
-      }
-      ccode = kCondUlt;
-      break;
-    case kCondLe:
-      if (gt_bias) {
-        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
-        branch->target = not_taken;
-      }
-      ccode = kCondLs;
-      break;
-    case kCondGt:
-      if (gt_bias) {
-        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
-        branch->target = taken;
-      }
-      ccode = kCondHi;
-      break;
-    case kCondGe:
-      if (gt_bias) {
-        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
-        branch->target = taken;
-      }
-      ccode = kCondUge;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected ccode: " << ccode;
-  }
-  OpCondBranch(ccode, taken);
-}
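-
-/*
- * A note on the remapping above: ucomiss/ucomisd set ZF/CF like an unsigned
- * integer compare, so the signed FP conditions are branched on via their
- * unsigned counterparts (lt->kCondUlt, le->kCondLs, gt->kCondHi, ge->kCondUge),
- * while PF=1 (unordered, i.e. a NaN operand) is routed to taken/not_taken
- * according to the gt_bias of the fused compare.
- */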
-
-void X86Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
-  RegLocation rl_result;
-  rl_src = LoadValue(rl_src, kCoreReg);
-  rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  OpRegRegImm(kOpAdd, rl_result.reg, rl_src.reg, 0x80000000);
-  StoreValue(rl_dest, rl_result);
-}
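-
-// A brief rationale, assuming 32-bit wraparound arithmetic: adding 0x80000000
-// can only change bit 31 (any carry falls off the register), so the add above
-// is equivalent to XOR-ing the float's sign bit while in a core register.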
-
-void X86Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
-  RegLocation rl_result;
-  rl_src = LoadValueWide(rl_src, kCoreReg);
-  if (cu_->target64) {
-    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-    OpRegCopy(rl_result.reg, rl_src.reg);
-    // Flip sign bit.
-    NewLIR2(kX86Rol64RI, rl_result.reg.GetReg(), 1);
-    NewLIR2(kX86Xor64RI, rl_result.reg.GetReg(), 1);
-    NewLIR2(kX86Ror64RI, rl_result.reg.GetReg(), 1);
-  } else {
-    rl_result = ForceTempWide(rl_src);
-    OpRegRegImm(kOpAdd, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), 0x80000000);
-  }
-  StoreValueWide(rl_dest, rl_result);
-}
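-
-/*
- * A brief rationale for the 64-bit path above: x86-64 XOR takes at most a
- * sign-extended 32-bit immediate, so the sign mask 0x8000000000000000 cannot
- * be encoded directly. Rotating left by 1 moves the sign bit into bit 0,
- * XOR with 1 flips it, and rotating right by 1 puts it back.
- */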
-
-bool X86Mir2Lir::GenInlinedSqrt(CallInfo* info) {
-  RegLocation rl_dest = InlineTargetWide(info);  // double-width location for the result
-  if (rl_dest.s_reg_low == INVALID_SREG) {
-    // Result is unused, the code is dead. Inlining successful, no code generated.
-    return true;
-  }
-  RegLocation rl_src = info->args[0];
-  rl_src = LoadValueWide(rl_src, kFPReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(kX86SqrtsdRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  StoreValueWide(rl_dest, rl_result);
-  return true;
-}
-
-bool X86Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
-  // Get the argument
-  RegLocation rl_src = info->args[0];
-
-  // Get the inlined intrinsic target virtual register
-  RegLocation rl_dest = InlineTarget(info);
-
-  // Get the virtual register number
-  DCHECK_NE(rl_src.s_reg_low, INVALID_SREG);
-  if (rl_dest.s_reg_low == INVALID_SREG) {
-    // Result is unused, the code is dead. Inlining successful, no code generated.
-    return true;
-  }
-  int v_src_reg = mir_graph_->SRegToVReg(rl_src.s_reg_low);
-  int v_dst_reg = mir_graph_->SRegToVReg(rl_dest.s_reg_low);
-
-  // If the argument is the same as the inlined intrinsic target.
-  if (v_src_reg == v_dst_reg) {
-    rl_src = UpdateLoc(rl_src);
-
-    // If the argument is in a physical register.
-    if (rl_src.location == kLocPhysReg) {
-      rl_src = LoadValue(rl_src, kCoreReg);
-      OpRegImm(kOpAnd, rl_src.reg, 0x7fffffff);
-      StoreValue(rl_dest, rl_src);
-      return true;
-    }
-    // The argument is in memory.
-    DCHECK((rl_src.location == kLocDalvikFrame) ||
-         (rl_src.location == kLocCompilerTemp));
-
-    // Operate directly into memory.
-    int displacement = SRegOffset(rl_dest.s_reg_low);
-    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-    LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP_32.GetReg(), displacement, 0x7fffffff);
-    AnnotateDalvikRegAccess(lir, displacement >> 2, false /* is_load */, false /* is_64bit */);
-    AnnotateDalvikRegAccess(lir, displacement >> 2, true /* is_load */, false /* is_64bit */);
-    return true;
-  } else {
-    rl_src = LoadValue(rl_src, kCoreReg);
-    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    OpRegRegImm(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffff);
-    StoreValue(rl_dest, rl_result);
-    return true;
-  }
-}
-
-bool X86Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
-  RegLocation rl_src = info->args[0];
-  RegLocation rl_dest = InlineTargetWide(info);
-  DCHECK_NE(rl_src.s_reg_low, INVALID_SREG);
-  if (rl_dest.s_reg_low == INVALID_SREG) {
-    // Result is unused, the code is dead. Inlining successful, no code generated.
-    return true;
-  }
-  if (cu_->target64) {
-    rl_src = LoadValueWide(rl_src, kCoreReg);
-    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    OpRegCopyWide(rl_result.reg, rl_src.reg);
-    OpRegImm(kOpLsl, rl_result.reg, 1);
-    OpRegImm(kOpLsr, rl_result.reg, 1);
-    StoreValueWide(rl_dest, rl_result);
-    return true;
-  }
-  int v_src_reg = mir_graph_->SRegToVReg(rl_src.s_reg_low);
-  int v_dst_reg = mir_graph_->SRegToVReg(rl_dest.s_reg_low);
-  rl_src = UpdateLocWide(rl_src);
-
-  // If the argument is in a physical XMM register.
-  if (rl_src.location == kLocPhysReg && rl_src.reg.IsFloat()) {
-    RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-    if (rl_result.reg != rl_src.reg) {
-      LoadConstantWide(rl_result.reg, 0x7fffffffffffffff);
-      NewLIR2(kX86PandRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-    } else {
-      RegStorage sign_mask = AllocTempDouble();
-      LoadConstantWide(sign_mask, 0x7fffffffffffffff);
-      NewLIR2(kX86PandRR, rl_result.reg.GetReg(), sign_mask.GetReg());
-      FreeTemp(sign_mask);
-    }
-    StoreValueWide(rl_dest, rl_result);
-    return true;
-  } else if (v_src_reg == v_dst_reg) {
-    // The argument is the same as the inlined intrinsic target.
-    if (rl_src.location == kLocPhysReg) {
-      // The argument is in a physical register.
-      rl_src = LoadValueWide(rl_src, kCoreReg);
-      OpRegImm(kOpAnd, rl_src.reg.GetHigh(), 0x7fffffff);
-      StoreValueWide(rl_dest, rl_src);
-      return true;
-    }
-    // The argument is in memory.
-    DCHECK((rl_src.location == kLocDalvikFrame) ||
-           (rl_src.location == kLocCompilerTemp));
-
-    // Operate directly into memory.
-    int displacement = SRegOffset(rl_dest.s_reg_low);
-    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-    LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP_32.GetReg(), displacement + HIWORD_OFFSET, 0x7fffffff);
-    AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2, true /* is_load */, true /* is_64bit */);
-    AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2, false /* is_load */, true /* is_64bit */);
-    return true;
-  } else {
-    rl_src = LoadValueWide(rl_src, kCoreReg);
-    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    OpRegCopyWide(rl_result.reg, rl_src.reg);
-    OpRegImm(kOpAnd, rl_result.reg.GetHigh(), 0x7fffffff);
-    StoreValueWide(rl_dest, rl_result);
-    return true;
-  }
-}
-
-bool X86Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
-  if (is_double) {
-    RegLocation rl_dest = InlineTargetWide(info);
-    if (rl_dest.s_reg_low == INVALID_SREG) {
-      // Result is unused, the code is dead. Inlining successful, no code generated.
-      return true;
-    }
-    RegLocation rl_src1 = LoadValueWide(info->args[0], kFPReg);
-    RegLocation rl_src2 = LoadValueWide(info->args[2], kFPReg);
-    RegLocation rl_result = EvalLocWide(rl_dest, kFPReg, true);
-
-    // Avoid src2 corruption by OpRegCopyWide.
-    if (rl_result.reg == rl_src2.reg) {
-      std::swap(rl_src2.reg, rl_src1.reg);
-    }
-
-    OpRegCopyWide(rl_result.reg, rl_src1.reg);
-    NewLIR2(kX86UcomisdRR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
-    // If either arg is NaN, return NaN.
-    LIR* branch_nan = NewLIR2(kX86Jcc8, 0, kX86CondP);
-    // Min/Max branches.
-    LIR* branch_cond1 = NewLIR2(kX86Jcc8, 0, (is_min) ? kX86CondA : kX86CondB);
-    LIR* branch_cond2 = NewLIR2(kX86Jcc8, 0, (is_min) ? kX86CondB : kX86CondA);
-    // If equal, we need to resolve situations like min/max(0.0, -0.0) == -0.0/0.0.
-    NewLIR2((is_min) ? kX86OrpdRR : kX86AndpdRR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
-    LIR* branch_exit_equal = NewLIR1(kX86Jmp8, 0);
-    // Handle NaN.
-    branch_nan->target = NewLIR0(kPseudoTargetLabel);
-    LoadConstantWide(rl_result.reg, INT64_C(0x7ff8000000000000));
-
-    LIR* branch_exit_nan = NewLIR1(kX86Jmp8, 0);
-    // Handle Min/Max. Copy greater/lesser value from src2.
-    branch_cond1->target = NewLIR0(kPseudoTargetLabel);
-    OpRegCopyWide(rl_result.reg, rl_src2.reg);
-    // Right operand is already in result reg.
-    branch_cond2->target = NewLIR0(kPseudoTargetLabel);
-    // Exit.
-    branch_exit_nan->target = NewLIR0(kPseudoTargetLabel);
-    branch_exit_equal->target = NewLIR0(kPseudoTargetLabel);
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    RegLocation rl_dest = InlineTarget(info);
-    if (rl_dest.s_reg_low == INVALID_SREG) {
-      // Result is unused, the code is dead. Inlining successful, no code generated.
-      return true;
-    }
-    RegLocation rl_src1 = LoadValue(info->args[0], kFPReg);
-    RegLocation rl_src2 = LoadValue(info->args[1], kFPReg);
-    RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-
-    // Avoid src2 corruption by OpRegCopy.
-    if (rl_result.reg == rl_src2.reg) {
-      std::swap(rl_src2.reg, rl_src1.reg);
-    }
-
-    OpRegCopy(rl_result.reg, rl_src1.reg);
-    NewLIR2(kX86UcomissRR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
-    // If either arg is NaN, return NaN.
-    LIR* branch_nan = NewLIR2(kX86Jcc8, 0, kX86CondP);
-    // Min/Max branches.
-    LIR* branch_cond1 = NewLIR2(kX86Jcc8, 0, (is_min) ? kX86CondA : kX86CondB);
-    LIR* branch_cond2 = NewLIR2(kX86Jcc8, 0, (is_min) ? kX86CondB : kX86CondA);
-    // If equal, we need to resolve situations like min/max(0.0, -0.0) == -0.0/0.0.
-    NewLIR2((is_min) ? kX86OrpsRR : kX86AndpsRR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
-    LIR* branch_exit_equal = NewLIR1(kX86Jmp8, 0);
-    // Handle NaN.
-    branch_nan->target = NewLIR0(kPseudoTargetLabel);
-    LoadConstantNoClobber(rl_result.reg, 0x7fc00000);
-    LIR* branch_exit_nan = NewLIR1(kX86Jmp8, 0);
-    // Handle Min/Max. Copy greater/lesser value from src2.
-    branch_cond1->target = NewLIR0(kPseudoTargetLabel);
-    OpRegCopy(rl_result.reg, rl_src2.reg);
-    // Right operand is already in result reg.
-    branch_cond2->target = NewLIR0(kPseudoTargetLabel);
-    // Exit.
-    branch_exit_nan->target = NewLIR0(kPseudoTargetLabel);
-    branch_exit_equal->target = NewLIR0(kPseudoTargetLabel);
-    StoreValue(rl_dest, rl_result);
-  }
-  return true;
-}
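-
-/*
- * A worked example of the equal-operands fixup above, using raw IEEE-754 bit
- * patterns (+0.0 = 0x0...0, -0.0 = 0x8...0): the operands compare equal, so
- * neither conditional branch fires and the bitwise op decides:
- *   min(+0.0, -0.0): ORPD/ORPS   -> 0x0 | 0x8...0 = 0x8...0 = -0.0 (correct)
- *   max(+0.0, -0.0): ANDPD/ANDPS -> 0x0 & 0x8...0 = 0x0     = +0.0 (correct)
- */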
-
-}  // namespace art
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
deleted file mode 100755
index a8706c3..0000000
--- a/compiler/dex/quick/x86/int_x86.cc
+++ /dev/null
@@ -1,3467 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This file contains codegen for the X86 ISA */
-
-#include "codegen_x86.h"
-
-#include "art_method.h"
-#include "base/bit_utils.h"
-#include "base/logging.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "dex/reg_storage_eq.h"
-#include "mirror/array-inl.h"
-#include "x86_lir.h"
-
-namespace art {
-
-/*
- * Compare two 64-bit values
- *    x = y     return  0
- *    x < y     return -1
- *    x > y     return  1
- */
-void X86Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
-  if (cu_->target64) {
-    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-    rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    RegStorage temp_reg = AllocTemp();
-    OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
-    NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondG);   // result = (src1 > src2) ? 1 : 0
-    NewLIR2(kX86Set8R, temp_reg.GetReg(), kX86CondL);  // temp = (src1 < src2) ? 1 : 0
-    NewLIR2(kX86Sub8RR, rl_result.reg.GetReg(), temp_reg.GetReg());
-    NewLIR2(kX86Movsx8qRR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
-
-    StoreValue(rl_dest, rl_result);
-    FreeTemp(temp_reg);
-    return;
-  }
-
-  // Prepare for explicit register usage
-  ExplicitTempRegisterLock(this, 4, &rs_r0, &rs_r1, &rs_r2, &rs_r3);
-  RegStorage r_tmp1 = RegStorage::MakeRegPair(rs_r0, rs_r1);
-  RegStorage r_tmp2 = RegStorage::MakeRegPair(rs_r2, rs_r3);
-  LoadValueDirectWideFixed(rl_src1, r_tmp1);
-  LoadValueDirectWideFixed(rl_src2, r_tmp2);
-  // Compute (r1:r0) = (r1:r0) - (r3:r2)
-  OpRegReg(kOpSub, rs_r0, rs_r2);  // r0 = r0 - r2
-  OpRegReg(kOpSbc, rs_r1, rs_r3);  // r1 = r1 - r3 - CF
-  NewLIR2(kX86Set8R, rs_r2.GetReg(), kX86CondL);  // r2 = (r1:r0) < (r3:r2) ? 1 : 0
-  NewLIR2(kX86Movzx8RR, rs_r2.GetReg(), rs_r2.GetReg());
-  OpReg(kOpNeg, rs_r2);         // r2 = -r2
-  OpRegReg(kOpOr, rs_r0, rs_r1);   // r0 = high | low - sets ZF
-  NewLIR2(kX86Set8R, rs_r0.GetReg(), kX86CondNz);  // r0 = (r1:r0) != (r3:r2) ? 1 : 0
-  NewLIR2(kX86Movzx8RR, rs_r0.GetReg(), rs_r0.GetReg());
-  OpRegReg(kOpOr, rs_r0, rs_r2);   // r0 = r0 | r2
-  RegLocation rl_result = LocCReturn();
-  StoreValue(rl_dest, rl_result);
-}
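-
-/*
- * A worked example of the branchless 64-bit path above, assuming the usual
- * setcc semantics: for src1 = 5, src2 = 9, the cmp makes setg produce 0 and
- * setl produce 1, so result = 0 - 1 = -1 in the low byte, and the final
- * movsx sign-extends it to the full register. For equal inputs both setcc
- * results are 0 and the difference is 0; for src1 > src2 it is 1.
- */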
-
-X86ConditionCode X86ConditionEncoding(ConditionCode cond) {
-  switch (cond) {
-    case kCondEq: return kX86CondEq;
-    case kCondNe: return kX86CondNe;
-    case kCondCs: return kX86CondC;
-    case kCondCc: return kX86CondNc;
-    case kCondUlt: return kX86CondC;
-    case kCondUge: return kX86CondNc;
-    case kCondMi: return kX86CondS;
-    case kCondPl: return kX86CondNs;
-    case kCondVs: return kX86CondO;
-    case kCondVc: return kX86CondNo;
-    case kCondHi: return kX86CondA;
-    case kCondLs: return kX86CondBe;
-    case kCondGe: return kX86CondGe;
-    case kCondLt: return kX86CondL;
-    case kCondGt: return kX86CondG;
-    case kCondLe: return kX86CondLe;
-    case kCondAl:
-    case kCondNv: LOG(FATAL) << "Should not reach here";
-  }
-  return kX86CondO;
-}
-
-LIR* X86Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
-  NewLIR2(src1.Is64Bit() ? kX86Cmp64RR : kX86Cmp32RR, src1.GetReg(), src2.GetReg());
-  X86ConditionCode cc = X86ConditionEncoding(cond);
-  LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */, cc);
-  branch->target = target;
-  return branch;
-}
-
-LIR* X86Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg,
-                                int check_value, LIR* target) {
-  if ((check_value == 0) && (cond == kCondEq || cond == kCondNe)) {
-    // TODO: when check_value == 0 and reg is rCX, use the jcxz/nz opcode
-    NewLIR2(reg.Is64Bit() ? kX86Test64RR : kX86Test32RR, reg.GetReg(), reg.GetReg());
-  } else {
-    if (reg.Is64Bit()) {
-      NewLIR2(IS_SIMM8(check_value) ? kX86Cmp64RI8 : kX86Cmp64RI, reg.GetReg(), check_value);
-    } else {
-      NewLIR2(IS_SIMM8(check_value) ? kX86Cmp32RI8 : kX86Cmp32RI, reg.GetReg(), check_value);
-    }
-  }
-  X86ConditionCode cc = X86ConditionEncoding(cond);
-  LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */, cc);
-  branch->target = target;
-  return branch;
-}
-
-LIR* X86Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
-  // If src or dest is a pair, we'll be using low reg.
-  if (r_dest.IsPair()) {
-    r_dest = r_dest.GetLow();
-  }
-  if (r_src.IsPair()) {
-    r_src = r_src.GetLow();
-  }
-  if (r_dest.IsFloat() || r_src.IsFloat()) {
-    return OpFpRegCopy(r_dest, r_src);
-  }
-  LIR* res = RawLIR(current_dalvik_offset_, r_dest.Is64Bit() ? kX86Mov64RR : kX86Mov32RR,
-                    r_dest.GetReg(), r_src.GetReg());
-  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
-    res->flags.is_nop = true;
-  }
-  return res;
-}
-
-void X86Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
-  if (r_dest != r_src) {
-    LIR *res = OpRegCopyNoInsert(r_dest, r_src);
-    AppendLIR(res);
-  }
-}
-
-void X86Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
-  if (r_dest != r_src) {
-    bool dest_fp = r_dest.IsFloat();
-    bool src_fp = r_src.IsFloat();
-    if (dest_fp) {
-      if (src_fp) {
-        OpRegCopy(r_dest, r_src);
-      } else {
-        // TODO: Prevent this from happening in the code. The result is often
-        // unused or could have been loaded more easily from memory.
-        if (!r_src.IsPair()) {
-          DCHECK(!r_dest.IsPair());
-          NewLIR2(kX86MovqxrRR, r_dest.GetReg(), r_src.GetReg());
-        } else {
-          NewLIR2(kX86MovdxrRR, r_dest.GetReg(), r_src.GetLowReg());
-          RegStorage r_tmp = AllocTempDouble();
-          NewLIR2(kX86MovdxrRR, r_tmp.GetReg(), r_src.GetHighReg());
-          NewLIR2(kX86PunpckldqRR, r_dest.GetReg(), r_tmp.GetReg());
-          FreeTemp(r_tmp);
-        }
-      }
-    } else {
-      if (src_fp) {
-        if (!r_dest.IsPair()) {
-          DCHECK(!r_src.IsPair());
-          NewLIR2(kX86MovqrxRR, r_dest.GetReg(), r_src.GetReg());
-        } else {
-          NewLIR2(kX86MovdrxRR, r_dest.GetLowReg(), r_src.GetReg());
-          RegStorage temp_reg = AllocTempDouble();
-          NewLIR2(kX86MovsdRR, temp_reg.GetReg(), r_src.GetReg());
-          NewLIR2(kX86PsrlqRI, temp_reg.GetReg(), 32);
-          NewLIR2(kX86MovdrxRR, r_dest.GetHighReg(), temp_reg.GetReg());
-        }
-      } else {
-        DCHECK_EQ(r_dest.IsPair(), r_src.IsPair());
-        if (!r_src.IsPair()) {
-          // Just copy the register directly.
-          OpRegCopy(r_dest, r_src);
-        } else {
-          // Handle overlap
-          if (r_src.GetHighReg() == r_dest.GetLowReg() &&
-              r_src.GetLowReg() == r_dest.GetHighReg()) {
-            // Deal with cycles.
-            RegStorage temp_reg = AllocTemp();
-            OpRegCopy(temp_reg, r_dest.GetHigh());
-            OpRegCopy(r_dest.GetHigh(), r_dest.GetLow());
-            OpRegCopy(r_dest.GetLow(), temp_reg);
-            FreeTemp(temp_reg);
-          } else if (r_src.GetHighReg() == r_dest.GetLowReg()) {
-            OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
-            OpRegCopy(r_dest.GetLow(), r_src.GetLow());
-          } else {
-            OpRegCopy(r_dest.GetLow(), r_src.GetLow());
-            OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
-          }
-        }
-      }
-    }
-  }
-}
-
-void X86Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
-                                  int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                                  RegisterClass dest_reg_class) {
-  DCHECK(!left_op.IsPair() && !right_op.IsPair() && !rs_dest.IsPair());
-  DCHECK(!left_op.IsFloat() && !right_op.IsFloat() && !rs_dest.IsFloat());
-
-  // We really need this check for correctness; otherwise we would need extra checks in the
-  // non-zero/one case.
-  if (true_val == false_val) {
-    LoadConstantNoClobber(rs_dest, true_val);
-    return;
-  }
-
-  const bool dest_intersect = IsSameReg(rs_dest, left_op) || IsSameReg(rs_dest, right_op);
-
-  const bool zero_one_case = (true_val == 0 && false_val == 1) || (true_val == 1 && false_val == 0);
-  if (zero_one_case && IsByteRegister(rs_dest)) {
-    if (!dest_intersect) {
-      LoadConstantNoClobber(rs_dest, 0);
-    }
-    OpRegReg(kOpCmp, left_op, right_op);
-    // Set the low byte of the result to 0 or 1 from the compare condition code.
-    NewLIR2(kX86Set8R, rs_dest.GetReg(),
-            X86ConditionEncoding(true_val == 1 ? code : FlipComparisonOrder(code)));
-    if (dest_intersect) {
-      NewLIR2(rs_dest.Is64Bit() ? kX86Movzx8qRR : kX86Movzx8RR, rs_dest.GetReg(), rs_dest.GetReg());
-    }
-  } else {
-    // Be careful: rs_dest may be changed only after the cmp, because it can be the same as one
-    // of the operands, and we cannot zero it with xor because that dirties the condition flags.
-    RegStorage temp_reg = AllocTypedTemp(false, dest_reg_class, false);
-    if (temp_reg.Valid()) {
-      if (false_val == 0 && dest_intersect) {
-        code = FlipComparisonOrder(code);
-        std::swap(true_val, false_val);
-      }
-      if (!dest_intersect) {
-        LoadConstantNoClobber(rs_dest, false_val);
-      }
-      LoadConstantNoClobber(temp_reg, true_val);
-      OpRegReg(kOpCmp, left_op, right_op);
-      if (dest_intersect) {
-        LoadConstantNoClobber(rs_dest, false_val);
-        DCHECK(!last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
-      }
-      OpCondRegReg(kOpCmov, code, rs_dest, temp_reg);
-      FreeTemp(temp_reg);
-    } else {
-      // Slow path.
-      LIR* cmp_branch = OpCmpBranch(code, left_op, right_op, nullptr);
-      LoadConstantNoClobber(rs_dest, false_val);
-      LIR* that_is_it = NewLIR1(kX86Jmp8, 0);
-      LIR* true_case = NewLIR0(kPseudoTargetLabel);
-      cmp_branch->target = true_case;
-      LoadConstantNoClobber(rs_dest, true_val);
-      LIR* end = NewLIR0(kPseudoTargetLabel);
-      that_is_it->target = end;
-    }
-  }
-}
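-
-/*
- * An illustrative shape of the zero/one fast path above, for a byte-
- * addressable dest that does not alias either operand (a sketch, not
- * verbatim output):
- *   xor   dest, dest        ; pre-zero while the flags are still free
- *   cmp   left, right
- *   setcc dest              ; condition derived from 'code' and true_val
- * When dest aliases an operand, the pre-zero is skipped and a movzx after
- * the setcc clears the upper bits instead.
- */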
-
-void X86Mir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
-  RegLocation rl_result;
-  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
-  RegLocation rl_dest = mir_graph_->GetDest(mir);
-  // Avoid using float regs here.
-  RegisterClass src_reg_class = rl_src.ref ? kRefReg : kCoreReg;
-  RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;
-  ConditionCode ccode = mir->meta.ccode;
-
-  // The kMirOpSelect has two variants, one for constants and one for moves.
-  const bool is_constant_case = (mir->ssa_rep->num_uses == 1);
-
-  if (is_constant_case) {
-    int true_val = mir->dalvikInsn.vB;
-    int false_val = mir->dalvikInsn.vC;
-
-    // The simplest (degenerate) case: both constants are equal.
-    if (true_val == false_val) {
-      rl_result = EvalLoc(rl_dest, result_reg_class, true);
-      LoadConstantNoClobber(rl_result.reg, true_val);
-    } else {
-      // TODO: use GenSelectConst32 and handle additional opcode patterns such as
-      // "cmp; setcc; movzx" or "cmp; sbb r0,r0; and r0,$mask; add r0,$literal".
-      rl_src = LoadValue(rl_src, src_reg_class);
-      rl_result = EvalLoc(rl_dest, result_reg_class, true);
-      /*
-       * For ccode == kCondEq:
-       *
-       * 1) When the true case is zero and result_reg is not same as src_reg:
-       *     xor result_reg, result_reg
-       *     cmp $0, src_reg
-       *     mov t1, $false_case
-       *     cmovnz result_reg, t1
-       * 2) When the false case is zero and result_reg is not same as src_reg:
-       *     xor result_reg, result_reg
-       *     cmp $0, src_reg
-       *     mov t1, $true_case
-       *     cmovz result_reg, t1
-       * 3) All other cases (we do compare first to set eflags):
-       *     cmp $0, src_reg
-       *     mov result_reg, $false_case
-       *     mov t1, $true_case
-       *     cmovz result_reg, t1
-       */
-      // FIXME: depending on register usage you could get a spurious mismatch here when dealing
-      // with different views of the same underlying physical resource (e.g. solo32 vs. solo64).
-      const bool result_reg_same_as_src =
-          (rl_src.location == kLocPhysReg && rl_src.reg.GetRegNum() == rl_result.reg.GetRegNum());
-      const bool true_zero_case = (true_val == 0 && false_val != 0 && !result_reg_same_as_src);
-      const bool false_zero_case = (false_val == 0 && true_val != 0 && !result_reg_same_as_src);
-      const bool catch_all_case = !(true_zero_case || false_zero_case);
-
-      if (true_zero_case || false_zero_case) {
-        OpRegReg(kOpXor, rl_result.reg, rl_result.reg);
-      }
-
-      if (true_zero_case || false_zero_case || catch_all_case) {
-        OpRegImm(kOpCmp, rl_src.reg, 0);
-      }
-
-      if (catch_all_case) {
-        OpRegImm(kOpMov, rl_result.reg, false_val);
-      }
-
-      if (true_zero_case || false_zero_case || catch_all_case) {
-        ConditionCode cc = true_zero_case ? NegateComparison(ccode) : ccode;
-        int immediate_for_temp = true_zero_case ? false_val : true_val;
-        RegStorage temp1_reg = AllocTypedTemp(false, result_reg_class);
-        OpRegImm(kOpMov, temp1_reg, immediate_for_temp);
-
-        OpCondRegReg(kOpCmov, cc, rl_result.reg, temp1_reg);
-
-        FreeTemp(temp1_reg);
-      }
-    }
-  } else {
-    rl_src = LoadValue(rl_src, src_reg_class);
-    RegLocation rl_true = mir_graph_->GetSrc(mir, 1);
-    RegLocation rl_false = mir_graph_->GetSrc(mir, 2);
-    rl_true = LoadValue(rl_true, result_reg_class);
-    rl_false = LoadValue(rl_false, result_reg_class);
-    rl_result = EvalLoc(rl_dest, result_reg_class, true);
-
-    /*
-     * For ccode == kCondEq:
-     *
-     * 1) When true case is already in place:
-     *     cmp $0, src_reg
-     *     cmovnz result_reg, false_reg
-     * 2) When false case is already in place:
-     *     cmp $0, src_reg
-     *     cmovz result_reg, true_reg
-     * 3) When neither cases are in place:
-     *     cmp $0, src_reg
-     *     mov result_reg, false_reg
-     *     cmovz result_reg, true_reg
-     */
-
-    // kMirOpSelect is generated only for conditional cases where the comparison is against zero.
-    OpRegImm(kOpCmp, rl_src.reg, 0);
-
-    if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) {
-      OpCondRegReg(kOpCmov, NegateComparison(ccode), rl_result.reg, rl_false.reg);
-    } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) {
-      OpCondRegReg(kOpCmov, ccode, rl_result.reg, rl_true.reg);
-    } else {
-      OpRegCopy(rl_result.reg, rl_false.reg);
-      OpCondRegReg(kOpCmov, ccode, rl_result.reg, rl_true.reg);
-    }
-  }
-
-  StoreValue(rl_dest, rl_result);
-}
-
-void X86Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
-  LIR* taken = &block_label_list_[bb->taken];
-  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
-  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
-  ConditionCode ccode = mir->meta.ccode;
-
-  if (rl_src1.is_const) {
-    std::swap(rl_src1, rl_src2);
-    ccode = FlipComparisonOrder(ccode);
-  }
-  if (rl_src2.is_const) {
-    // Do a special compare/branch against a simple constant operand.
-    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
-    GenFusedLongCmpImmBranch(bb, rl_src1, val, ccode);
-    return;
-  }
-
-  if (cu_->target64) {
-    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-    rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-
-    OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
-    OpCondBranch(ccode, taken);
-    return;
-  }
-
-  // Prepare for explicit register usage
-  ExplicitTempRegisterLock(this, 4, &rs_r0, &rs_r1, &rs_r2, &rs_r3);
-  RegStorage r_tmp1 = RegStorage::MakeRegPair(rs_r0, rs_r1);
-  RegStorage r_tmp2 = RegStorage::MakeRegPair(rs_r2, rs_r3);
-  LoadValueDirectWideFixed(rl_src1, r_tmp1);
-  LoadValueDirectWideFixed(rl_src2, r_tmp2);
-
-  // Swap operands and condition code to prevent use of zero flag.
-  if (ccode == kCondLe || ccode == kCondGt) {
-    // Compute (r3:r2) = (r3:r2) - (r1:r0)
-    OpRegReg(kOpSub, rs_r2, rs_r0);  // r2 = r2 - r0
-    OpRegReg(kOpSbc, rs_r3, rs_r1);  // r3 = r3 - r1 - CF
-  } else {
-    // Compute (r1:r0) = (r1:r0) - (r3:r2)
-    OpRegReg(kOpSub, rs_r0, rs_r2);  // r0 = r0 - r2
-    OpRegReg(kOpSbc, rs_r1, rs_r3);  // r1 = r1 - r3 - CF
-  }
-  switch (ccode) {
-    case kCondEq:
-    case kCondNe:
-      OpRegReg(kOpOr, rs_r0, rs_r1);  // r0 = r0 | r1
-      break;
-    case kCondLe:
-      ccode = kCondGe;
-      break;
-    case kCondGt:
-      ccode = kCondLt;
-      break;
-    case kCondLt:
-    case kCondGe:
-      break;
-    default:
-      LOG(FATAL) << "Unexpected ccode: " << ccode;
-  }
-  OpCondBranch(ccode, taken);
-}
-
-void X86Mir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
-                                          int64_t val, ConditionCode ccode) {
-  int32_t val_lo = Low32Bits(val);
-  int32_t val_hi = High32Bits(val);
-  LIR* taken = &block_label_list_[bb->taken];
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  bool is_equality_test = ccode == kCondEq || ccode == kCondNe;
-
-  if (cu_->target64) {
-    if (is_equality_test && val == 0) {
-      // We can simplify the comparison against 0 for ==, !=.
-      NewLIR2(kX86Test64RR, rl_src1.reg.GetReg(), rl_src1.reg.GetReg());
-    } else if (is_equality_test && val_hi == 0 && val_lo > 0) {
-      OpRegImm(kOpCmp, rl_src1.reg, val_lo);
-    } else {
-      RegStorage tmp = AllocTypedTempWide(false, kCoreReg);
-      LoadConstantWide(tmp, val);
-      OpRegReg(kOpCmp, rl_src1.reg, tmp);
-      FreeTemp(tmp);
-    }
-    OpCondBranch(ccode, taken);
-    return;
-  }
-
-  if (is_equality_test && val != 0) {
-    rl_src1 = ForceTempWide(rl_src1);
-  }
-  RegStorage low_reg = rl_src1.reg.GetLow();
-  RegStorage high_reg = rl_src1.reg.GetHigh();
-
-  if (is_equality_test) {
-    // We can simplify the comparison against 0 for ==, !=.
-    if (val == 0) {
-      if (IsTemp(low_reg)) {
-        OpRegReg(kOpOr, low_reg, high_reg);
-        // We have now changed it; ignore the old values.
-        Clobber(rl_src1.reg);
-      } else {
-        RegStorage t_reg = AllocTemp();
-        OpRegRegReg(kOpOr, t_reg, low_reg, high_reg);
-        FreeTemp(t_reg);
-      }
-      OpCondBranch(ccode, taken);
-      return;
-    }
-
-    // Need to compute the actual value for ==, !=.
-    OpRegImm(kOpSub, low_reg, val_lo);
-    NewLIR2(kX86Sbb32RI, high_reg.GetReg(), val_hi);
-    OpRegReg(kOpOr, high_reg, low_reg);
-    Clobber(rl_src1.reg);
-  } else if (ccode == kCondLe || ccode == kCondGt) {
-    // Swap operands and condition code to prevent use of zero flag.
-    RegStorage tmp = AllocTypedTempWide(false, kCoreReg);
-    LoadConstantWide(tmp, val);
-    OpRegReg(kOpSub, tmp.GetLow(), low_reg);
-    OpRegReg(kOpSbc, tmp.GetHigh(), high_reg);
-    ccode = (ccode == kCondLe) ? kCondGe : kCondLt;
-    FreeTemp(tmp);
-  } else {
-    // We can use a compare for the low word to set CF.
-    OpRegImm(kOpCmp, low_reg, val_lo);
-    if (IsTemp(high_reg)) {
-      NewLIR2(kX86Sbb32RI, high_reg.GetReg(), val_hi);
-      // We have now changed it; ignore the old values.
-      Clobber(rl_src1.reg);
-    } else {
-      // mov temp_reg, high_reg; sbb temp_reg, high_constant
-      RegStorage t_reg = AllocTemp();
-      OpRegCopy(t_reg, high_reg);
-      NewLIR2(kX86Sbb32RI, t_reg.GetReg(), val_hi);
-      FreeTemp(t_reg);
-    }
-  }
-
-  OpCondBranch(ccode, taken);
-}
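-
-/*
- * A worked example of the cmp/sbb pair above, assuming standard x86 borrow
- * semantics: comparing (hi:lo) = (0:5) against val = (0:9), the 'cmp lo, 9'
- * borrows (CF=1), and the sbb on the high word then computes 0 - 0 - 1 = -1,
- * setting SF. The flags now describe the full 64-bit subtraction
- * (hi:lo) - val, so a single signed/unsigned jcc on 'ccode' decides the
- * branch.
- */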
-
-void X86Mir2Lir::CalculateMagicAndShift(int64_t divisor, int64_t& magic, int& shift, bool is_long) {
-  // It does not make sense to calculate magic and shift for a zero divisor.
-  DCHECK_NE(divisor, 0);
-
-  /* According to H. S. Warren's Hacker's Delight Chapter 10 and
-   * T. Granlund and P. L. Montgomery's "Division by Invariant Integers Using Multiplication",
-   * the magic number M and shift S can be calculated in the following way:
-   * Let nc be the most positive value of numerator(n) such that nc = kd - 1,
-   * where divisor(d) >= 2.
-   * Let nc be the most negative value of numerator(n) such that nc = kd + 1,
-   * where divisor(d) <= -2.
-   * Thus nc can be calculated like:
-   * nc = exp + exp % d - 1, where d >= 2 and exp = 2^31 for int or 2^63 for long
-   * nc = -exp + (exp + 1) % d, where d <= -2 and exp = 2^31 for int or 2^63 for long
-   *
-   * So the shift p is the smallest p satisfying
-   * 2^p > nc * (d - 2^p % d), where d >= 2
-   * 2^p > nc * (d + 2^p % d), where d <= -2.
-   *
-   * The magic number M is calculated by
-   * M = (2^p + d - 2^p % d) / d, where d >= 2
-   * M = (2^p - d - 2^p % d) / d, where d <= -2.
-   *
-   * Notice that p is always greater than or equal to 32/64, so we just return p-32/p-64 as
-   * the shift number S.
-   */
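-
-  /*
-   * A worked 32-bit example, assuming the standard Hacker's Delight tables:
-   * for d = 7 the loop below ends with p = 34, giving M = 0x92492493
-   * (-1840700269 as a signed int) and S = p - 32 = 2. Since M < 0 and d > 0,
-   * the code generator later adds the numerator to the high half of the
-   * product before shifting (see GenDivRemLit).
-   */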
-
-  int64_t p = (is_long) ? 63 : 31;
-  const uint64_t exp = (is_long) ? 0x8000000000000000ULL : 0x80000000U;
-
-  // Initialize the computations.
-  uint64_t abs_d = (divisor >= 0) ? divisor : -divisor;
-  uint64_t tmp = exp + ((is_long) ? static_cast<uint64_t>(divisor) >> 63 :
-                                    static_cast<uint32_t>(divisor) >> 31);
-  uint64_t abs_nc = tmp - 1 - tmp % abs_d;
-  uint64_t quotient1 = exp / abs_nc;
-  uint64_t remainder1 = exp % abs_nc;
-  uint64_t quotient2 = exp / abs_d;
-  uint64_t remainder2 = exp % abs_d;
-
-  /*
-   * To avoid handling positive and negative divisors separately, Hacker's
-   * Delight introduces a method that handles the two cases together.
-   */
-  uint64_t delta;
-  do {
-    p++;
-    quotient1 = 2 * quotient1;
-    remainder1 = 2 * remainder1;
-    if (remainder1 >= abs_nc) {
-      quotient1++;
-      remainder1 = remainder1 - abs_nc;
-    }
-    quotient2 = 2 * quotient2;
-    remainder2 = 2 * remainder2;
-    if (remainder2 >= abs_d) {
-      quotient2++;
-      remainder2 = remainder2 - abs_d;
-    }
-    delta = abs_d - remainder2;
-  } while (quotient1 < delta || (quotient1 == delta && remainder1 == 0));
-
-  magic = (divisor > 0) ? (quotient2 + 1) : (-quotient2 - 1);
-
-  if (!is_long) {
-    magic = static_cast<int>(magic);
-  }
-
-  shift = (is_long) ? p - 64 : p - 32;
-}
-
-RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED,
-                                     RegStorage reg_lo ATTRIBUTE_UNUSED,
-                                     int lit ATTRIBUTE_UNUSED,
-                                     bool is_div ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
-  UNREACHABLE();
-}
-
-RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
-                                     int imm, bool is_div) {
-  // Use a multiply (and fixup) to perform an int div/rem by a constant.
-  RegLocation rl_result;
-
-  if (imm == 1) {
-    rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    if (is_div) {
-      // x / 1 == x.
-      LoadValueDirectFixed(rl_src, rl_result.reg);
-    } else {
-      // x % 1 == 0.
-      LoadConstantNoClobber(rl_result.reg, 0);
-    }
-  } else if (imm == -1) {  // handle 0x80000000 / -1 special case.
-    rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    if (is_div) {
-      LoadValueDirectFixed(rl_src, rl_result.reg);
-
-      // Check if numerator is 0
-      OpRegImm(kOpCmp, rl_result.reg, 0);
-      LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
-
-      // handle 0x80000000 / -1
-      OpRegImm(kOpCmp, rl_result.reg, 0x80000000);
-      LIR *minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
-
-      // for x != MIN_INT, x / -1 == -x.
-      NewLIR1(kX86Neg32R, rl_result.reg.GetReg());
-
-      // The result register already contains the right value (0x80000000).
-      minint_branch->target = NewLIR0(kPseudoTargetLabel);
-      branch->target = NewLIR0(kPseudoTargetLabel);
-    } else {
-      // x % -1 == 0.
-      LoadConstantNoClobber(rl_result.reg, 0);
-    }
-  } else if (is_div && IsPowerOfTwo(std::abs(imm))) {
-    // Division using shifting.
-    rl_src = LoadValue(rl_src, kCoreReg);
-    rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    if (IsSameReg(rl_result.reg, rl_src.reg)) {
-      RegStorage rs_temp = AllocTypedTemp(false, kCoreReg);
-      rl_result.reg.SetReg(rs_temp.GetReg());
-    }
-
-    // Check if numerator is 0
-    OpRegImm(kOpCmp, rl_src.reg, 0);
-    LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
-    LoadConstantNoClobber(rl_result.reg, 0);
-    LIR* done = NewLIR1(kX86Jmp8, 0);
-    branch->target = NewLIR0(kPseudoTargetLabel);
-
-    NewLIR3(kX86Lea32RM, rl_result.reg.GetReg(), rl_src.reg.GetReg(), std::abs(imm) - 1);
-    NewLIR2(kX86Test32RR, rl_src.reg.GetReg(), rl_src.reg.GetReg());
-    OpCondRegReg(kOpCmov, kCondPl, rl_result.reg, rl_src.reg);
-    int shift_amount = CTZ(imm);
-    OpRegImm(kOpAsr, rl_result.reg, shift_amount);
-    if (imm < 0) {
-      OpReg(kOpNeg, rl_result.reg);
-    }
-    done->target = NewLIR0(kPseudoTargetLabel);
-  } else {
-    CHECK(imm <= -2 || imm >= 2);
-
-    // Use H. S. Warren's Hacker's Delight Chapter 10 and
-    // T. Granlund and P. L. Montgomery's "Division by Invariant Integers Using Multiplication".
-    int64_t magic;
-    int shift;
-    CalculateMagicAndShift((int64_t)imm, magic, shift, false /* is_long */);
-
-    /*
-     * For imm >= 2,
-     *     int(n/imm) = floor(n/imm) = floor(M*n/2^S), when n > 0
-     *     int(n/imm) = ceil(n/imm) = floor(M*n/2^S) + 1, when n < 0.
-     * For imm <= -2,
-     *     int(n/imm) = ceil(n/imm) = floor(M*n/2^S) + 1, when n > 0
-     *     int(n/imm) = floor(n/imm) = floor(M*n/2^S), when n < 0.
-     * We implement this algorithm in the following way:
-     * 1. multiply magic number M and numerator n, getting the high 32-bit result in EDX
-     * 2. if imm > 0 and magic < 0, add the numerator to EDX
-     *    if imm < 0 and magic > 0, subtract the numerator from EDX
-     * 3. if S != 0, arithmetic-shift EDX right by S bits
-     * 4. add 1 to EDX if EDX < 0
-     * 5. Thus, EDX is the quotient
-     */
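-
-    /*
-     * A worked example, assuming M = 0x92492493 and S = 2 for imm = 7:
-     * for n = 100, EDX gets the high 32 bits of M * n, which is -43;
-     * step 2 adds n (M < 0, imm > 0) giving 57; step 3 shifts right by 2
-     * giving 14; EDX is non-negative so step 4 adds 0: 100 / 7 = 14.
-     * The remainder path below then computes 100 - 14 * 7 = 2.
-     */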
-
-    FlushReg(rs_r0);
-    Clobber(rs_r0);
-    LockTemp(rs_r0);
-    FlushReg(rs_r2);
-    Clobber(rs_r2);
-    LockTemp(rs_r2);
-
-    // Assume that the result will be in EDX for divide, and EAX for remainder.
-    rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, is_div ? rs_r2 : rs_r0,
-                 INVALID_SREG, INVALID_SREG};
-
-    // We need the value at least twice.  Load into a temp.
-    rl_src = LoadValue(rl_src, kCoreReg);
-    RegStorage numerator_reg = rl_src.reg;
-
-    // Check if numerator is 0.
-    OpRegImm(kOpCmp, numerator_reg, 0);
-    LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
-    // Return result 0 if numerator was 0.
-    LoadConstantNoClobber(rl_result.reg, 0);
-    LIR* done = NewLIR1(kX86Jmp8, 0);
-    branch->target = NewLIR0(kPseudoTargetLabel);
-
-    // EAX = magic.
-    LoadConstant(rs_r0, magic);
-
-    // EDX:EAX = magic * numerator.
-    NewLIR1(kX86Imul32DaR, numerator_reg.GetReg());
-
-    if (imm > 0 && magic < 0) {
-      // Add numerator to EDX.
-      DCHECK(numerator_reg.Valid());
-      NewLIR2(kX86Add32RR, rs_r2.GetReg(), numerator_reg.GetReg());
-    } else if (imm < 0 && magic > 0) {
-      DCHECK(numerator_reg.Valid());
-      NewLIR2(kX86Sub32RR, rs_r2.GetReg(), numerator_reg.GetReg());
-    }
-
-    // Do we need the shift?
-    if (shift != 0) {
-      // Shift EDX by 'shift' bits.
-      NewLIR2(kX86Sar32RI, rs_r2.GetReg(), shift);
-    }
-
-    // Add 1 to EDX if EDX < 0.
-
-    // Move EDX to EAX.
-    OpRegCopy(rs_r0, rs_r2);
-
-    // Move sign bit to bit 0, zeroing the rest.
-    NewLIR2(kX86Shr32RI, rs_r2.GetReg(), 31);
-
-    // EDX = EDX + EAX.
-    NewLIR2(kX86Add32RR, rs_r2.GetReg(), rs_r0.GetReg());
-
-    // Quotient is in EDX.
-    if (!is_div) {
-      // We need to compute the remainder.
-      // Remainder is numerator - (quotient * imm).
-      DCHECK(numerator_reg.Valid());
-      OpRegCopy(rs_r0, numerator_reg);
-
-      // EDX = quotient * imm.
-      OpRegRegImm(kOpMul, rs_r2, rs_r2, imm);
-
-      // EAX -= EDX.
-      NewLIR2(kX86Sub32RR, rs_r0.GetReg(), rs_r2.GetReg());
-
-      // For this case, return the result in EAX.
-    }
-    done->target = NewLIR0(kPseudoTargetLabel);
-  }
-
-  return rl_result;
-}
-
-RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
-                                  RegStorage reg_lo ATTRIBUTE_UNUSED,
-                                  RegStorage reg_hi ATTRIBUTE_UNUSED,
-                                  bool is_div ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of GenDivRem for x86";
-  UNREACHABLE();
-}
-
-RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
-                                  RegLocation rl_src1,
-                                  RegLocation rl_src2,
-                                  bool is_div,
-                                  int flags) {
-  // We have to use fixed registers, so flush all the temps.
-
-  // Prepare for explicit register usage.
-  ExplicitTempRegisterLock(this, 3, &rs_r0, &rs_r1, &rs_r2);
-
-  // Load LHS into EAX.
-  LoadValueDirectFixed(rl_src1, rs_r0);
-
-  // Load RHS into ECX.
-  LoadValueDirectFixed(rl_src2, rs_r1);
-
-  // Copy LHS sign bit into EDX.
-  NewLIR0(kx86Cdq32Da);
-
-  if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
-    // Handle division by zero case.
-    GenDivZeroCheck(rs_r1);
-  }
-
-  // Check if numerator is 0
-  OpRegImm(kOpCmp, rs_r0, 0);
-  LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
-
-  // Have to catch 0x80000000/-1 case, or we will get an exception!
-  OpRegImm(kOpCmp, rs_r1, -1);
-  LIR* minus_one_branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
-
-  // RHS is -1.
-  OpRegImm(kOpCmp, rs_r0, 0x80000000);
-  LIR* minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
-
-  branch->target = NewLIR0(kPseudoTargetLabel);
-
-  // In 0x80000000/-1 case.
-  if (!is_div) {
-    // For DIV, EAX is already right. For REM, we need EDX to be 0.
-    LoadConstantNoClobber(rs_r2, 0);
-  }
-  LIR* done = NewLIR1(kX86Jmp8, 0);
-
-  // Expected case.
-  minus_one_branch->target = NewLIR0(kPseudoTargetLabel);
-  minint_branch->target = minus_one_branch->target;
-  NewLIR1(kX86Idivmod32DaR, rs_r1.GetReg());
-  done->target = NewLIR0(kPseudoTargetLabel);
-
-  // Result is in EAX for div and EDX for rem.
-  RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_r0, INVALID_SREG, INVALID_SREG};
-  if (!is_div) {
-    rl_result.reg.SetReg(rs_r2.GetReg());
-  }
-  return rl_result;
-}
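-
-/*
- * A note on the guard above: on x86, idiv raises a #DE fault not only for a
- * zero divisor but also when the quotient does not fit, and 0x80000000 / -1
- * would need +2^31. Java instead defines the result as Integer.MIN_VALUE, so
- * the code branches around the idiv, leaving 0x80000000 in EAX for the
- * quotient (and forcing EDX to 0 for the remainder).
- */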
-
-static dwarf::Reg DwarfCoreReg(bool is_x86_64, int num) {
-  return is_x86_64 ? dwarf::Reg::X86_64Core(num) : dwarf::Reg::X86Core(num);
-}
-
-bool X86Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
-  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
-
-  if (is_long && !cu_->target64) {
-   /*
-    * We want to implement the following algorithm
-    * mov eax, low part of arg1
-    * mov edx, high part of arg1
-    * mov ebx, low part of arg2
-    * mov ecx, high part of arg2
-    * mov edi, eax
-    * sub edi, ebx
-    * mov edi, edx
-    * sbb edi, ecx
-    * is_min ? "cmovgel eax, ebx" : "cmovll eax, ebx"
-    * is_min ? "cmovgel edx, ecx" : "cmovll edx, ecx"
-    *
-    * The algorithm above needs 5 registers: a pair for the first operand
-    * (which will later be used as the result), a pair for the second operand,
-    * and a temp register (e.g. 'edi') for intermediate calculations.
-    * Ideally we have 6 GP caller-save registers in 32-bit mode: 'eax',
-    * 'ebx', 'ecx', 'edx', 'esi' and 'edi', so there should always be
-    * enough registers to operate on. In practice, the register pair
-    * 'edi'/'esi' holds promoted values and must sometimes be treated as
-    * callee-save. If one of the operands is in the promoted registers,
-    * we still have enough registers to operate on. Otherwise we are short
-    * of registers and have to save 'edi' before the calculation and
-    * restore it afterwards.
-    */
-
-    RegLocation rl_src1 = info->args[0];
-    RegLocation rl_src2 = info->args[2];
-    RegLocation rl_dest = InlineTargetWide(info);
-
-    if (rl_dest.s_reg_low == INVALID_SREG) {
-      // Result is unused, the code is dead. Inlining successful, no code generated.
-      return true;
-    }
-
-    if (PartiallyIntersects(rl_src1, rl_dest) &&
-        PartiallyIntersects(rl_src2, rl_dest)) {
-      // A special case which we don't want to handle.
-      // This is when src1 is mapped on v0 and v1,
-      // src2 is mapped on v2, v3,
-      // result is mapped on v1, v2
-      return false;
-    }
-
-    /*
-     * If the result register is the same as the second element, then we
-     * need to be careful. The reason is that the first copy will
-     * inadvertently clobber the second element with the first one thus
-     * yielding the wrong result. Thus we do a swap in that case.
-     */
-    if (Intersects(rl_src2, rl_dest)) {
-      std::swap(rl_src1, rl_src2);
-    }
-
-    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-
-    // Pick the first integer as min/max.
-    OpRegCopyWide(rl_result.reg, rl_src1.reg);
-
-    /*
-     * If the integers are both in the same register, then there is
-     * nothing else to do because they are equal and we have already
-     * moved one into the result.
-     */
-    if (mir_graph_->SRegToVReg(rl_src1.s_reg_low) ==
-        mir_graph_->SRegToVReg(rl_src2.s_reg_low)) {
-      StoreValueWide(rl_dest, rl_result);
-      return true;
-    }
-
-    // Free registers to make some room for the second operand.
-    // But don't try to free part of a source which intersects
-    // part of result or promoted registers.
-
-    if (IsTemp(rl_src1.reg.GetLow()) &&
-       (rl_src1.reg.GetLowReg() != rl_result.reg.GetHighReg()) &&
-       (rl_src1.reg.GetLowReg() != rl_result.reg.GetLowReg())) {
-      // Is low part temporary and doesn't intersect any parts of result?
-      FreeTemp(rl_src1.reg.GetLow());
-    }
-
-    if (IsTemp(rl_src1.reg.GetHigh()) &&
-       (rl_src1.reg.GetHighReg() != rl_result.reg.GetLowReg()) &&
-       (rl_src1.reg.GetHighReg() != rl_result.reg.GetHighReg())) {
-      // Is high part temporary and doesn't intersect any parts of result?
-      FreeTemp(rl_src1.reg.GetHigh());
-    }
-
-    rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-
-    // Do we have a free register for intermediate calculations?
-    RegStorage tmp = AllocTemp(false);
-    const int kRegSize = cu_->target64 ? 8 : 4;
-    if (tmp == RegStorage::InvalidReg()) {
-       /*
-        * No, will use 'edi'.
-        *
-        * As mentioned above we have 4 temporary and 2 promotable
-        * caller-save registers. Therefore, we assume that a free
-        * register can be allocated only if 'esi' and 'edi' are
-        * already used as operands. If the number of promotable registers
-        * increases from 2 to 4, our assumption fails and operand
-        * data is corrupted.
-        * Let's DCHECK it.
-        */
-       DCHECK(IsTemp(rl_src2.reg.GetLow()) &&
-              IsTemp(rl_src2.reg.GetHigh()) &&
-              IsTemp(rl_result.reg.GetLow()) &&
-              IsTemp(rl_result.reg.GetHigh()));
-       tmp = rs_rDI;
-       NewLIR1(kX86Push32R, tmp.GetReg());
-       cfi_.AdjustCFAOffset(kRegSize);
-       // Record cfi only if it is not already spilled.
-       if (!CoreSpillMaskContains(tmp.GetReg())) {
-         cfi_.RelOffset(DwarfCoreReg(cu_->target64, tmp.GetReg()), 0);
-       }
-    }
-
-    // Now we are ready to do calculations.
-    OpRegReg(kOpMov, tmp, rl_result.reg.GetLow());
-    OpRegReg(kOpSub, tmp, rl_src2.reg.GetLow());
-    OpRegReg(kOpMov, tmp, rl_result.reg.GetHigh());
-    OpRegReg(kOpSbc, tmp, rl_src2.reg.GetHigh());
-
-    // Pop 'edi' here to break up the dependency chain a bit.
-    if (tmp == rs_rDI) {
-      NewLIR1(kX86Pop32R, tmp.GetReg());
-      cfi_.AdjustCFAOffset(-kRegSize);
-      if (!CoreSpillMaskContains(tmp.GetReg())) {
-        cfi_.Restore(DwarfCoreReg(cu_->target64, tmp.GetReg()));
-      }
-    } else {
-      FreeTemp(tmp);
-    }
-
-    // Conditionally move the other integer into the destination register.
-    ConditionCode cc = is_min ? kCondGe : kCondLt;
-    OpCondRegReg(kOpCmov, cc, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
-    OpCondRegReg(kOpCmov, cc, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
-    FreeTemp(rl_src2.reg);
-    StoreValueWide(rl_dest, rl_result);
-    return true;
-  }
-
-  // Get the two arguments to the invoke and place them in GP registers.
-  RegLocation rl_dest = (is_long) ? InlineTargetWide(info) : InlineTarget(info);
-  if (rl_dest.s_reg_low == INVALID_SREG) {
-    // Result is unused, the code is dead. Inlining successful, no code generated.
-    return true;
-  }
-  RegLocation rl_src1 = info->args[0];
-  RegLocation rl_src2 = (is_long) ? info->args[2] : info->args[1];
-  rl_src1 = (is_long) ? LoadValueWide(rl_src1, kCoreReg) : LoadValue(rl_src1, kCoreReg);
-  rl_src2 = (is_long) ? LoadValueWide(rl_src2, kCoreReg) : LoadValue(rl_src2, kCoreReg);
-
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-
-  /*
-   * If the result register is the same as the second element, then we need to be careful.
-   * The reason is that the first copy will inadvertently clobber the second element with
-   * the first one thus yielding the wrong result. Thus we do a swap in that case.
-   */
-  if (rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
-    std::swap(rl_src1, rl_src2);
-  }
-
-  // Pick the first integer as min/max.
-  OpRegCopy(rl_result.reg, rl_src1.reg);
-
-  // If the integers are both in the same register, then there is nothing else to do
-  // because they are equal and we have already moved one into the result.
-  if (rl_src1.reg.GetReg() != rl_src2.reg.GetReg()) {
-    // It is possible we didn't pick correctly so do the actual comparison now.
-    OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
-
-    // Conditionally move the other integer into the destination register.
-    ConditionCode condition_code = is_min ? kCondGt : kCondLt;
-    OpCondRegReg(kOpCmov, condition_code, rl_result.reg, rl_src2.reg);
-  }
-
-  if (is_long) {
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    StoreValue(rl_dest, rl_result);
-  }
-  return true;
-}
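-
-/*
- * A brief rationale for the cmov conditions above: the result register is
- * preloaded with src1, so src2 must replace it exactly when src1 loses.
- * In the wide path the flags come from the src1 - src2 subtraction, hence
- * cmovge (src1 >= src2) picks src2 for min and cmovl picks it for max; the
- * narrow path uses cmovg/cmovl after a direct cmp of the two operands.
- */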
-
-bool X86Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
-  RegLocation rl_dest = size == k64 ? InlineTargetWide(info) : InlineTarget(info);
-  if (rl_dest.s_reg_low == INVALID_SREG) {
-    // Result is unused, the code is dead. Inlining successful, no code generated.
-    return true;
-  }
-  RegLocation rl_src_address = info->args[0];  // long address
-  RegLocation rl_address;
-  if (!cu_->target64) {
-    rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
-    rl_address = LoadValue(rl_src_address, kCoreReg);
-  } else {
-    rl_address = LoadValueWide(rl_src_address, kCoreReg);
-  }
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  // Unaligned access is allowed on x86.
-  LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
-  if (size == k64) {
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
-    StoreValue(rl_dest, rl_result);
-  }
-  return true;
-}
-
-bool X86Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
-  RegLocation rl_src_address = info->args[0];  // long address
-  RegLocation rl_address;
-  if (!cu_->target64) {
-    rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
-    rl_address = LoadValue(rl_src_address, kCoreReg);
-  } else {
-    rl_address = LoadValueWide(rl_src_address, kCoreReg);
-  }
-  RegLocation rl_src_value = info->args[2];  // [size] value
-  RegLocation rl_value;
-  if (size == k64) {
-    // Unaligned access is allowed on x86.
-    rl_value = LoadValueWide(rl_src_value, kCoreReg);
-  } else {
-    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
-    // In 32-bit mode, only the EAX..EDX registers can be used with Mov8MR.
-    if (!cu_->target64 && size == kSignedByte) {
-      rl_src_value = UpdateLocTyped(rl_src_value);
-      if (rl_src_value.location == kLocPhysReg && !IsByteRegister(rl_src_value.reg)) {
-        RegStorage temp = AllocateByteRegister();
-        OpRegCopy(temp, rl_src_value.reg);
-        rl_value.reg = temp;
-      } else {
-        rl_value = LoadValue(rl_src_value, kCoreReg);
-      }
-    } else {
-      rl_value = LoadValue(rl_src_value, kCoreReg);
-    }
-  }
-  StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
-  return true;
-}
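-
-/*
- * A note on the byte-store special case above: in 32-bit mode only AL, CL,
- * DL and BL (the low bytes of EAX, ECX, EDX and EBX) are encodable as 8-bit
- * operands; ESI/EDI have no 8-bit form without a REX prefix, which exists
- * only on x86-64. Hence a byte value living in, say, ESI is first copied
- * into a byte-addressable register before the Mov8MR store.
- */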
-
-void X86Mir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset) {
-  NewLIR5(kX86Lea32RA, r_base.GetReg(), reg1.GetReg(), reg2.GetReg(), scale, offset);
-}
-
-void X86Mir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
-  DCHECK_EQ(kX86, cu_->instruction_set);
-  NewLIR2(kX86Cmp16TI8, offset.Int32Value(), val);
-}
-
-void X86Mir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
-  DCHECK_EQ(kX86_64, cu_->instruction_set);
-  NewLIR2(kX86Cmp16TI8, offset.Int32Value(), val);
-}
-
-static bool IsInReg(X86Mir2Lir *pMir2Lir, const RegLocation &rl, RegStorage reg) {
-  return rl.reg.Valid() && rl.reg.GetReg() == reg.GetReg() && (pMir2Lir->IsLive(reg) || rl.home);
-}
-
-bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
-  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
-  // Unused - RegLocation rl_src_unsafe = info->args[0];
-  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
-  RegLocation rl_src_offset = info->args[2];  // long low
-  if (!cu_->target64) {
-    rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
-  }
-  RegLocation rl_src_expected = info->args[4];  // int, long or Object
-  // If is_long, high half is in info->args[5]
-  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
-  // If is_long, high half is in info->args[7]
-  const int kRegSize = cu_->target64 ? 8 : 4;
-
-  if (is_long && cu_->target64) {
-    // RAX must hold the expected value for CMPXCHG; neither rl_new_value nor r_ptr may be in RAX.
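-    // LOCK CMPXCHG m64, r64 compares RAX with the memory operand: on a match it
-    // stores r64 there and sets ZF; otherwise it loads the memory operand into RAX
-    // and clears ZF.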
-    FlushReg(rs_r0q);
-    Clobber(rs_r0q);
-    LockTemp(rs_r0q);
-
-    RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
-    RegLocation rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
-    RegLocation rl_offset = LoadValueWide(rl_src_offset, kCoreReg);
-    LoadValueDirectWide(rl_src_expected, rs_r0q);
-    NewLIR5(kX86LockCmpxchg64AR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0,
-            rl_new_value.reg.GetReg());
-
-    // After a store we need to insert a barrier in case of a potential later load. Since the
-    // locked cmpxchg has full barrier semantics, only a scheduling barrier will be generated.
-    GenMemBarrier(kAnyAny);
-
-    FreeTemp(rs_r0q);
-  } else if (is_long) {
-    // TODO: avoid unnecessary loads of SI and DI when the values are in registers.
-    FlushAllRegs();
-    LockCallTemps();
-    RegStorage r_tmp1 = RegStorage::MakeRegPair(rs_rAX, rs_rDX);
-    RegStorage r_tmp2 = RegStorage::MakeRegPair(rs_rBX, rs_rCX);
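-    // LOCK CMPXCHG8B m64 compares EDX:EAX with the memory operand: on a match it
-    // stores ECX:EBX there and sets ZF; otherwise it loads the memory operand into
-    // EDX:EAX and clears ZF.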
-    LoadValueDirectWideFixed(rl_src_expected, r_tmp1);
-    LoadValueDirectWideFixed(rl_src_new_value, r_tmp2);
-    // FIXME: needs 64-bit update.
-    const bool obj_in_di = IsInReg(this, rl_src_obj, rs_rDI);
-    const bool obj_in_si = IsInReg(this, rl_src_obj, rs_rSI);
-    DCHECK(!obj_in_si || !obj_in_di);
-    const bool off_in_di = IsInReg(this, rl_src_offset, rs_rDI);
-    const bool off_in_si = IsInReg(this, rl_src_offset, rs_rSI);
-    DCHECK(!off_in_si || !off_in_di);
-    // If obj/offset is already in a reg, use that reg. Otherwise, use the remaining free reg.
-    RegStorage rs_obj = obj_in_di ? rs_rDI : obj_in_si ? rs_rSI : !off_in_di ? rs_rDI : rs_rSI;
-    RegStorage rs_off = off_in_si ? rs_rSI : off_in_di ? rs_rDI : !obj_in_si ? rs_rSI : rs_rDI;
-    bool push_di = (!obj_in_di && !off_in_di) && (rs_obj == rs_rDI || rs_off == rs_rDI);
-    bool push_si = (!obj_in_si && !off_in_si) && (rs_obj == rs_rSI || rs_off == rs_rSI);
-    if (push_di) {
-      NewLIR1(kX86Push32R, rs_rDI.GetReg());
-      MarkTemp(rs_rDI);
-      LockTemp(rs_rDI);
-      cfi_.AdjustCFAOffset(kRegSize);
-      // Record cfi only if it is not already spilled.
-      if (!CoreSpillMaskContains(rs_rDI.GetReg())) {
-        cfi_.RelOffset(DwarfCoreReg(cu_->target64, rs_rDI.GetReg()), 0);
-      }
-    }
-    if (push_si) {
-      NewLIR1(kX86Push32R, rs_rSI.GetReg());
-      MarkTemp(rs_rSI);
-      LockTemp(rs_rSI);
-      cfi_.AdjustCFAOffset(kRegSize);
-      // Record cfi only if it is not already spilled.
-      if (!CoreSpillMaskContains(rs_rSI.GetReg())) {
-        cfi_.RelOffset(DwarfCoreReg(cu_->target64, rs_rSI.GetReg()), 0);
-      }
-    }
-    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-    const size_t push_offset = (push_si ? 4u : 0u) + (push_di ? 4u : 0u);
-    const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
-    if (!obj_in_si && !obj_in_di) {
-      LoadWordDisp(rs_rSP, SRegOffset(rl_src_obj.s_reg_low) + push_offset, rs_obj);
-      // The Dalvik register annotation in LoadBaseIndexedDisp() used the wrong offset. Fix it.
-      DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
-      int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
-      AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
-    }
-    if (!off_in_si && !off_in_di) {
-      LoadWordDisp(rs_rSP, SRegOffset(rl_src_offset.s_reg_low) + push_offset, rs_off);
-      // The Dalvik register annotation in LoadBaseIndexedDisp() used the wrong offset. Fix it.
-      DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
-      int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
-      AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
-    }
-    NewLIR4(kX86LockCmpxchg64A, rs_obj.GetReg(), rs_off.GetReg(), 0, 0);
-
-    // After a store we need to insert a barrier to prevent reordering with either
-    // earlier or later memory accesses. Since the locked cmpxchg has full barrier
-    // semantics, only a scheduling barrier will be generated, and it will be
-    // associated with the cmpxchg instruction, preventing reordering in both directions.
-    GenMemBarrier(kAnyAny);
-
-    if (push_si) {
-      FreeTemp(rs_rSI);
-      UnmarkTemp(rs_rSI);
-      NewLIR1(kX86Pop32R, rs_rSI.GetReg());
-      cfi_.AdjustCFAOffset(-kRegSize);
-      if (!CoreSpillMaskContains(rs_rSI.GetReg())) {
-        cfi_.Restore(DwarfCoreReg(cu_->target64, rs_rSI.GetRegNum()));
-      }
-    }
-    if (push_di) {
-      FreeTemp(rs_rDI);
-      UnmarkTemp(rs_rDI);
-      NewLIR1(kX86Pop32R, rs_rDI.GetReg());
-      cfi_.AdjustCFAOffset(-kRegSize);
-      if (!CoreSpillMaskContains(rs_rDI.GetReg())) {
-        cfi_.Restore(DwarfCoreReg(cu_->target64, rs_rDI.GetRegNum()));
-      }
-    }
-    FreeCallTemps();
-  } else {
-    // EAX must hold the expected value for CMPXCHG; neither rl_new_value nor r_ptr may be in EAX.
-    FlushReg(rs_r0);
-    Clobber(rs_r0);
-    LockTemp(rs_r0);
-
-    RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
-    RegLocation rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
-
-    if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
-      // Mark card for object assuming new value is stored.
-      FreeTemp(rs_r0);  // Temporarily release EAX for MarkGCCard().
-      MarkGCCard(0, rl_new_value.reg, rl_object.reg);
-      LockTemp(rs_r0);
-    }
-
-    RegLocation rl_offset;
-    if (cu_->target64) {
-      rl_offset = LoadValueWide(rl_src_offset, kCoreReg);
-    } else {
-      rl_offset = LoadValue(rl_src_offset, kCoreReg);
-    }
-    LoadValueDirect(rl_src_expected, rs_r0);
-    NewLIR5(kX86LockCmpxchgAR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0,
-            rl_new_value.reg.GetReg());
-
-    // After a store we need to insert a barrier to prevent reordering with either
-    // earlier or later memory accesses. Since the locked cmpxchg has full barrier
-    // semantics, only a scheduling barrier will be generated, and it will be
-    // associated with the cmpxchg instruction, preventing reordering in both directions.
-    GenMemBarrier(kAnyAny);
-
-    FreeTemp(rs_r0);
-  }
-
-  // Convert ZF to boolean
-  RegLocation rl_dest = InlineTarget(info);  // boolean place for result
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  RegStorage result_reg = rl_result.reg;
-
-  // For 32-bit, SETcc only works with EAX..EDX.
-  if (!IsByteRegister(result_reg)) {
-    result_reg = AllocateByteRegister();
-  }
-  NewLIR2(kX86Set8R, result_reg.GetReg(), kX86CondZ);
-  NewLIR2(kX86Movzx8RR, rl_result.reg.GetReg(), result_reg.GetReg());
-  if (IsTemp(result_reg)) {
-    FreeTemp(result_reg);
-  }
-  StoreValue(rl_dest, rl_result);
-  return true;
-}
-
-void X86Mir2Lir::SwapBits(RegStorage result_reg, int shift, int32_t value) {
-  RegStorage r_temp = AllocTemp();
-  OpRegCopy(r_temp, result_reg);
-  OpRegImm(kOpLsr, result_reg, shift);
-  OpRegImm(kOpAnd, r_temp, value);
-  OpRegImm(kOpAnd, result_reg, value);
-  OpRegImm(kOpLsl, r_temp, shift);
-  OpRegReg(kOpOr, result_reg, r_temp);
-  FreeTemp(r_temp);
-}
-
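-// Both helpers compute result = ((result >> shift) & value) | ((result & value) << shift);
-// with shift = 1 and value = 0x55555555 this exchanges adjacent bits. The 64-bit variant
-// materializes the mask in a register because x86-64 ALU immediates are at most 32 bits.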
-void X86Mir2Lir::SwapBits64(RegStorage result_reg, int shift, int64_t value) {
-  RegStorage r_temp = AllocTempWide();
-  OpRegCopy(r_temp, result_reg);
-  OpRegImm(kOpLsr, result_reg, shift);
-  RegStorage r_value = AllocTempWide();
-  LoadConstantWide(r_value, value);
-  OpRegReg(kOpAnd, r_temp, r_value);
-  OpRegReg(kOpAnd, result_reg, r_value);
-  OpRegImm(kOpLsl, r_temp, shift);
-  OpRegReg(kOpOr, result_reg, r_temp);
-  FreeTemp(r_temp);
-  FreeTemp(r_value);
-}
-
-bool X86Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
-  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);
-  if (rl_dest.s_reg_low == INVALID_SREG) {
-    // Result is unused, the code is dead. Inlining successful, no code generated.
-    return true;
-  }
-  RegLocation rl_src_i = info->args[0];
-  RegLocation rl_i = (size == k64) ? LoadValueWide(rl_src_i, kCoreReg)
-                                   : LoadValue(rl_src_i, kCoreReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  if (size == k64) {
-    if (cu_->instruction_set == kX86_64) {
-      /* Use one bswap instruction to reverse the byte order first, then use 3 rounds of
-         bit swapping to reverse the bits of a long number x. Using bswap saves
-         instructions compared to the generic luni implementation, which needs 5 rounds
-         of bit swapping.
-         x = bswap x
-         x = (x & 0x5555555555555555) << 1 | (x >> 1) & 0x5555555555555555;
-         x = (x & 0x3333333333333333) << 2 | (x >> 2) & 0x3333333333333333;
-         x = (x & 0x0F0F0F0F0F0F0F0F) << 4 | (x >> 4) & 0x0F0F0F0F0F0F0F0F;
-      */
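-      // E.g. x = 1: bswap gives 0x0100000000000000 (bit 56); the three rounds move
-      // the bit to 57, 59, and finally 63, yielding 0x8000000000000000, the
-      // bit-reverse of 1.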
-      OpRegReg(kOpRev, rl_result.reg, rl_i.reg);
-      SwapBits64(rl_result.reg, 1, 0x5555555555555555);
-      SwapBits64(rl_result.reg, 2, 0x3333333333333333);
-      SwapBits64(rl_result.reg, 4, 0x0f0f0f0f0f0f0f0f);
-      StoreValueWide(rl_dest, rl_result);
-      return true;
-    }
-    RegStorage r_i_low = rl_i.reg.GetLow();
-    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
-      // The first REV would clobber rl_result.reg.GetLowReg(), so save the value in a temp
-      // for the second REV.
-      r_i_low = AllocTemp();
-      OpRegCopy(r_i_low, rl_i.reg);
-    }
-    OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh());
-    OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low);
-    // Free up at least one input register if it was a temp. Otherwise we may be in the bad
-    // situation of not having a temp available for SwapBits. Make sure it's not overlapping
-    // with the output, though.
-    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
-      // There's definitely a free temp after this.
-      FreeTemp(r_i_low);
-    } else {
-      // We opportunistically release both here. That saves duplication of the register state
-      // lookup (to see if it's actually a temp).
-      if (rl_i.reg.GetLowReg() != rl_result.reg.GetHighReg()) {
-        FreeTemp(rl_i.reg.GetLow());
-      }
-      if (rl_i.reg.GetHighReg() != rl_result.reg.GetLowReg() &&
-          rl_i.reg.GetHighReg() != rl_result.reg.GetHighReg()) {
-        FreeTemp(rl_i.reg.GetHigh());
-      }
-    }
-
-    SwapBits(rl_result.reg.GetLow(), 1, 0x55555555);
-    SwapBits(rl_result.reg.GetLow(), 2, 0x33333333);
-    SwapBits(rl_result.reg.GetLow(), 4, 0x0f0f0f0f);
-    SwapBits(rl_result.reg.GetHigh(), 1, 0x55555555);
-    SwapBits(rl_result.reg.GetHigh(), 2, 0x33333333);
-    SwapBits(rl_result.reg.GetHigh(), 4, 0x0f0f0f0f);
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    OpRegReg(kOpRev, rl_result.reg, rl_i.reg);
-    SwapBits(rl_result.reg, 1, 0x55555555);
-    SwapBits(rl_result.reg, 2, 0x33333333);
-    SwapBits(rl_result.reg, 4, 0x0f0f0f0f);
-    StoreValue(rl_dest, rl_result);
-  }
-  return true;
-}
-
-void X86Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
-  if (cu_->target64) {
-    // We can do this directly using RIP addressing.
-    ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
-    LIR* res = NewLIR3(kX86Mov32RM, reg.GetReg(), kRIPReg, kDummy32BitOffset);
-    res->target = target;
-    res->flags.fixup = kFixupLoad;
-    return;
-  }
-
-  // Get the PC to a register and get the anchor.
-  LIR* anchor;
-  RegStorage r_pc = GetPcAndAnchor(&anchor);
-
-  // Load the proper value from the literal area.
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
-  LIR* res = NewLIR3(kX86Mov32RM, reg.GetReg(), r_pc.GetReg(), kDummy32BitOffset);
-  res->operands[4] = WrapPointer(anchor);
-  res->target = target;
-  res->flags.fixup = kFixupLoad;
-}
-
-bool X86Mir2Lir::CanUseOpPcRelDexCacheArrayLoad() const {
-  return dex_cache_arrays_layout_.Valid();
-}
-
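-// On 32-bit x86 there is no direct way to read EIP. A call with displacement 0 falls
-// through to the next instruction but still pushes a return address, so the pop that
-// follows materializes the address of the pop itself into r_dest.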
-LIR* X86Mir2Lir::OpLoadPc(RegStorage r_dest) {
-  DCHECK(!cu_->target64);
-  LIR* call = NewLIR1(kX86CallI, 0);
-  call->flags.fixup = kFixupLabel;
-  LIR* pop = NewLIR1(kX86Pop32R, r_dest.GetReg());
-  pop->flags.fixup = kFixupLabel;
-  DCHECK(NEXT_LIR(call) == pop);
-  return call;
-}
-
-RegStorage X86Mir2Lir::GetPcAndAnchor(LIR** anchor, RegStorage r_tmp) {
-  if (pc_rel_base_reg_.Valid()) {
-    DCHECK(setup_pc_rel_base_reg_ != nullptr);
-    *anchor = NEXT_LIR(setup_pc_rel_base_reg_);
-    DCHECK(*anchor != nullptr);
-    DCHECK_EQ((*anchor)->opcode, kX86Pop32R);
-    pc_rel_base_reg_used_ = true;
-    return pc_rel_base_reg_;
-  } else {
-    RegStorage r_pc = r_tmp.Valid() ? r_tmp : AllocTempRef();
-    LIR* load_pc = OpLoadPc(r_pc);
-    *anchor = NEXT_LIR(load_pc);
-    DCHECK(*anchor != nullptr);
-    DCHECK_EQ((*anchor)->opcode, kX86Pop32R);
-    return r_pc;
-  }
-}
-
-void X86Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest,
-                                          bool wide) {
-  if (cu_->target64) {
-    LIR* mov = NewLIR3(wide ? kX86Mov64RM : kX86Mov32RM, r_dest.GetReg(), kRIPReg,
-        kDummy32BitOffset);
-    mov->flags.fixup = kFixupLabel;
-    mov->operands[3] = WrapPointer(dex_file);
-    mov->operands[4] = offset;
-    mov->target = mov;  // Used for pc_insn_offset (not used by x86-64 relative patcher).
-    dex_cache_access_insns_.push_back(mov);
-  } else {
-    CHECK(!wide) << "Unsupported";
-    // Get the PC to a register and get the anchor. Use r_dest for the temp if needed.
-    LIR* anchor;
-    RegStorage r_pc = GetPcAndAnchor(&anchor, r_dest);
-    LIR* mov = NewLIR3(kX86Mov32RM, r_dest.GetReg(), r_pc.GetReg(), kDummy32BitOffset);
-    mov->flags.fixup = kFixupLabel;
-    mov->operands[3] = WrapPointer(dex_file);
-    mov->operands[4] = offset;
-    mov->target = anchor;  // Used for pc_insn_offset.
-    dex_cache_access_insns_.push_back(mov);
-  }
-}
-
-LIR* X86Mir2Lir::OpVldm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpVldm for x86";
-  UNREACHABLE();
-}
-
-LIR* X86Mir2Lir::OpVstm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpVstm for x86";
-  UNREACHABLE();
-}
-
-void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
-                                               RegLocation rl_result,
-                                               int lit ATTRIBUTE_UNUSED,
-                                               int first_bit,
-                                               int second_bit) {
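-  // A multiplier with exactly two bits set satisfies
-  //   x * ((1 << second_bit) | (1 << first_bit))
-  //     == ((x << (second_bit - first_bit)) + x) << first_bit,
-  // so one shift-add pair and a final shift suffice.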
-  RegStorage t_reg = AllocTemp();
-  OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
-  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
-  FreeTemp(t_reg);
-  if (first_bit != 0) {
-    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
-  }
-}
-
-void X86Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
-  if (cu_->target64) {
-    DCHECK(reg.Is64Bit());
-
-    NewLIR2(kX86Cmp64RI8, reg.GetReg(), 0);
-  } else {
-    DCHECK(reg.IsPair());
-
-    // We are not supposed to clobber the incoming storage, so allocate a temporary.
-    RegStorage t_reg = AllocTemp();
-    // Doing an OR is a quick way to check if both registers are zero. This will set the flags.
-    OpRegRegReg(kOpOr, t_reg, reg.GetLow(), reg.GetHigh());
-    // The temp is no longer needed so free it at this time.
-    FreeTemp(t_reg);
-  }
-
-  // In case of zero, throw ArithmeticException.
-  GenDivZeroCheck(kCondEq);
-}
-
-void X86Mir2Lir::GenArrayBoundsCheck(RegStorage index,
-                                     RegStorage array_base,
-                                     int len_offset) {
-  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
-   public:
-    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in,
-                             RegStorage index_in, RegStorage array_base_in, int32_t len_offset_in)
-        : LIRSlowPath(m2l, branch_in),
-          index_(index_in), array_base_(array_base_in), len_offset_(len_offset_in) {
-    }
-
-    void Compile() OVERRIDE {
-      m2l_->ResetRegPool();
-      m2l_->ResetDefTracking();
-      GenerateTargetLabel(kPseudoThrowTarget);
-
-      RegStorage new_index = index_;
-      // Move index out of kArg1, either directly to kArg0, or to kArg2.
-      // TODO: clean up to check by register type rather than by register number.
-      if (index_ == m2l_->TargetReg(kArg1, kNotWide)) {
-        if (array_base_ == m2l_->TargetReg(kArg0, kRef)) {
-          m2l_->OpRegCopy(m2l_->TargetReg(kArg2, kNotWide), index_);
-          new_index = m2l_->TargetReg(kArg2, kNotWide);
-        } else {
-          m2l_->OpRegCopy(m2l_->TargetReg(kArg0, kNotWide), index_);
-          new_index = m2l_->TargetReg(kArg0, kNotWide);
-        }
-      }
-      // Load array length to kArg1.
-      X86Mir2Lir* x86_m2l = static_cast<X86Mir2Lir*>(m2l_);
-      x86_m2l->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, kNotWide), array_base_, len_offset_);
-      x86_m2l->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, new_index,
-                                       m2l_->TargetReg(kArg1, kNotWide), true);
-    }
-
-   private:
-    const RegStorage index_;
-    const RegStorage array_base_;
-    const int32_t len_offset_;
-  };
-
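-  // A single unsigned compare catches both negative and too-large indexes, since a
-  // negative index reinterpreted as unsigned exceeds any array length.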
-  OpRegMem(kOpCmp, index, array_base, len_offset);
-  MarkPossibleNullPointerException(0);
-  LIR* branch = OpCondBranch(kCondUge, nullptr);
-  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch,
-                                                    index, array_base, len_offset));
-}
-
-void X86Mir2Lir::GenArrayBoundsCheck(int32_t index,
-                                     RegStorage array_base,
-                                     int32_t len_offset) {
-  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
-   public:
-    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in,
-                             int32_t index_in, RegStorage array_base_in, int32_t len_offset_in)
-        : LIRSlowPath(m2l, branch_in),
-          index_(index_in), array_base_(array_base_in), len_offset_(len_offset_in) {
-    }
-
-    void Compile() OVERRIDE {
-      m2l_->ResetRegPool();
-      m2l_->ResetDefTracking();
-      GenerateTargetLabel(kPseudoThrowTarget);
-
-      // Load array length to kArg1.
-      X86Mir2Lir* x86_m2l = static_cast<X86Mir2Lir*>(m2l_);
-      x86_m2l->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, kNotWide), array_base_, len_offset_);
-      x86_m2l->LoadConstant(m2l_->TargetReg(kArg0, kNotWide), index_);
-      x86_m2l->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, m2l_->TargetReg(kArg0, kNotWide),
-                                       m2l_->TargetReg(kArg1, kNotWide), true);
-    }
-
-   private:
-    const int32_t index_;
-    const RegStorage array_base_;
-    const int32_t len_offset_;
-  };
-
-  NewLIR3(IS_SIMM8(index) ? kX86Cmp32MI8 : kX86Cmp32MI, array_base.GetReg(), len_offset, index);
-  MarkPossibleNullPointerException(0);
-  LIR* branch = OpCondBranch(kCondLs, nullptr);
-  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch,
-                                                    index, array_base, len_offset));
-}
-
-// Test suspend flag, return target of taken suspend branch
-LIR* X86Mir2Lir::OpTestSuspend(LIR* target) {
-  if (cu_->target64) {
-    OpTlsCmp(Thread::ThreadFlagsOffset<8>(), 0);
-  } else {
-    OpTlsCmp(Thread::ThreadFlagsOffset<4>(), 0);
-  }
-  return OpCondBranch((target == nullptr) ? kCondNe : kCondEq, target);
-}
-
-// Decrement register and branch on condition
-LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
-  OpRegImm(kOpSub, reg, 1);
-  return OpCondBranch(c_code, target);
-}
-
-bool X86Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED,
-                                    bool is_div ATTRIBUTE_UNUSED,
-                                    RegLocation rl_src ATTRIBUTE_UNUSED,
-                                    RegLocation rl_dest ATTRIBUTE_UNUSED,
-                                    int lit ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of smallLiteralDivRem in x86";
-  UNREACHABLE();
-}
-
-bool X86Mir2Lir::EasyMultiply(RegLocation rl_src ATTRIBUTE_UNUSED,
-                              RegLocation rl_dest ATTRIBUTE_UNUSED,
-                              int lit ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of easyMultiply in x86";
-  UNREACHABLE();
-}
-
-LIR* X86Mir2Lir::OpIT(ConditionCode cond ATTRIBUTE_UNUSED, const char* guide ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpIT in x86";
-  UNREACHABLE();
-}
-
-void X86Mir2Lir::OpEndIT(LIR* it ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of OpEndIT in x86";
-  UNREACHABLE();
-}
-
-void X86Mir2Lir::GenImulRegImm(RegStorage dest, RegStorage src, int val) {
-  switch (val) {
-    case 0:
-      NewLIR2(kX86Xor32RR, dest.GetReg(), dest.GetReg());
-      break;
-    case 1:
-      OpRegCopy(dest, src);
-      break;
-    default:
-      OpRegRegImm(kOpMul, dest, src, val);
-      break;
-  }
-}
-
-void X86Mir2Lir::GenImulMemImm(RegStorage dest,
-                               int sreg ATTRIBUTE_UNUSED,
-                               int displacement,
-                               int val) {
-  // All memory accesses below reference dalvik regs.
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-
-  LIR *m;
-  switch (val) {
-    case 0:
-      NewLIR2(kX86Xor32RR, dest.GetReg(), dest.GetReg());
-      break;
-    case 1: {
-      const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
-      LoadBaseDisp(rs_rSP, displacement, dest, k32, kNotVolatile);
-      break;
-    }
-    default:
-      m = NewLIR4(IS_SIMM8(val) ? kX86Imul32RMI8 : kX86Imul32RMI, dest.GetReg(),
-                  rs_rX86_SP_32.GetReg(), displacement, val);
-      AnnotateDalvikRegAccess(m, displacement >> 2, true /* is_load */, true /* is_64bit */);
-      break;
-  }
-}
-
-void X86Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                                RegLocation rl_src2, int flags) {
-  if (!cu_->target64) {
-    // Some 32-bit x86 ops fall back to the generic Mir2Lir implementation.
-    switch (opcode) {
-      case Instruction::NOT_LONG:
-      case Instruction::DIV_LONG:
-      case Instruction::DIV_LONG_2ADDR:
-      case Instruction::REM_LONG:
-      case Instruction::REM_LONG_2ADDR:
-        Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
-        return;
-
-      default:
-        // Everything else we can handle.
-        break;
-    }
-  }
-
-  switch (opcode) {
-    case Instruction::NOT_LONG:
-      GenNotLong(rl_dest, rl_src2);
-      return;
-
-    case Instruction::ADD_LONG:
-    case Instruction::ADD_LONG_2ADDR:
-      GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
-      return;
-
-    case Instruction::SUB_LONG:
-    case Instruction::SUB_LONG_2ADDR:
-      GenLongArith(rl_dest, rl_src1, rl_src2, opcode, false);
-      return;
-
-    case Instruction::MUL_LONG:
-    case Instruction::MUL_LONG_2ADDR:
-      GenMulLong(opcode, rl_dest, rl_src1, rl_src2, flags);
-      return;
-
-    case Instruction::DIV_LONG:
-    case Instruction::DIV_LONG_2ADDR:
-      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true, flags);
-      return;
-
-    case Instruction::REM_LONG:
-    case Instruction::REM_LONG_2ADDR:
-      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false, flags);
-      return;
-
-    case Instruction::AND_LONG_2ADDR:
-    case Instruction::AND_LONG:
-      GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
-      return;
-
-    case Instruction::OR_LONG:
-    case Instruction::OR_LONG_2ADDR:
-      GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
-      return;
-
-    case Instruction::XOR_LONG:
-    case Instruction::XOR_LONG_2ADDR:
-      GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
-      return;
-
-    case Instruction::NEG_LONG:
-      GenNegLong(rl_dest, rl_src2);
-      return;
-
-    default:
-      LOG(FATAL) << "Invalid long arith op";
-      return;
-  }
-}
-
-bool X86Mir2Lir::GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64_t val, int flags) {
-  // All memory accesses below reference dalvik regs.
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-
-  if (val == 0) {
-    RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-    if (cu_->target64) {
-      OpRegReg(kOpXor, rl_result.reg, rl_result.reg);
-    } else {
-      OpRegReg(kOpXor, rl_result.reg.GetLow(), rl_result.reg.GetLow());
-      OpRegReg(kOpXor, rl_result.reg.GetHigh(), rl_result.reg.GetHigh());
-    }
-    StoreValueWide(rl_dest, rl_result);
-    return true;
-  } else if (val == 1) {
-    StoreValueWide(rl_dest, rl_src1);
-    return true;
-  } else if (val == 2) {
-    GenArithOpLong(Instruction::ADD_LONG, rl_dest, rl_src1, rl_src1, flags);
-    return true;
-  } else if (IsPowerOfTwo(val)) {
-    int shift_amount = CTZ(val);
-    if (!PartiallyIntersects(rl_src1, rl_dest)) {
-      rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-      RegLocation rl_result = GenShiftImmOpLong(Instruction::SHL_LONG, rl_dest, rl_src1,
-                                                shift_amount, flags);
-      StoreValueWide(rl_dest, rl_result);
-      return true;
-    }
-  }
-
-  // Okay, on 32-bit just bite the bullet and do it; still better than the general case.
-  if (!cu_->target64) {
-    int32_t val_lo = Low32Bits(val);
-    int32_t val_hi = High32Bits(val);
-    // Prepare for explicit register usage.
-    ExplicitTempRegisterLock(this, 3, &rs_r0, &rs_r1, &rs_r2);
-    rl_src1 = UpdateLocWideTyped(rl_src1);
-    bool src1_in_reg = rl_src1.location == kLocPhysReg;
-    int displacement = SRegOffset(rl_src1.s_reg_low);
-
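-    // Schoolbook decomposition with src1 = 1H:1L and val = 2H:2L:
-    //   src1 * val mod 2^64 = (1L * 2L) + 2^32 * ((1H * 2L) + (1L * 2H)).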
-    // ECX <- 1H * 2L
-    // EAX <- 1L * 2H
-    if (src1_in_reg) {
-      GenImulRegImm(rs_r1, rl_src1.reg.GetHigh(), val_lo);
-      GenImulRegImm(rs_r0, rl_src1.reg.GetLow(), val_hi);
-    } else {
-      GenImulMemImm(rs_r1, GetSRegHi(rl_src1.s_reg_low), displacement + HIWORD_OFFSET, val_lo);
-      GenImulMemImm(rs_r0, rl_src1.s_reg_low, displacement + LOWORD_OFFSET, val_hi);
-    }
-
-    // ECX <- ECX + EAX  (2H * 1L) + (1H * 2L)
-    NewLIR2(kX86Add32RR, rs_r1.GetReg(), rs_r0.GetReg());
-
-    // EAX <- 2L
-    LoadConstantNoClobber(rs_r0, val_lo);
-
-    // EDX:EAX <- 2L * 1L (double precision)
-    if (src1_in_reg) {
-      NewLIR1(kX86Mul32DaR, rl_src1.reg.GetLowReg());
-    } else {
-      LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP_32.GetReg(), displacement + LOWORD_OFFSET);
-      AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
-                              true /* is_load */, true /* is_64bit */);
-    }
-
-    // EDX <- EDX + ECX (add high words)
-    NewLIR2(kX86Add32RR, rs_r2.GetReg(), rs_r1.GetReg());
-
-    // Result is EDX:EAX
-    RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
-                             RegStorage::MakeRegPair(rs_r0, rs_r2), INVALID_SREG, INVALID_SREG};
-    StoreValueWide(rl_dest, rl_result);
-    return true;
-  }
-  return false;
-}
-
-void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2, int flags) {
-  if (rl_src1.is_const) {
-    std::swap(rl_src1, rl_src2);
-  }
-
-  if (rl_src2.is_const) {
-    if (GenMulLongConst(rl_dest, rl_src1, mir_graph_->ConstantValueWide(rl_src2), flags)) {
-      return;
-    }
-  }
-
-  // All memory accesses below reference dalvik regs.
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-
-  if (cu_->target64) {
-    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-    rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-    RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-    if (rl_result.reg.GetReg() == rl_src1.reg.GetReg() &&
-        rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
-      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
-    } else if (rl_result.reg.GetReg() != rl_src1.reg.GetReg() &&
-               rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
-      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
-    } else if (rl_result.reg.GetReg() == rl_src1.reg.GetReg() &&
-               rl_result.reg.GetReg() != rl_src2.reg.GetReg()) {
-      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
-    } else {
-      OpRegCopy(rl_result.reg, rl_src1.reg);
-      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
-    }
-    StoreValueWide(rl_dest, rl_result);
-    return;
-  }
-
-  // Not multiplying by a constant. Do it the hard way.
-  // Check for V*V.  We can eliminate a multiply in that case, as 2L*1H == 2H*1L.
-  bool is_square = mir_graph_->SRegToVReg(rl_src1.s_reg_low) ==
-                   mir_graph_->SRegToVReg(rl_src2.s_reg_low);
-
-  // Prepare for explicit register usage.
-  ExplicitTempRegisterLock(this, 3, &rs_r0, &rs_r1, &rs_r2);
-  rl_src1 = UpdateLocWideTyped(rl_src1);
-  rl_src2 = UpdateLocWideTyped(rl_src2);
-
-  // At this point, the VRs are in their home locations.
-  bool src1_in_reg = rl_src1.location == kLocPhysReg;
-  bool src2_in_reg = rl_src2.location == kLocPhysReg;
-  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
-
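-  // Schoolbook decomposition again: with src1 = 1H:1L and src2 = 2H:2L,
-  //   src1 * src2 mod 2^64 = (1L * 2L) + 2^32 * ((1H * 2L) + (1L * 2H)).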
-  // ECX <- 1H
-  if (src1_in_reg) {
-    NewLIR2(kX86Mov32RR, rs_r1.GetReg(), rl_src1.reg.GetHighReg());
-  } else {
-    LoadBaseDisp(rs_rSP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1, k32,
-                 kNotVolatile);
-  }
-
-  if (is_square) {
-    // Take advantage of the fact that the values are the same.
-    // ECX <- ECX * 2L  (1H * 2L)
-    if (src2_in_reg) {
-      NewLIR2(kX86Imul32RR, rs_r1.GetReg(), rl_src2.reg.GetLowReg());
-    } else {
-      int displacement = SRegOffset(rl_src2.s_reg_low);
-      LIR* m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP_32.GetReg(),
-                       displacement + LOWORD_OFFSET);
-      AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
-                              true /* is_load */, true /* is_64bit */);
-    }
-
-    // ECX <- 2*ECX (2H * 1L) + (1H * 2L)
-    NewLIR2(kX86Add32RR, rs_r1.GetReg(), rs_r1.GetReg());
-  } else {
-    // EAX <- 2H
-    if (src2_in_reg) {
-      NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetHighReg());
-    } else {
-      LoadBaseDisp(rs_rSP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0, k32,
-                   kNotVolatile);
-    }
-
-    // EAX <- EAX * 1L  (2H * 1L)
-    if (src1_in_reg) {
-      NewLIR2(kX86Imul32RR, rs_r0.GetReg(), rl_src1.reg.GetLowReg());
-    } else {
-      int displacement = SRegOffset(rl_src1.s_reg_low);
-      LIR *m = NewLIR3(kX86Imul32RM, rs_r0.GetReg(), rs_rX86_SP_32.GetReg(),
-                       displacement + LOWORD_OFFSET);
-      AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
-                              true /* is_load */, true /* is_64bit */);
-    }
-
-    // ECX <- ECX * 2L  (1H * 2L)
-    if (src2_in_reg) {
-      NewLIR2(kX86Imul32RR, rs_r1.GetReg(), rl_src2.reg.GetLowReg());
-    } else {
-      int displacement = SRegOffset(rl_src2.s_reg_low);
-      LIR *m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP_32.GetReg(),
-                       displacement + LOWORD_OFFSET);
-      AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
-                              true /* is_load */, true /* is_64bit */);
-    }
-
-    // ECX <- ECX + EAX  (2H * 1L) + (1H * 2L)
-    NewLIR2(kX86Add32RR, rs_r1.GetReg(), rs_r0.GetReg());
-  }
-
-  // EAX <- 2L
-  if (src2_in_reg) {
-    NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetLowReg());
-  } else {
-    LoadBaseDisp(rs_rSP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0, k32,
-                 kNotVolatile);
-  }
-
-  // EDX:EAX <- 2L * 1L (double precision)
-  if (src1_in_reg) {
-    NewLIR1(kX86Mul32DaR, rl_src1.reg.GetLowReg());
-  } else {
-    int displacement = SRegOffset(rl_src1.s_reg_low);
-    LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP_32.GetReg(), displacement + LOWORD_OFFSET);
-    AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
-                            true /* is_load */, true /* is_64bit */);
-  }
-
-  // EDX <- EDX + ECX (add high words)
-  NewLIR2(kX86Add32RR, rs_r2.GetReg(), rs_r1.GetReg());
-
-  // Result is EDX:EAX
-  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
-                           RegStorage::MakeRegPair(rs_r0, rs_r2), INVALID_SREG, INVALID_SREG};
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
-                                   Instruction::Code op) {
-  DCHECK_EQ(rl_dest.location, kLocPhysReg);
-  X86OpCode x86op = GetOpcode(op, rl_dest, rl_src, false);
-  if (rl_src.location == kLocPhysReg) {
-    // Both operands are in registers.
-    // But we must ensure that rl_src is in a register pair.
-    if (cu_->target64) {
-      NewLIR2(x86op, rl_dest.reg.GetReg(), rl_src.reg.GetReg());
-    } else {
-      rl_src = LoadValueWide(rl_src, kCoreReg);
-      if (rl_dest.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
-        // The registers are the same, so we would clobber the source before its use.
-        RegStorage temp_reg = AllocTemp();
-        OpRegCopy(temp_reg, rl_dest.reg);
-        rl_src.reg.SetHighReg(temp_reg.GetReg());
-      }
-      NewLIR2(x86op, rl_dest.reg.GetLowReg(), rl_src.reg.GetLowReg());
-
-      x86op = GetOpcode(op, rl_dest, rl_src, true);
-      NewLIR2(x86op, rl_dest.reg.GetHighReg(), rl_src.reg.GetHighReg());
-    }
-    return;
-  }
-
-  // RHS is in memory.
-  DCHECK((rl_src.location == kLocDalvikFrame) ||
-         (rl_src.location == kLocCompilerTemp));
-  int r_base = rs_rX86_SP_32.GetReg();
-  int displacement = SRegOffset(rl_src.s_reg_low);
-
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-  LIR *lir = NewLIR3(x86op, cu_->target64 ? rl_dest.reg.GetReg() : rl_dest.reg.GetLowReg(),
-                     r_base, displacement + LOWORD_OFFSET);
-  AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
-                          true /* is_load */, true /* is64bit */);
-  if (!cu_->target64) {
-    x86op = GetOpcode(op, rl_dest, rl_src, true);
-    lir = NewLIR3(x86op, rl_dest.reg.GetHighReg(), r_base, displacement + HIWORD_OFFSET);
-    AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
-                            true /* is_load */, true /* is64bit */);
-  }
-}
-
-void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op) {
-  rl_dest = UpdateLocWideTyped(rl_dest);
-  if (rl_dest.location == kLocPhysReg) {
-    // Ensure we are in a register pair
-    RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-
-    rl_src = UpdateLocWideTyped(rl_src);
-    GenLongRegOrMemOp(rl_result, rl_src, op);
-    StoreFinalValueWide(rl_dest, rl_result);
-    return;
-  } else if (!cu_->target64 && Intersects(rl_src, rl_dest)) {
-    // Handle the case when src and dest intersect.
-    rl_src = LoadValueWide(rl_src, kCoreReg);
-    RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-    rl_src = UpdateLocWideTyped(rl_src);
-    GenLongRegOrMemOp(rl_result, rl_src, op);
-    StoreFinalValueWide(rl_dest, rl_result);
-    return;
-  }
-
-  // It wasn't in registers, so it better be in memory.
-  DCHECK((rl_dest.location == kLocDalvikFrame) ||
-         (rl_dest.location == kLocCompilerTemp));
-  rl_src = LoadValueWide(rl_src, kCoreReg);
-
-  // Operate directly into memory.
-  X86OpCode x86op = GetOpcode(op, rl_dest, rl_src, false);
-  int r_base = rs_rX86_SP_32.GetReg();
-  int displacement = SRegOffset(rl_dest.s_reg_low);
-
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-  LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET,
-                     cu_->target64 ? rl_src.reg.GetReg() : rl_src.reg.GetLowReg());
-  AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
-                          true /* is_load */, true /* is64bit */);
-  AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
-                          false /* is_load */, true /* is64bit */);
-  if (!cu_->target64) {
-    x86op = GetOpcode(op, rl_dest, rl_src, true);
-    lir = NewLIR3(x86op, r_base, displacement + HIWORD_OFFSET, rl_src.reg.GetHighReg());
-    AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
-                            true /* is_load */, true /* is64bit */);
-    AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
-                            false /* is_load */, true /* is64bit */);
-  }
-
-  int v_src_reg = mir_graph_->SRegToVReg(rl_src.s_reg_low);
-  int v_dst_reg = mir_graph_->SRegToVReg(rl_dest.s_reg_low);
-
-  // If the left operand is in memory and the right operand is in a register, and
-  // both belong to the same dalvik register, then clobber the right one because it
-  // no longer holds valid data.
-  if (v_src_reg == v_dst_reg) {
-    Clobber(rl_src.reg);
-  }
-}
-
-void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
-                              RegLocation rl_src2, Instruction::Code op,
-                              bool is_commutative) {
-  // Is this really a 2-operand operation?
-  switch (op) {
-    case Instruction::ADD_LONG_2ADDR:
-    case Instruction::SUB_LONG_2ADDR:
-    case Instruction::AND_LONG_2ADDR:
-    case Instruction::OR_LONG_2ADDR:
-    case Instruction::XOR_LONG_2ADDR:
-      if (GenerateTwoOperandInstructions()) {
-        GenLongArith(rl_dest, rl_src2, op);
-        return;
-      }
-      break;
-
-    default:
-      break;
-  }
-
-  if (rl_dest.location == kLocPhysReg) {
-    RegLocation rl_result = LoadValueWide(rl_src1, kCoreReg);
-
-    // We are about to clobber the LHS, so it needs to be a temp.
-    rl_result = ForceTempWide(rl_result);
-
-    // Perform the operation using the RHS.
-    rl_src2 = UpdateLocWideTyped(rl_src2);
-    GenLongRegOrMemOp(rl_result, rl_src2, op);
-
-    // And now record that the result is in the temp.
-    StoreFinalValueWide(rl_dest, rl_result);
-    return;
-  }
-
-  // It wasn't in registers, so it better be in memory.
-  DCHECK((rl_dest.location == kLocDalvikFrame) || (rl_dest.location == kLocCompilerTemp));
-  rl_src1 = UpdateLocWideTyped(rl_src1);
-  rl_src2 = UpdateLocWideTyped(rl_src2);
-
-  // Get one of the source operands into a temporary register.
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  if (cu_->target64) {
-    if (IsTemp(rl_src1.reg)) {
-      GenLongRegOrMemOp(rl_src1, rl_src2, op);
-    } else if (is_commutative) {
-      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-      // We need at least one of them to be a temporary.
-      if (!IsTemp(rl_src2.reg)) {
-        rl_src1 = ForceTempWide(rl_src1);
-        GenLongRegOrMemOp(rl_src1, rl_src2, op);
-      } else {
-        GenLongRegOrMemOp(rl_src2, rl_src1, op);
-        StoreFinalValueWide(rl_dest, rl_src2);
-        return;
-      }
-    } else {
-      // Need LHS to be the temp.
-      rl_src1 = ForceTempWide(rl_src1);
-      GenLongRegOrMemOp(rl_src1, rl_src2, op);
-    }
-  } else {
-    if (IsTemp(rl_src1.reg.GetLow()) && IsTemp(rl_src1.reg.GetHigh())) {
-      GenLongRegOrMemOp(rl_src1, rl_src2, op);
-    } else if (is_commutative) {
-      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-      // We need at least one of them to be a temporary.
-      if (!(IsTemp(rl_src2.reg.GetLow()) && IsTemp(rl_src2.reg.GetHigh()))) {
-        rl_src1 = ForceTempWide(rl_src1);
-        GenLongRegOrMemOp(rl_src1, rl_src2, op);
-      } else {
-        GenLongRegOrMemOp(rl_src2, rl_src1, op);
-        StoreFinalValueWide(rl_dest, rl_src2);
-        return;
-      }
-    } else {
-      // Need LHS to be the temp.
-      rl_src1 = ForceTempWide(rl_src1);
-      GenLongRegOrMemOp(rl_src1, rl_src2, op);
-    }
-  }
-
-  StoreFinalValueWide(rl_dest, rl_src1);
-}
-
-void X86Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
-  if (cu_->target64) {
-    rl_src = LoadValueWide(rl_src, kCoreReg);
-    RegLocation rl_result;
-    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-    OpRegCopy(rl_result.reg, rl_src.reg);
-    OpReg(kOpNot, rl_result.reg);
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    LOG(FATAL) << "Unexpected use GenNotLong()";
-  }
-}
-
-void X86Mir2Lir::GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src,
-                                  int64_t imm, bool is_div) {
-  if (imm == 0) {
-    GenDivZeroException();
-  } else if (imm == 1) {
-    if (is_div) {
-      // x / 1 == x.
-      StoreValueWide(rl_dest, rl_src);
-    } else {
-      // x % 1 == 0.
-      RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-      LoadConstantWide(rl_result.reg, 0);
-      StoreValueWide(rl_dest, rl_result);
-    }
-  } else if (imm == -1) {  // handle 0x8000000000000000 / -1 special case.
-    if (is_div) {
-      rl_src = LoadValueWide(rl_src, kCoreReg);
-      RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-      RegStorage rs_temp = AllocTempWide();
-
-      OpRegCopy(rl_result.reg, rl_src.reg);
-      LoadConstantWide(rs_temp, 0x8000000000000000);
-
-      // If x == MIN_LONG, return MIN_LONG.
-      OpRegReg(kOpCmp, rl_src.reg, rs_temp);
-      LIR *minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
-
-      // For x != MIN_LONG, x / -1 == -x.
-      OpReg(kOpNeg, rl_result.reg);
-
-      minint_branch->target = NewLIR0(kPseudoTargetLabel);
-      FreeTemp(rs_temp);
-      StoreValueWide(rl_dest, rl_result);
-    } else {
-      // x % -1 == 0.
-      RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-      LoadConstantWide(rl_result.reg, 0);
-      StoreValueWide(rl_dest, rl_result);
-    }
-  } else if (is_div && IsPowerOfTwo(std::abs(imm))) {
-    // Division using shifting.
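-    // SAR rounds toward negative infinity while Java division rounds toward zero,
-    // so bias negative dividends by abs(imm) - 1 before shifting; the CMOV keeps
-    // non-negative dividends unbiased.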
-    rl_src = LoadValueWide(rl_src, kCoreReg);
-    RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-    if (IsSameReg(rl_result.reg, rl_src.reg)) {
-      RegStorage rs_temp = AllocTypedTempWide(false, kCoreReg);
-      rl_result.reg.SetReg(rs_temp.GetReg());
-    }
-    LoadConstantWide(rl_result.reg, std::abs(imm) - 1);
-    OpRegReg(kOpAdd, rl_result.reg, rl_src.reg);
-    NewLIR2(kX86Test64RR, rl_src.reg.GetReg(), rl_src.reg.GetReg());
-    OpCondRegReg(kOpCmov, kCondPl, rl_result.reg, rl_src.reg);
-    int shift_amount = CTZ(imm);
-    OpRegImm(kOpAsr, rl_result.reg, shift_amount);
-    if (imm < 0) {
-      OpReg(kOpNeg, rl_result.reg);
-    }
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    CHECK(imm <= -2 || imm >= 2);
-
-    FlushReg(rs_r0q);
-    Clobber(rs_r0q);
-    LockTemp(rs_r0q);
-    FlushReg(rs_r2q);
-    Clobber(rs_r2q);
-    LockTemp(rs_r2q);
-
-    RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
-                             is_div ? rs_r2q : rs_r0q, INVALID_SREG, INVALID_SREG};
-
-    // Use H. S. Warren's Hacker's Delight, Chapter 10, and T. Granlund and
-    // P. L. Montgomery's "Division by invariant integers using multiplication".
-    int64_t magic;
-    int shift;
-    CalculateMagicAndShift(imm, magic, shift, true /* is_long */);
-
-    /*
-     * For imm >= 2,
-     *     int(n/imm) = floor(n/imm) = floor(M*n/2^S), when n > 0
-     *     int(n/imm) = ceil(n/imm) = floor(M*n/2^S) + 1, when n < 0.
-     * For imm <= -2,
-     *     int(n/imm) = ceil(n/imm) = floor(M*n/2^S) + 1, when n > 0
-     *     int(n/imm) = floor(n/imm) = floor(M*n/2^S), when n < 0.
-     * We implement this algorithm in the following way:
-     * 1. Multiply the magic number M by the numerator n, leaving the upper 64 bits
-     *    of the product in RDX.
-     * 2. If imm > 0 and M < 0, add the numerator to RDX;
-     *    if imm < 0 and M > 0, subtract the numerator from RDX.
-     * 3. If S != 0, arithmetically shift RDX right by S bits.
-     * 4. Add 1 to RDX if RDX < 0.
-     * 5. RDX now holds the quotient.
-     */
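-    // For instance, for imm = 3 the 64-bit constants are M = 0x5555555555555556 and
-    // S = 0: with n = 9 the upper half of M * n is 3, and with n = -9 it is -4,
-    // which step 4 corrects to -3.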
-
-    // RAX = magic.
-    LoadConstantWide(rs_r0q, magic);
-
-    // Multiply by numerator.
-    RegStorage numerator_reg;
-    if (!is_div || (imm > 0 && magic < 0) || (imm < 0 && magic > 0)) {
-      // We will need the value later.
-      rl_src = LoadValueWide(rl_src, kCoreReg);
-      numerator_reg = rl_src.reg;
-
-      // RDX:RAX = magic * numerator.
-      NewLIR1(kX86Imul64DaR, numerator_reg.GetReg());
-    } else {
-      // Only need this once.  Multiply directly from the value.
-      rl_src = UpdateLocWideTyped(rl_src);
-      if (rl_src.location != kLocPhysReg) {
-        // Okay, we can do this from memory.
-        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-        int displacement = SRegOffset(rl_src.s_reg_low);
-        // RDX:RAX = magic * numerator.
-        LIR *m = NewLIR2(kX86Imul64DaM, rs_rX86_SP_32.GetReg(), displacement);
-        AnnotateDalvikRegAccess(m, displacement >> 2,
-                                true /* is_load */, true /* is_64bit */);
-      } else {
-        // RDX:RAX = magic * numerator.
-        NewLIR1(kX86Imul64DaR, rl_src.reg.GetReg());
-      }
-    }
-
-    if (imm > 0 && magic < 0) {
-      // Add numerator to RDX.
-      DCHECK(numerator_reg.Valid());
-      OpRegReg(kOpAdd, rs_r2q, numerator_reg);
-    } else if (imm < 0 && magic > 0) {
-      DCHECK(numerator_reg.Valid());
-      OpRegReg(kOpSub, rs_r2q, numerator_reg);
-    }
-
-    // Do we need the shift?
-    if (shift != 0) {
-      // Shift RDX by 'shift' bits.
-      OpRegImm(kOpAsr, rs_r2q, shift);
-    }
-
-    // Move RDX to RAX.
-    OpRegCopyWide(rs_r0q, rs_r2q);
-
-    // Move sign bit to bit 0, zeroing the rest.
-    OpRegImm(kOpLsr, rs_r2q, 63);
-
-    // RDX = RDX + RAX.
-    OpRegReg(kOpAdd, rs_r2q, rs_r0q);
-
-    // Quotient is in RDX.
-    if (!is_div) {
-      // We need to compute the remainder.
-      // Remainder is dividend - (quotient * imm).
-      DCHECK(numerator_reg.Valid());
-      OpRegCopyWide(rs_r0q, numerator_reg);
-
-      // Imul doesn't support 64-bit imms.
-      if (imm > std::numeric_limits<int32_t>::max() ||
-          imm < std::numeric_limits<int32_t>::min()) {
-        RegStorage rs_temp = AllocTempWide();
-        LoadConstantWide(rs_temp, imm);
-
-        // RDX = quotient * imm.
-        NewLIR2(kX86Imul64RR, rs_r2q.GetReg(), rs_temp.GetReg());
-
-        FreeTemp(rs_temp);
-      } else {
-        // RDX = quotient * imm.
-        int short_imm = static_cast<int>(imm);
-        NewLIR3(kX86Imul64RRI, rs_r2q.GetReg(), rs_r2q.GetReg(), short_imm);
-      }
-
-      // RAX -= RDX.
-      OpRegReg(kOpSub, rs_r0q, rs_r2q);
-
-      // Result in RAX.
-    } else {
-      // Result in RDX.
-    }
-    StoreValueWide(rl_dest, rl_result);
-    FreeTemp(rs_r0q);
-    FreeTemp(rs_r2q);
-  }
-}
-
-void X86Mir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                               RegLocation rl_src2, bool is_div, int flags) {
-  if (!cu_->target64) {
-    LOG(FATAL) << "Unexpected use GenDivRemLong()";
-    return;
-  }
-
-  if (rl_src2.is_const) {
-    DCHECK(rl_src2.wide);
-    int64_t imm = mir_graph_->ConstantValueWide(rl_src2);
-    GenDivRemLongLit(rl_dest, rl_src1, imm, is_div);
-    return;
-  }
-
-  // We have to use fixed registers, so flush all the temps.
-  // Prepare for explicit register usage.
-  ExplicitTempRegisterLock(this, 4, &rs_r0q, &rs_r1q, &rs_r2q, &rs_r6q);
-
-  // Load LHS into RAX.
-  LoadValueDirectWideFixed(rl_src1, rs_r0q);
-
-  // Load RHS into RCX.
-  LoadValueDirectWideFixed(rl_src2, rs_r1q);
-
-  // Copy LHS sign bit into RDX.
-  NewLIR0(kx86Cqo64Da);
-
-  // Handle division by zero case.
-  if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
-    GenDivZeroCheckWide(rs_r1q);
-  }
-
-  // Have to catch 0x8000000000000000/-1 case, or we will get an exception!
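-  // Hardware idiv raises a divide error on 0x8000000000000000 / -1 because the true
-  // quotient, 2^63, is not representable in 64 bits.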
-  NewLIR2(kX86Cmp64RI8, rs_r1q.GetReg(), -1);
-  LIR* minus_one_branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
-
-  // RHS is -1.
-  LoadConstantWide(rs_r6q, 0x8000000000000000);
-  NewLIR2(kX86Cmp64RR, rs_r0q.GetReg(), rs_r6q.GetReg());
-  LIR *minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
-
-  // In 0x8000000000000000/-1 case.
-  if (!is_div) {
-    // For DIV, RAX is already right. For REM, we need RDX to be 0.
-    NewLIR2(kX86Xor64RR, rs_r2q.GetReg(), rs_r2q.GetReg());
-  }
-  LIR* done = NewLIR1(kX86Jmp8, 0);
-
-  // Expected case.
-  minus_one_branch->target = NewLIR0(kPseudoTargetLabel);
-  minint_branch->target = minus_one_branch->target;
-  NewLIR1(kX86Idivmod64DaR, rs_r1q.GetReg());
-  done->target = NewLIR0(kPseudoTargetLabel);
-
-  // Result is in RAX for div and RDX for rem.
-  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, rs_r0q, INVALID_SREG, INVALID_SREG};
-  if (!is_div) {
-    rl_result.reg.SetReg(r2q);
-  }
-
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
-  rl_src = LoadValueWide(rl_src, kCoreReg);
-  RegLocation rl_result;
-  if (cu_->target64) {
-    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-    OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
-  } else {
-    rl_result = ForceTempWide(rl_src);
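-    // Two's-complement negation across a pair: -(H:L) has low word -L and high word
-    // -(H + (L != 0)). NEG sets the carry flag exactly when its operand is non-zero,
-    // so the ADC folds in the borrow from the low word.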
-    OpRegReg(kOpNeg, rl_result.reg.GetLow(), rl_result.reg.GetLow());    // rLow = -rLow
-    OpRegImm(kOpAdc, rl_result.reg.GetHigh(), 0);                   // rHigh = rHigh + CF
-    OpRegReg(kOpNeg, rl_result.reg.GetHigh(), rl_result.reg.GetHigh());  // rHigh = -rHigh
-  }
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void X86Mir2Lir::OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset) {
-  DCHECK_EQ(kX86, cu_->instruction_set);
-  X86OpCode opcode = kX86Bkpt;
-  switch (op) {
-  case kOpCmp: opcode = kX86Cmp32RT;  break;
-  case kOpMov: opcode = kX86Mov32RT;  break;
-  default:
-    LOG(FATAL) << "Bad opcode: " << op;
-    break;
-  }
-  NewLIR2(opcode, r_dest.GetReg(), thread_offset.Int32Value());
-}
-
-void X86Mir2Lir::OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset) {
-  DCHECK_EQ(kX86_64, cu_->instruction_set);
-  X86OpCode opcode = kX86Bkpt;
-  if (cu_->target64 && r_dest.Is64BitSolo()) {
-    switch (op) {
-    case kOpCmp: opcode = kX86Cmp64RT;  break;
-    case kOpMov: opcode = kX86Mov64RT;  break;
-    default:
-      LOG(FATAL) << "Bad opcode(OpRegThreadMem 64): " << op;
-      break;
-    }
-  } else {
-    switch (op) {
-    case kOpCmp: opcode = kX86Cmp32RT;  break;
-    case kOpMov: opcode = kX86Mov32RT;  break;
-    default:
-      LOG(FATAL) << "Bad opcode: " << op;
-      break;
-    }
-  }
-  NewLIR2(opcode, r_dest.GetReg(), thread_offset.Int32Value());
-}
-
-/*
- * Generate array load
- */
-void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
-                             RegLocation rl_index, RegLocation rl_dest, int scale) {
-  RegisterClass reg_class = RegClassForFieldLoadStore(size, false);
-  int len_offset = mirror::Array::LengthOffset().Int32Value();
-  RegLocation rl_result;
-  rl_array = LoadValue(rl_array, kRefReg);
-
-  int data_offset;
-  if (size == k64 || size == kDouble) {
-    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
-  } else {
-    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
-  }
-
-  bool constant_index = rl_index.is_const;
-  int32_t constant_index_value = 0;
-  if (!constant_index) {
-    rl_index = LoadValue(rl_index, kCoreReg);
-  } else {
-    constant_index_value = mir_graph_->ConstantValue(rl_index);
-    // If the index is constant, just fold it into the data offset.
-    data_offset += constant_index_value << scale;
-    // Treat as non-array below.
-    rl_index.reg = RegStorage::InvalidReg();
-  }
-
-  /* null object? */
-  GenNullCheck(rl_array.reg, opt_flags);
-
-  if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
-    if (constant_index) {
-      GenArrayBoundsCheck(constant_index_value, rl_array.reg, len_offset);
-    } else {
-      GenArrayBoundsCheck(rl_index.reg, rl_array.reg, len_offset);
-    }
-  }
-  rl_result = EvalLoc(rl_dest, reg_class, true);
-  LoadBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_result.reg, size);
-  if ((size == k64) || (size == kDouble)) {
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    StoreValue(rl_dest, rl_result);
-  }
-}
-
-/*
- * Generate array store
- *
- */
-void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
-                             RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
-  RegisterClass reg_class = RegClassForFieldLoadStore(size, false);
-  int len_offset = mirror::Array::LengthOffset().Int32Value();
-  int data_offset;
-
-  if (size == k64 || size == kDouble) {
-    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
-  } else {
-    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
-  }
-
-  rl_array = LoadValue(rl_array, kRefReg);
-  bool constant_index = rl_index.is_const;
-  int32_t constant_index_value = 0;
-  if (!constant_index) {
-    rl_index = LoadValue(rl_index, kCoreReg);
-  } else {
-    // If the index is constant, just fold it into the data offset.
-    constant_index_value = mir_graph_->ConstantValue(rl_index);
-    data_offset += constant_index_value << scale;
-    // Treat as non-array below.
-    rl_index.reg = RegStorage::InvalidReg();
-  }
-
-  /* null object? */
-  GenNullCheck(rl_array.reg, opt_flags);
-
-  if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
-    if (constant_index) {
-      GenArrayBoundsCheck(constant_index_value, rl_array.reg, len_offset);
-    } else {
-      GenArrayBoundsCheck(rl_index.reg, rl_array.reg, len_offset);
-    }
-  }
-  if ((size == k64) || (size == kDouble)) {
-    rl_src = LoadValueWide(rl_src, reg_class);
-  } else {
-    rl_src = LoadValue(rl_src, reg_class);
-  }
-  // If the src reg can't be byte accessed, move it to a temp first.
-  if ((size == kSignedByte || size == kUnsignedByte) && !IsByteRegister(rl_src.reg)) {
-    RegStorage temp = AllocTemp();
-    OpRegCopy(temp, rl_src.reg);
-    StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, temp, size, opt_flags);
-  } else {
-    StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_src.reg, size, opt_flags);
-  }
-  if (card_mark) {
-    // Free rl_index if it's a temp. Ensures there are 2 free regs for the card mark.
-    if (!constant_index) {
-      FreeTemp(rl_index.reg);
-    }
-    MarkGCCard(opt_flags, rl_src.reg, rl_array.reg);
-  }
-}
-
-RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
-                                          RegLocation rl_dest,
-                                          RegLocation rl_src,
-                                          int shift_amount,
-                                          int flags ATTRIBUTE_UNUSED) {
-  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  if (cu_->target64) {
-    OpKind op = static_cast<OpKind>(0);    /* Make gcc happy */
-    switch (opcode) {
-      case Instruction::SHL_LONG:
-      case Instruction::SHL_LONG_2ADDR:
-        op = kOpLsl;
-        break;
-      case Instruction::SHR_LONG:
-      case Instruction::SHR_LONG_2ADDR:
-        op = kOpAsr;
-        break;
-      case Instruction::USHR_LONG:
-      case Instruction::USHR_LONG_2ADDR:
-        op = kOpLsr;
-        break;
-      default:
-        LOG(FATAL) << "Unexpected case";
-    }
-    OpRegRegImm(op, rl_result.reg, rl_src.reg, shift_amount);
-  } else {
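-    // On 32-bit targets a 64-bit shift is synthesized on a register pair: SHLD/SHRD
-    // move bits from the second operand into the first for amounts below 32, while
-    // amounts of 32 or more reduce to a register move plus a 32-bit shift.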
-    switch (opcode) {
-      case Instruction::SHL_LONG:
-      case Instruction::SHL_LONG_2ADDR:
-        DCHECK_NE(shift_amount, 1);  // Prevent a double store from happening.
-        if (shift_amount == 32) {
-          OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetLow());
-          LoadConstant(rl_result.reg.GetLow(), 0);
-        } else if (shift_amount > 31) {
-          OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetLow());
-          NewLIR2(kX86Sal32RI, rl_result.reg.GetHighReg(), shift_amount - 32);
-          LoadConstant(rl_result.reg.GetLow(), 0);
-        } else {
-          OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetLow());
-          OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
-          NewLIR3(kX86Shld32RRI, rl_result.reg.GetHighReg(), rl_result.reg.GetLowReg(),
-                  shift_amount);
-          NewLIR2(kX86Sal32RI, rl_result.reg.GetLowReg(), shift_amount);
-        }
-        break;
-      case Instruction::SHR_LONG:
-      case Instruction::SHR_LONG_2ADDR:
-        if (shift_amount == 32) {
-          OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
-          OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
-          NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), 31);
-        } else if (shift_amount > 31) {
-          OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
-          OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
-          NewLIR2(kX86Sar32RI, rl_result.reg.GetLowReg(), shift_amount - 32);
-          NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), 31);
-        } else {
-          OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetLow());
-          OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
-          NewLIR3(kX86Shrd32RRI, rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg(),
-                  shift_amount);
-          NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), shift_amount);
-        }
-        break;
-      case Instruction::USHR_LONG:
-      case Instruction::USHR_LONG_2ADDR:
-        if (shift_amount == 32) {
-          OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
-          LoadConstant(rl_result.reg.GetHigh(), 0);
-        } else if (shift_amount > 31) {
-          OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
-          NewLIR2(kX86Shr32RI, rl_result.reg.GetLowReg(), shift_amount - 32);
-          LoadConstant(rl_result.reg.GetHigh(), 0);
-        } else {
-          OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetLow());
-          OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
-          NewLIR3(kX86Shrd32RRI, rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg(),
-                  shift_amount);
-          NewLIR2(kX86Shr32RI, rl_result.reg.GetHighReg(), shift_amount);
-        }
-        break;
-      default:
-        LOG(FATAL) << "Unexpected case";
-    }
-  }
-  return rl_result;
-}
-
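// A plain C++ sketch of the 32-bit SHL_LONG decomposition emitted above;
// shift == 0 and shift == 1 are diverted by the wrapper below, hence the
// DCHECK_NE(shift_amount, 1) in the SHL case.
#include <cstdint>
static inline void Shl64Via32Sketch(uint32_t* lo, uint32_t* hi, int shift) {
  if (shift == 32) {
    *hi = *lo;                                     // mov hi, lo
    *lo = 0;
  } else if (shift > 31) {
    *hi = *lo << (shift - 32);                     // mov hi, lo; sal hi, n-32
    *lo = 0;
  } else {
    *hi = (*hi << shift) | (*lo >> (32 - shift));  // shld hi, lo, n
    *lo <<= shift;                                 // sal lo, n
  }
}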
-void X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                                   RegLocation rl_src, RegLocation rl_shift, int flags) {
-  // Per the Dex spec, we only care about the low 6 bits of the shift amount.
-  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
-  if (shift_amount == 0) {
-    rl_src = LoadValueWide(rl_src, kCoreReg);
-    StoreValueWide(rl_dest, rl_src);
-    return;
-  } else if (shift_amount == 1 &&
-            (opcode ==  Instruction::SHL_LONG || opcode == Instruction::SHL_LONG_2ADDR)) {
-    // Need to handle this here to avoid calling StoreValueWide twice.
-    GenArithOpLong(Instruction::ADD_LONG, rl_dest, rl_src, rl_src, flags);
-    return;
-  }
-  if (PartiallyIntersects(rl_src, rl_dest)) {
-    GenShiftOpLong(opcode, rl_dest, rl_src, rl_shift);
-    return;
-  }
-  rl_src = LoadValueWide(rl_src, kCoreReg);
-  RegLocation rl_result = GenShiftImmOpLong(opcode, rl_dest, rl_src, shift_amount, flags);
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void X86Mir2Lir::GenArithImmOpLong(Instruction::Code opcode,
-                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
-                                   int flags) {
-  bool isConstSuccess = false;
-  switch (opcode) {
-    case Instruction::ADD_LONG:
-    case Instruction::AND_LONG:
-    case Instruction::OR_LONG:
-    case Instruction::XOR_LONG:
-      if (rl_src2.is_const) {
-        isConstSuccess = GenLongLongImm(rl_dest, rl_src1, rl_src2, opcode);
-      } else {
-        DCHECK(rl_src1.is_const);
-        isConstSuccess = GenLongLongImm(rl_dest, rl_src2, rl_src1, opcode);
-      }
-      break;
-    case Instruction::SUB_LONG:
-    case Instruction::SUB_LONG_2ADDR:
-      if (rl_src2.is_const) {
-        isConstSuccess = GenLongLongImm(rl_dest, rl_src1, rl_src2, opcode);
-      } else {
-        GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
-        isConstSuccess = true;
-      }
-      break;
-    case Instruction::ADD_LONG_2ADDR:
-    case Instruction::OR_LONG_2ADDR:
-    case Instruction::XOR_LONG_2ADDR:
-    case Instruction::AND_LONG_2ADDR:
-      if (rl_src2.is_const) {
-        if (GenerateTwoOperandInstructions()) {
-          isConstSuccess = GenLongImm(rl_dest, rl_src2, opcode);
-        } else {
-          isConstSuccess = GenLongLongImm(rl_dest, rl_src1, rl_src2, opcode);
-        }
-      } else {
-        DCHECK(rl_src1.is_const);
-        isConstSuccess = GenLongLongImm(rl_dest, rl_src2, rl_src1, opcode);
-      }
-      break;
-    default:
-      isConstSuccess = false;
-      break;
-  }
-
-  if (!isConstSuccess) {
-    // Default - bail to non-const handler.
-    GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
-  }
-}
-
-bool X86Mir2Lir::IsNoOp(Instruction::Code op, int32_t value) {
-  switch (op) {
-    case Instruction::AND_LONG_2ADDR:
-    case Instruction::AND_LONG:
-      return value == -1;
-    case Instruction::OR_LONG:
-    case Instruction::OR_LONG_2ADDR:
-    case Instruction::XOR_LONG:
-    case Instruction::XOR_LONG_2ADDR:
-      return value == 0;
-    default:
-      return false;
-  }
-}
-
-X86OpCode X86Mir2Lir::GetOpcode(Instruction::Code op, RegLocation dest, RegLocation rhs,
-                                bool is_high_op) {
-  bool rhs_in_mem = rhs.location != kLocPhysReg;
-  bool dest_in_mem = dest.location != kLocPhysReg;
-  bool is64Bit = cu_->target64;
-  DCHECK(!rhs_in_mem || !dest_in_mem);
-  switch (op) {
-    case Instruction::ADD_LONG:
-    case Instruction::ADD_LONG_2ADDR:
-      if (dest_in_mem) {
-        return is64Bit ? kX86Add64MR : is_high_op ? kX86Adc32MR : kX86Add32MR;
-      } else if (rhs_in_mem) {
-        return is64Bit ? kX86Add64RM : is_high_op ? kX86Adc32RM : kX86Add32RM;
-      }
-      return is64Bit ? kX86Add64RR : is_high_op ? kX86Adc32RR : kX86Add32RR;
-    case Instruction::SUB_LONG:
-    case Instruction::SUB_LONG_2ADDR:
-      if (dest_in_mem) {
-        return is64Bit ? kX86Sub64MR : is_high_op ? kX86Sbb32MR : kX86Sub32MR;
-      } else if (rhs_in_mem) {
-        return is64Bit ? kX86Sub64RM : is_high_op ? kX86Sbb32RM : kX86Sub32RM;
-      }
-      return is64Bit ? kX86Sub64RR : is_high_op ? kX86Sbb32RR : kX86Sub32RR;
-    case Instruction::AND_LONG_2ADDR:
-    case Instruction::AND_LONG:
-      if (dest_in_mem) {
-        return is64Bit ? kX86And64MR : kX86And32MR;
-      }
-      if (is64Bit) {
-        return rhs_in_mem ? kX86And64RM : kX86And64RR;
-      }
-      return rhs_in_mem ? kX86And32RM : kX86And32RR;
-    case Instruction::OR_LONG:
-    case Instruction::OR_LONG_2ADDR:
-      if (dest_in_mem) {
-        return is64Bit ? kX86Or64MR : kX86Or32MR;
-      }
-      if (is64Bit) {
-        return rhs_in_mem ? kX86Or64RM : kX86Or64RR;
-      }
-      return rhs_in_mem ? kX86Or32RM : kX86Or32RR;
-    case Instruction::XOR_LONG:
-    case Instruction::XOR_LONG_2ADDR:
-      if (dest_in_mem) {
-        return is64Bit ? kX86Xor64MR : kX86Xor32MR;
-      }
-      if (is64Bit) {
-        return rhs_in_mem ? kX86Xor64RM : kX86Xor64RR;
-      }
-      return rhs_in_mem ? kX86Xor32RM : kX86Xor32RR;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << op;
-      return kX86Add32RR;
-  }
-}
-
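// Why the high-word opcodes above are ADC/SBB on 32-bit targets: a 64-bit
// add is a 32-bit ADD of the low halves followed by an add-with-carry of
// the high halves. A minimal sketch:
#include <cstdint>
static inline void Add64Via32Sketch(uint32_t* lo, uint32_t* hi,
                                    uint32_t rhs_lo, uint32_t rhs_hi) {
  uint32_t old_lo = *lo;
  *lo += rhs_lo;                    // add lo, rhs_lo
  *hi += rhs_hi + (*lo < old_lo);   // adc hi, rhs_hi (carry from low half)
}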
-X86OpCode X86Mir2Lir::GetOpcode(Instruction::Code op, RegLocation loc, bool is_high_op,
-                                int32_t value) {
-  bool in_mem = loc.location != kLocPhysReg;
-  bool is64Bit = cu_->target64;
-  bool byte_imm = IS_SIMM8(value);
-  DCHECK(in_mem || !loc.reg.IsFloat());
-  switch (op) {
-    case Instruction::ADD_LONG:
-    case Instruction::ADD_LONG_2ADDR:
-      if (byte_imm) {
-        if (in_mem) {
-          return is64Bit ? kX86Add64MI8 : is_high_op ? kX86Adc32MI8 : kX86Add32MI8;
-        }
-        return is64Bit ? kX86Add64RI8 : is_high_op ? kX86Adc32RI8 : kX86Add32RI8;
-      }
-      if (in_mem) {
-        return is64Bit ? kX86Add64MI : is_high_op ? kX86Adc32MI : kX86Add32MI;
-      }
-      return is64Bit ? kX86Add64RI : is_high_op ? kX86Adc32RI : kX86Add32RI;
-    case Instruction::SUB_LONG:
-    case Instruction::SUB_LONG_2ADDR:
-      if (byte_imm) {
-        if (in_mem) {
-          return is64Bit ? kX86Sub64MI8 : is_high_op ? kX86Sbb32MI8 : kX86Sub32MI8;
-        }
-        return is64Bit ? kX86Sub64RI8 : is_high_op ? kX86Sbb32RI8 : kX86Sub32RI8;
-      }
-      if (in_mem) {
-        return is64Bit ? kX86Sub64MI : is_high_op ? kX86Sbb32MI : kX86Sub32MI;
-      }
-      return is64Bit ? kX86Sub64RI : is_high_op ? kX86Sbb32RI : kX86Sub32RI;
-    case Instruction::AND_LONG_2ADDR:
-    case Instruction::AND_LONG:
-      if (byte_imm) {
-        if (is64Bit) {
-          return in_mem ? kX86And64MI8 : kX86And64RI8;
-        }
-        return in_mem ? kX86And32MI8 : kX86And32RI8;
-      }
-      if (is64Bit) {
-        return in_mem ? kX86And64MI : kX86And64RI;
-      }
-      return in_mem ? kX86And32MI : kX86And32RI;
-    case Instruction::OR_LONG:
-    case Instruction::OR_LONG_2ADDR:
-      if (byte_imm) {
-        if (is64Bit) {
-          return in_mem ? kX86Or64MI8 : kX86Or64RI8;
-        }
-        return in_mem ? kX86Or32MI8 : kX86Or32RI8;
-      }
-      if (is64Bit) {
-        return in_mem ? kX86Or64MI : kX86Or64RI;
-      }
-      return in_mem ? kX86Or32MI : kX86Or32RI;
-    case Instruction::XOR_LONG:
-    case Instruction::XOR_LONG_2ADDR:
-      if (byte_imm) {
-        if (is64Bit) {
-          return in_mem ? kX86Xor64MI8 : kX86Xor64RI8;
-        }
-        return in_mem ? kX86Xor32MI8 : kX86Xor32RI8;
-      }
-      if (is64Bit) {
-        return in_mem ? kX86Xor64MI : kX86Xor64RI;
-      }
-      return in_mem ? kX86Xor32MI : kX86Xor32RI;
-    default:
-      LOG(FATAL) << "Unexpected opcode: " << op;
-      UNREACHABLE();
-  }
-}
-
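// The ...I8 opcodes chosen above use x86's shorter sign-extended 8-bit
// immediate encodings; byte_imm is effectively this predicate (sketch):
#include <cstdint>
static inline bool FitsSimm8Sketch(int32_t value) {
  return -128 <= value && value <= 127;
}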
-bool X86Mir2Lir::GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op) {
-  DCHECK(rl_src.is_const);
-  int64_t val = mir_graph_->ConstantValueWide(rl_src);
-
-  if (cu_->target64) {
-    // We can use the immediate form only if the value fits in 32 bits.
-    if (val != (static_cast<int64_t>(static_cast<int32_t>(val)))) {
-      return false;
-    }
-
-    rl_dest = UpdateLocWideTyped(rl_dest);
-
-    if ((rl_dest.location == kLocDalvikFrame) ||
-        (rl_dest.location == kLocCompilerTemp)) {
-      int r_base = rs_rX86_SP_32.GetReg();
-      int displacement = SRegOffset(rl_dest.s_reg_low);
-
-      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-      X86OpCode x86op = GetOpcode(op, rl_dest, false, val);
-      LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET, val);
-      AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
-                              true /* is_load */, true /* is64bit */);
-      AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
-                              false /* is_load */, true /* is64bit */);
-      return true;
-    }
-
-    RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-    DCHECK_EQ(rl_result.location, kLocPhysReg);
-    DCHECK(!rl_result.reg.IsFloat());
-
-    X86OpCode x86op = GetOpcode(op, rl_result, false, val);
-    NewLIR2(x86op, rl_result.reg.GetReg(), val);
-
-    StoreValueWide(rl_dest, rl_result);
-    return true;
-  }
-
-  int32_t val_lo = Low32Bits(val);
-  int32_t val_hi = High32Bits(val);
-  rl_dest = UpdateLocWideTyped(rl_dest);
-
-  // Can we just do this into memory?
-  if ((rl_dest.location == kLocDalvikFrame) ||
-      (rl_dest.location == kLocCompilerTemp)) {
-    int r_base = rs_rX86_SP_32.GetReg();
-    int displacement = SRegOffset(rl_dest.s_reg_low);
-
-    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-    if (!IsNoOp(op, val_lo)) {
-      X86OpCode x86op = GetOpcode(op, rl_dest, false, val_lo);
-      LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET, val_lo);
-      AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
-                              true /* is_load */, true /* is64bit */);
-      AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
-                              false /* is_load */, true /* is64bit */);
-    }
-    if (!IsNoOp(op, val_hi)) {
-      X86OpCode x86op = GetOpcode(op, rl_dest, true, val_hi);
-      LIR *lir = NewLIR3(x86op, r_base, displacement + HIWORD_OFFSET, val_hi);
-      AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
-                                true /* is_load */, true /* is64bit */);
-      AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
-                                false /* is_load */, true /* is64bit */);
-    }
-    return true;
-  }
-
-  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-  DCHECK_EQ(rl_result.location, kLocPhysReg);
-  DCHECK(!rl_result.reg.IsFloat());
-
-  if (!IsNoOp(op, val_lo)) {
-    X86OpCode x86op = GetOpcode(op, rl_result, false, val_lo);
-    NewLIR2(x86op, rl_result.reg.GetLowReg(), val_lo);
-  }
-  if (!IsNoOp(op, val_hi)) {
-    X86OpCode x86op = GetOpcode(op, rl_result, true, val_hi);
-    NewLIR2(x86op, rl_result.reg.GetHighReg(), val_hi);
-  }
-  StoreValueWide(rl_dest, rl_result);
-  return true;
-}
-
-bool X86Mir2Lir::GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1,
-                                RegLocation rl_src2, Instruction::Code op) {
-  DCHECK(rl_src2.is_const);
-  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
-
-  if (cu_->target64) {
-    // We can use the immediate form only if the value fits in 32 bits.
-    if (val != (static_cast<int64_t>(static_cast<int32_t>(val)))) {
-      return false;
-    }
-    if (rl_dest.location == kLocPhysReg &&
-        rl_src1.location == kLocPhysReg && !rl_dest.reg.IsFloat()) {
-      X86OpCode x86op = GetOpcode(op, rl_dest, false, val);
-      OpRegCopy(rl_dest.reg, rl_src1.reg);
-      NewLIR2(x86op, rl_dest.reg.GetReg(), val);
-      StoreFinalValueWide(rl_dest, rl_dest);
-      return true;
-    }
-
-    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-    // We need the values to be in a temporary
-    RegLocation rl_result = ForceTempWide(rl_src1);
-
-    X86OpCode x86op = GetOpcode(op, rl_result, false, val);
-    NewLIR2(x86op, rl_result.reg.GetReg(), val);
-
-    StoreFinalValueWide(rl_dest, rl_result);
-    return true;
-  }
-
-  int32_t val_lo = Low32Bits(val);
-  int32_t val_hi = High32Bits(val);
-  rl_dest = UpdateLocWideTyped(rl_dest);
-  rl_src1 = UpdateLocWideTyped(rl_src1);
-
-  // Can we do this directly into the destination registers?
-  if (rl_dest.location == kLocPhysReg && rl_src1.location == kLocPhysReg &&
-      rl_dest.reg.GetLowReg() == rl_src1.reg.GetLowReg() &&
-      rl_dest.reg.GetHighReg() == rl_src1.reg.GetHighReg() && !rl_dest.reg.IsFloat()) {
-    if (!IsNoOp(op, val_lo)) {
-      X86OpCode x86op = GetOpcode(op, rl_dest, false, val_lo);
-      NewLIR2(x86op, rl_dest.reg.GetLowReg(), val_lo);
-    }
-    if (!IsNoOp(op, val_hi)) {
-      X86OpCode x86op = GetOpcode(op, rl_dest, true, val_hi);
-      NewLIR2(x86op, rl_dest.reg.GetHighReg(), val_hi);
-    }
-
-    StoreFinalValueWide(rl_dest, rl_dest);
-    return true;
-  }
-
-  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  DCHECK_EQ(rl_src1.location, kLocPhysReg);
-
-  // We need the values to be in a temporary
-  RegLocation rl_result = ForceTempWide(rl_src1);
-  if (!IsNoOp(op, val_lo)) {
-    X86OpCode x86op = GetOpcode(op, rl_result, false, val_lo);
-    NewLIR2(x86op, rl_result.reg.GetLowReg(), val_lo);
-  }
-  if (!IsNoOp(op, val_hi)) {
-    X86OpCode x86op = GetOpcode(op, rl_result, true, val_hi);
-    NewLIR2(x86op, rl_result.reg.GetHighReg(), val_hi);
-  }
-
-  StoreFinalValueWide(rl_dest, rl_result);
-  return true;
-}
-
-// For final classes there are no sub-classes to check and so we can answer the instance-of
-// question with simple comparisons. Use compares to memory and SETEQ to optimize for x86.
-void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
-                                    RegLocation rl_dest, RegLocation rl_src) {
-  RegLocation object = LoadValue(rl_src, kRefReg);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  RegStorage result_reg = rl_result.reg;
-
-  // For 32-bit, SETcc only works with EAX..EDX.
-  RegStorage object_32reg = object.reg.Is64Bit() ? As32BitReg(object.reg) : object.reg;
-  if (result_reg.GetRegNum() == object_32reg.GetRegNum() || !IsByteRegister(result_reg)) {
-    result_reg = AllocateByteRegister();
-  }
-
-  // Assume that there is no match.
-  LoadConstant(result_reg, 0);
-  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);
-
-  // We will use this register to compare to memory below.
-  // References are 32 bit in memory, and 64 bit in registers (in 64 bit mode).
-  // For this reason, force allocation of a 32 bit register to use, so that the
-  // compare to memory will be done using a 32-bit comparison.
-  // The LoadRefDisp(s) below will work normally, even in 64 bit mode.
-  RegStorage check_class = AllocTemp();
-
-  if (use_declaring_class) {
-    RegStorage r_method = LoadCurrMethodWithHint(check_class);
-    LoadRefDisp(r_method, ArtMethod::DeclaringClassOffset().Int32Value(),
-                check_class, kNotVolatile);
-  } else {
-    LoadTypeFromCache(type_idx, check_class);
-  }
-
-  // Compare the computed class to the class in the object.
-  DCHECK_EQ(object.location, kLocPhysReg);
-  OpRegMem(kOpCmp, check_class, object.reg, mirror::Object::ClassOffset().Int32Value());
-
-  // Set the low byte of the result to 0 or 1 from the compare condition code.
-  NewLIR2(kX86Set8R, result_reg.GetReg(), kX86CondEq);
-
-  LIR* target = NewLIR0(kPseudoTargetLabel);
-  null_branchover->target = target;
-  FreeTemp(check_class);
-  if (IsTemp(result_reg)) {
-    OpRegCopy(rl_result.reg, result_reg);
-    FreeTemp(result_reg);
-  }
-  StoreValue(rl_dest, rl_result);
-}
-
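// What the cmp + sete sequence above computes, as a sketch (assumes ART's
// mirror headers; ignores read barriers and lock annotations): a final
// class has no subclasses, so instance-of is a null check plus one
// class-pointer compare.
static inline int32_t InstanceOfFinalSketch(mirror::Object* obj,
                                            mirror::Class* final_class) {
  return obj != nullptr && obj->GetClass() == final_class;
}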
-void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
-                               RegLocation rl_lhs, RegLocation rl_rhs, int flags) {
-  OpKind op = kOpBkpt;
-  bool is_div_rem = false;
-  bool unary = false;
-  bool shift_op = false;
-  bool is_two_addr = false;
-  RegLocation rl_result;
-  switch (opcode) {
-    case Instruction::NEG_INT:
-      op = kOpNeg;
-      unary = true;
-      break;
-    case Instruction::NOT_INT:
-      op = kOpMvn;
-      unary = true;
-      break;
-    case Instruction::ADD_INT_2ADDR:
-      is_two_addr = true;
-      FALLTHROUGH_INTENDED;
-    case Instruction::ADD_INT:
-      op = kOpAdd;
-      break;
-    case Instruction::SUB_INT_2ADDR:
-      is_two_addr = true;
-      FALLTHROUGH_INTENDED;
-    case Instruction::SUB_INT:
-      op = kOpSub;
-      break;
-    case Instruction::MUL_INT_2ADDR:
-      is_two_addr = true;
-      FALLTHROUGH_INTENDED;
-    case Instruction::MUL_INT:
-      op = kOpMul;
-      break;
-    case Instruction::DIV_INT_2ADDR:
-      is_two_addr = true;
-      FALLTHROUGH_INTENDED;
-    case Instruction::DIV_INT:
-      op = kOpDiv;
-      is_div_rem = true;
-      break;
-    /* NOTE: returns in kArg1 */
-    case Instruction::REM_INT_2ADDR:
-      is_two_addr = true;
-      FALLTHROUGH_INTENDED;
-    case Instruction::REM_INT:
-      op = kOpRem;
-      is_div_rem = true;
-      break;
-    case Instruction::AND_INT_2ADDR:
-      is_two_addr = true;
-      FALLTHROUGH_INTENDED;
-    case Instruction::AND_INT:
-      op = kOpAnd;
-      break;
-    case Instruction::OR_INT_2ADDR:
-      is_two_addr = true;
-      FALLTHROUGH_INTENDED;
-    case Instruction::OR_INT:
-      op = kOpOr;
-      break;
-    case Instruction::XOR_INT_2ADDR:
-      is_two_addr = true;
-      FALLTHROUGH_INTENDED;
-    case Instruction::XOR_INT:
-      op = kOpXor;
-      break;
-    case Instruction::SHL_INT_2ADDR:
-      is_two_addr = true;
-      FALLTHROUGH_INTENDED;
-    case Instruction::SHL_INT:
-      shift_op = true;
-      op = kOpLsl;
-      break;
-    case Instruction::SHR_INT_2ADDR:
-      is_two_addr = true;
-      FALLTHROUGH_INTENDED;
-    case Instruction::SHR_INT:
-      shift_op = true;
-      op = kOpAsr;
-      break;
-    case Instruction::USHR_INT_2ADDR:
-      is_two_addr = true;
-      FALLTHROUGH_INTENDED;
-    case Instruction::USHR_INT:
-      shift_op = true;
-      op = kOpLsr;
-      break;
-    default:
-      LOG(FATAL) << "Invalid word arith op: " << opcode;
-  }
-
-  // Can we convert to a two address instruction?
-  if (!is_two_addr &&
-        (mir_graph_->SRegToVReg(rl_dest.s_reg_low) ==
-         mir_graph_->SRegToVReg(rl_lhs.s_reg_low))) {
-    is_two_addr = true;
-  }
-
-  if (!GenerateTwoOperandInstructions()) {
-    is_two_addr = false;
-  }
-
-  // Get the div/rem stuff out of the way.
-  if (is_div_rem) {
-    rl_result = GenDivRem(rl_dest, rl_lhs, rl_rhs, op == kOpDiv, flags);
-    StoreValue(rl_dest, rl_result);
-    return;
-  }
-
-  // If we generate any memory access below, it will reference a dalvik reg.
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-
-  if (unary) {
-    rl_lhs = LoadValue(rl_lhs, kCoreReg);
-    rl_result = UpdateLocTyped(rl_dest);
-    rl_result = EvalLoc(rl_dest, kCoreReg, true);
-    OpRegReg(op, rl_result.reg, rl_lhs.reg);
-  } else {
-    if (shift_op) {
-      // The x86 hardware masks the shift count itself, so no explicit masking
-      // is needed, but the count must live in ECX.
-      RegStorage t_reg = TargetReg(kCount, kNotWide);  // rCX
-      LoadValueDirectFixed(rl_rhs, t_reg);
-      if (is_two_addr) {
-        // Can we do this directly into memory?
-        rl_result = UpdateLocTyped(rl_dest);
-        if (rl_result.location != kLocPhysReg) {
-          // Okay, we can do this into memory
-          OpMemReg(op, rl_result, t_reg.GetReg());
-          FreeTemp(t_reg);
-          return;
-        } else if (!rl_result.reg.IsFloat()) {
-          // Can do this directly into the result register
-          OpRegReg(op, rl_result.reg, t_reg);
-          FreeTemp(t_reg);
-          StoreFinalValue(rl_dest, rl_result);
-          return;
-        }
-      }
-      // Three-address form, or we can't do it directly.
-      rl_lhs = LoadValue(rl_lhs, kCoreReg);
-      rl_result = EvalLoc(rl_dest, kCoreReg, true);
-      OpRegRegReg(op, rl_result.reg, rl_lhs.reg, t_reg);
-      FreeTemp(t_reg);
-    } else {
-      // Multiply is 3 operand only (sort of).
-      if (is_two_addr && op != kOpMul) {
-        // Can we do this directly into memory?
-        rl_result = UpdateLocTyped(rl_dest);
-        if (rl_result.location == kLocPhysReg) {
-          // Ensure res is in a core reg
-          rl_result = EvalLoc(rl_dest, kCoreReg, true);
-          // Can we do this from memory directly?
-          rl_rhs = UpdateLocTyped(rl_rhs);
-          if (rl_rhs.location != kLocPhysReg) {
-            OpRegMem(op, rl_result.reg, rl_rhs);
-            StoreFinalValue(rl_dest, rl_result);
-            return;
-          } else if (!rl_rhs.reg.IsFloat()) {
-            OpRegReg(op, rl_result.reg, rl_rhs.reg);
-            StoreFinalValue(rl_dest, rl_result);
-            return;
-          }
-        }
-        rl_rhs = LoadValue(rl_rhs, kCoreReg);
-        // rl_rhs and rl_dest may be the same VR; in that case rl_dest is in a
-        // register after LoadValue while rl_result has not been updated yet,
-        // so refresh rl_result here.
-        rl_result = UpdateLocTyped(rl_dest);
-        if (rl_result.location != kLocPhysReg) {
-          // Okay, we can do this into memory.
-          OpMemReg(op, rl_result, rl_rhs.reg.GetReg());
-          return;
-        } else if (!rl_result.reg.IsFloat()) {
-          // Can do this directly into the result register.
-          OpRegReg(op, rl_result.reg, rl_rhs.reg);
-          StoreFinalValue(rl_dest, rl_result);
-          return;
-        } else {
-          rl_lhs = LoadValue(rl_lhs, kCoreReg);
-          rl_result = EvalLoc(rl_dest, kCoreReg, true);
-          OpRegRegReg(op, rl_result.reg, rl_lhs.reg, rl_rhs.reg);
-        }
-      } else {
-        // Try to use reg/memory instructions.
-        rl_lhs = UpdateLocTyped(rl_lhs);
-        rl_rhs = UpdateLocTyped(rl_rhs);
-        // We can't optimize with FP registers.
-        if (!IsOperationSafeWithoutTemps(rl_lhs, rl_rhs)) {
-          // Something is difficult, so fall back to the standard case.
-          rl_lhs = LoadValue(rl_lhs, kCoreReg);
-          rl_rhs = LoadValue(rl_rhs, kCoreReg);
-          rl_result = EvalLoc(rl_dest, kCoreReg, true);
-          OpRegRegReg(op, rl_result.reg, rl_lhs.reg, rl_rhs.reg);
-        } else {
-          // We can optimize by moving to result and using memory operands.
-          if (rl_rhs.location != kLocPhysReg) {
-            // Force LHS into result.
-            // We should be careful with order here:
-            // if rl_dest and rl_lhs point to the same VR, we should load first;
-            // if they are different, we should find a register for dest first.
-            if (mir_graph_->SRegToVReg(rl_dest.s_reg_low) ==
-                mir_graph_->SRegToVReg(rl_lhs.s_reg_low)) {
-              rl_lhs = LoadValue(rl_lhs, kCoreReg);
-              rl_result = EvalLoc(rl_dest, kCoreReg, true);
-              // No-op if these are the same.
-              OpRegCopy(rl_result.reg, rl_lhs.reg);
-            } else {
-              rl_result = EvalLoc(rl_dest, kCoreReg, true);
-              LoadValueDirect(rl_lhs, rl_result.reg);
-            }
-            OpRegMem(op, rl_result.reg, rl_rhs);
-          } else if (rl_lhs.location != kLocPhysReg) {
-            // RHS is in a register; LHS is in memory.
-            if (op != kOpSub) {
-              // Force RHS into result and operate on memory.
-              rl_result = EvalLoc(rl_dest, kCoreReg, true);
-              OpRegCopy(rl_result.reg, rl_rhs.reg);
-              OpRegMem(op, rl_result.reg, rl_lhs);
-            } else {
-              // Subtraction isn't commutative.
-              rl_lhs = LoadValue(rl_lhs, kCoreReg);
-              rl_rhs = LoadValue(rl_rhs, kCoreReg);
-              rl_result = EvalLoc(rl_dest, kCoreReg, true);
-              OpRegRegReg(op, rl_result.reg, rl_lhs.reg, rl_rhs.reg);
-            }
-          } else {
-            // Both are in registers.
-            rl_lhs = LoadValue(rl_lhs, kCoreReg);
-            rl_rhs = LoadValue(rl_rhs, kCoreReg);
-            rl_result = EvalLoc(rl_dest, kCoreReg, true);
-            OpRegRegReg(op, rl_result.reg, rl_lhs.reg, rl_rhs.reg);
-          }
-        }
-      }
-    }
-  }
-  StoreValue(rl_dest, rl_result);
-}
-
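// "Two address" above refers to x86's destructive ALU forms, where
// dst = dst op src is a single instruction. When the Dalvik dest VR
// aliases the lhs VR, no copy is needed; roughly (AT&T syntax):
//   addl %ecx, %eax        <- OpRegReg: result register already holds lhs
//   addl %ecx, 8(%esp)     <- OpMemReg: operate on the frame slot in place
// The three-address fallback loads or copies lhs into the result first.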
-bool X86Mir2Lir::IsOperationSafeWithoutTemps(RegLocation rl_lhs, RegLocation rl_rhs) {
-  // If we have non-core registers, then we can't do good things.
-  if (rl_lhs.location == kLocPhysReg && rl_lhs.reg.IsFloat()) {
-    return false;
-  }
-  if (rl_rhs.location == kLocPhysReg && rl_rhs.reg.IsFloat()) {
-    return false;
-  }
-
-  // Everything will be fine :-).
-  return true;
-}
-
-void X86Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
-  if (!cu_->target64) {
-    Mir2Lir::GenIntToLong(rl_dest, rl_src);
-    return;
-  }
-  rl_src = UpdateLocTyped(rl_src);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  if (rl_src.location == kLocPhysReg) {
-    NewLIR2(kX86MovsxdRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
-  } else {
-    int displacement = SRegOffset(rl_src.s_reg_low);
-    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-    LIR *m = NewLIR3(kX86MovsxdRM, rl_result.reg.GetReg(), rs_rX86_SP_32.GetReg(),
-                     displacement + LOWORD_OFFSET);
-    AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
-                            true /* is_load */, true /* is_64bit */);
-  }
-  StoreValueWide(rl_dest, rl_result);
-}
-
-void X86Mir2Lir::GenLongToInt(RegLocation rl_dest, RegLocation rl_src) {
-  rl_src = UpdateLocWide(rl_src);
-  rl_src = NarrowRegLoc(rl_src);
-  StoreValue(rl_dest, rl_src);
-
-  if (cu_->target64) {
-    // If src and dest are in the same physical reg, StoreValue generates
-    // no operation, but we need an explicit 32-bit mov R, R to clear
-    // the upper 32 bits.
-    rl_dest = UpdateLoc(rl_dest);
-    if (rl_src.location == kLocPhysReg && rl_dest.location == kLocPhysReg
-           && IsSameReg(rl_src.reg, rl_dest.reg)) {
-        LIR* copy_lir = OpRegCopyNoInsert(rl_dest.reg, rl_dest.reg);
-        // Remove the nop flag set by OpRegCopyNoInsert when src == dest.
-        copy_lir->flags.is_nop = false;
-        AppendLIR(copy_lir);
-    }
-  }
-}
-
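// The explicit self-mov above relies on an x86-64 rule: writing a 32-bit
// register zero-extends into the full 64-bit register, so "mov eax, eax"
// is not a no-op. In C++ terms (sketch):
#include <cstdint>
static inline uint64_t NarrowLongToIntSketch(uint64_t wide) {
  return static_cast<uint32_t>(wide);  // bits 63..32 become zero
}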
-void X86Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                        RegLocation rl_src1, RegLocation rl_shift) {
-  if (!cu_->target64) {
-    // Long shift operations in 32-bit: use shld or shrd to fill a 32-bit register from
-    // the other half, then shift the other half. If the shift amount is less than 32 we
-    // are done; otherwise move one register to the other and place zero or sign bits in
-    // the other.
-    LIR* branch;
-    FlushAllRegs();
-    LockCallTemps();
-    LoadValueDirectFixed(rl_shift, rs_rCX);
-    RegStorage r_tmp = RegStorage::MakeRegPair(rs_rAX, rs_rDX);
-    LoadValueDirectWideFixed(rl_src1, r_tmp);
-    switch (opcode) {
-      case Instruction::SHL_LONG:
-      case Instruction::SHL_LONG_2ADDR:
-        NewLIR3(kX86Shld32RRC, r_tmp.GetHighReg(), r_tmp.GetLowReg(), rs_rCX.GetReg());
-        NewLIR2(kX86Sal32RC, r_tmp.GetLowReg(), rs_rCX.GetReg());
-        NewLIR2(kX86Test8RI, rs_rCX.GetReg(), 32);
-        branch = NewLIR2(kX86Jcc8, 0, kX86CondZ);
-        OpRegCopy(r_tmp.GetHigh(), r_tmp.GetLow());
-        LoadConstant(r_tmp.GetLow(), 0);
-        branch->target = NewLIR0(kPseudoTargetLabel);
-        break;
-      case Instruction::SHR_LONG:
-      case Instruction::SHR_LONG_2ADDR:
-        NewLIR3(kX86Shrd32RRC, r_tmp.GetLowReg(), r_tmp.GetHighReg(), rs_rCX.GetReg());
-        NewLIR2(kX86Sar32RC, r_tmp.GetHighReg(), rs_rCX.GetReg());
-        NewLIR2(kX86Test8RI, rs_rCX.GetReg(), 32);
-        branch = NewLIR2(kX86Jcc8, 0, kX86CondZ);
-        OpRegCopy(r_tmp.GetLow(), r_tmp.GetHigh());
-        NewLIR2(kX86Sar32RI, r_tmp.GetHighReg(), 31);
-        branch->target = NewLIR0(kPseudoTargetLabel);
-        break;
-      case Instruction::USHR_LONG:
-      case Instruction::USHR_LONG_2ADDR:
-        NewLIR3(kX86Shrd32RRC, r_tmp.GetLowReg(), r_tmp.GetHighReg(),
-               rs_rCX.GetReg());
-        NewLIR2(kX86Shr32RC, r_tmp.GetHighReg(), rs_rCX.GetReg());
-        NewLIR2(kX86Test8RI, rs_rCX.GetReg(), 32);
-        branch = NewLIR2(kX86Jcc8, 0, kX86CondZ);
-        OpRegCopy(r_tmp.GetLow(), r_tmp.GetHigh());
-        LoadConstant(r_tmp.GetHigh(), 0);
-        branch->target = NewLIR0(kPseudoTargetLabel);
-        break;
-      default:
-        LOG(FATAL) << "Unexpected case: " << opcode;
-        return;
-    }
-    RegLocation rl_result = LocCReturnWide();
-    StoreValueWide(rl_dest, rl_result);
-    return;
-  }
-
-  bool is_two_addr = false;
-  OpKind op = kOpBkpt;
-  RegLocation rl_result;
-
-  switch (opcode) {
-    case Instruction::SHL_LONG_2ADDR:
-      is_two_addr = true;
-      FALLTHROUGH_INTENDED;
-    case Instruction::SHL_LONG:
-      op = kOpLsl;
-      break;
-    case Instruction::SHR_LONG_2ADDR:
-      is_two_addr = true;
-      FALLTHROUGH_INTENDED;
-    case Instruction::SHR_LONG:
-      op = kOpAsr;
-      break;
-    case Instruction::USHR_LONG_2ADDR:
-      is_two_addr = true;
-      FALLTHROUGH_INTENDED;
-    case Instruction::USHR_LONG:
-      op = kOpLsr;
-      break;
-    default:
-      op = kOpBkpt;
-  }
-
-  // The x86 hardware masks the shift count itself, so no explicit masking
-  // is needed, but the count must live in ECX.
-  RegStorage t_reg = TargetReg(kCount, kNotWide);  // rCX
-  LoadValueDirectFixed(rl_shift, t_reg);
-  if (is_two_addr) {
-    // Can we do this directly into memory?
-    rl_result = UpdateLocWideTyped(rl_dest);
-    if (rl_result.location != kLocPhysReg) {
-      // Okay, we can do this into memory
-      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-      OpMemReg(op, rl_result, t_reg.GetReg());
-    } else if (!rl_result.reg.IsFloat()) {
-      // Can do this directly into the result register
-      OpRegReg(op, rl_result.reg, t_reg);
-      StoreFinalValueWide(rl_dest, rl_result);
-    }
-  } else {
-    // Three-address form, or we can't do it directly.
-    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-    OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
-    StoreFinalValueWide(rl_dest, rl_result);
-  }
-
-  FreeTemp(t_reg);
-}
-
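// A sketch of the variable-count SHL_LONG sequence above: the hardware
// shifts only see the count mod 32, so the test cl, 32 / jcc pair fixes
// up counts of 32..63 after the shld/sal pair.
#include <cstdint>
static inline void ShlVar64Via32Sketch(uint32_t* lo, uint32_t* hi,
                                       unsigned count) {
  count &= 63;                               // Dex spec: low 6 bits only.
  unsigned c = count & 31;                   // what shld/sal actually see
  if (c != 0) {                              // guard C++ UB; hw needs no guard
    *hi = (*hi << c) | (*lo >> (32 - c));    // shld hi, lo, cl
    *lo <<= c;                               // sal lo, cl
  }
  if (count & 32) {                          // test cl, 32 taken path
    *hi = *lo;
    *lo = 0;
  }
}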
-}  // namespace art
diff --git a/compiler/dex/quick/x86/quick_assemble_x86_test.cc b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
deleted file mode 100644
index ff0ecea..0000000
--- a/compiler/dex/quick/x86/quick_assemble_x86_test.cc
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "dex/quick/quick_compiler.h"
-#include "dex/pass_manager.h"
-#include "dex/verification_results.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
-#include "runtime/dex_file.h"
-#include "driver/compiler_options.h"
-#include "driver/compiler_driver.h"
-#include "codegen_x86.h"
-#include "gtest/gtest.h"
-#include "utils/assembler_test_base.h"
-
-namespace art {
-
-class QuickAssembleX86TestBase : public testing::Test {
- protected:
-  X86Mir2Lir* Prepare(InstructionSet target) {
-    isa_ = target;
-    pool_.reset(new ArenaPool());
-    compiler_options_.reset(new CompilerOptions(
-        CompilerOptions::kDefaultCompilerFilter,
-        CompilerOptions::kDefaultHugeMethodThreshold,
-        CompilerOptions::kDefaultLargeMethodThreshold,
-        CompilerOptions::kDefaultSmallMethodThreshold,
-        CompilerOptions::kDefaultTinyMethodThreshold,
-        CompilerOptions::kDefaultNumDexMethodsThreshold,
-        CompilerOptions::kDefaultInlineDepthLimit,
-        CompilerOptions::kDefaultInlineMaxCodeUnits,
-        nullptr,
-        false,
-        CompilerOptions::kDefaultTopKProfileThreshold,
-        false,
-        CompilerOptions::kDefaultGenerateDebugInfo,
-        false,
-        false,
-        false,
-        false,
-        nullptr,
-        nullptr,
-        false,
-        "",
-        false,
-        false));
-    verification_results_.reset(new VerificationResults(compiler_options_.get()));
-    method_inliner_map_.reset(new DexFileToMethodInlinerMap());
-    compiler_driver_.reset(new CompilerDriver(
-        compiler_options_.get(),
-        verification_results_.get(),
-        method_inliner_map_.get(),
-        Compiler::kQuick,
-        isa_,
-        /* instruction_set_features */ nullptr,
-        /* boot_image */ false,
-        /* image_classes */ nullptr,
-        /* compiled_classes */ nullptr,
-        /* compiled_methods */ nullptr,
-        /* thread_count */ 0,
-        /* dump_stats */ false,
-        /* dump_passes */ false,
-        /* timer */ nullptr,
-        /* swap_fd */ -1,
-        /* profile_compilation_info */ nullptr));
-    cu_.reset(new CompilationUnit(pool_.get(), isa_, compiler_driver_.get(), nullptr));
-    DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(
-        cu_->arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
-    memset(code_item, 0, sizeof(DexFile::CodeItem));
-    cu_->mir_graph.reset(new MIRGraph(cu_.get(), &cu_->arena));
-    cu_->mir_graph->current_code_item_ = code_item;
-    cu_->cg.reset(QuickCompiler::GetCodeGenerator(cu_.get(), nullptr));
-
-    test_helper_.reset(new AssemblerTestInfrastructure(
-        isa_ == kX86 ? "x86" : "x86_64",
-        "as",
-        isa_ == kX86 ? " --32" : "",
-        "objdump",
-        " -h",
-        "objdump",
-        isa_ == kX86 ?
-            " -D -bbinary -mi386 --no-show-raw-insn" :
-            " -D -bbinary -mi386:x86-64 -Mx86-64,addr64,data32 --no-show-raw-insn",
-        nullptr));
-
-    X86Mir2Lir* m2l = static_cast<X86Mir2Lir*>(cu_->cg.get());
-    m2l->CompilerInitializeRegAlloc();
-    return m2l;
-  }
-
-  void Release() {
-    cu_.reset();
-    compiler_driver_.reset();
-    method_inliner_map_.reset();
-    verification_results_.reset();
-    compiler_options_.reset();
-    pool_.reset();
-
-    test_helper_.reset();
-  }
-
-  void TearDown() OVERRIDE {
-    Release();
-  }
-
-  bool CheckTools(InstructionSet target) {
-    Prepare(target);
-    bool result = test_helper_->CheckTools();
-    Release();
-    return result;
-  }
-
-  std::unique_ptr<CompilationUnit> cu_;
-  std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
-
- private:
-  InstructionSet isa_;
-  std::unique_ptr<ArenaPool> pool_;
-  std::unique_ptr<CompilerOptions> compiler_options_;
-  std::unique_ptr<VerificationResults> verification_results_;
-  std::unique_ptr<DexFileToMethodInlinerMap> method_inliner_map_;
-  std::unique_ptr<CompilerDriver> compiler_driver_;
-};
-
-class QuickAssembleX86LowLevelTest : public QuickAssembleX86TestBase {
- protected:
-  void Test(InstructionSet target, std::string test_name, std::string gcc_asm,
-            int opcode, int op0 = 0, int op1 = 0, int op2 = 0, int op3 = 0, int op4 = 0) {
-    X86Mir2Lir* m2l = Prepare(target);
-
-    LIR lir;
-    memset(&lir, 0, sizeof(LIR));
-    lir.opcode = opcode;
-    lir.operands[0] = op0;
-    lir.operands[1] = op1;
-    lir.operands[2] = op2;
-    lir.operands[3] = op3;
-    lir.operands[4] = op4;
-    lir.flags.size = m2l->GetInsnSize(&lir);
-
-    AssemblerStatus status = m2l->AssembleInstructions(&lir, 0);
-    // We don't expect a retry.
-    ASSERT_EQ(status, AssemblerStatus::kSuccess);
-
-    // Need a "base" std::vector.
-    std::vector<uint8_t> buffer(m2l->code_buffer_.begin(), m2l->code_buffer_.end());
-    test_helper_->Driver(buffer, gcc_asm, test_name);
-
-    Release();
-  }
-};
-
-TEST_F(QuickAssembleX86LowLevelTest, Addpd) {
-  Test(kX86, "Addpd", "addpd %xmm1, %xmm0\n", kX86AddpdRR,
-       RegStorage::Solo128(0).GetReg(), RegStorage::Solo128(1).GetReg());
-  Test(kX86_64, "Addpd", "addpd %xmm1, %xmm0\n", kX86AddpdRR,
-       RegStorage::Solo128(0).GetReg(), RegStorage::Solo128(1).GetReg());
-}
-
-TEST_F(QuickAssembleX86LowLevelTest, Subpd) {
-  Test(kX86, "Subpd", "subpd %xmm1, %xmm0\n", kX86SubpdRR,
-       RegStorage::Solo128(0).GetReg(), RegStorage::Solo128(1).GetReg());
-  Test(kX86_64, "Subpd", "subpd %xmm1, %xmm0\n", kX86SubpdRR,
-       RegStorage::Solo128(0).GetReg(), RegStorage::Solo128(1).GetReg());
-}
-
-TEST_F(QuickAssembleX86LowLevelTest, Mulpd) {
-  Test(kX86, "Mulpd", "mulpd %xmm1, %xmm0\n", kX86MulpdRR,
-       RegStorage::Solo128(0).GetReg(), RegStorage::Solo128(1).GetReg());
-  Test(kX86_64, "Mulpd", "mulpd %xmm1, %xmm0\n", kX86MulpdRR,
-       RegStorage::Solo128(0).GetReg(), RegStorage::Solo128(1).GetReg());
-}
-
-TEST_F(QuickAssembleX86LowLevelTest, Pextrw) {
-  Test(kX86, "Pextrw", "pextrw $7, %xmm3, 8(%eax)\n", kX86PextrwMRI,
-       RegStorage::Solo32(r0).GetReg(), 8, RegStorage::Solo128(3).GetReg(), 7);
-  Test(kX86_64, "Pextrw", "pextrw $7, %xmm8, 8(%r10)\n", kX86PextrwMRI,
-       RegStorage::Solo64(r10q).GetReg(), 8, RegStorage::Solo128(8).GetReg(), 7);
-}
-
-class QuickAssembleX86MacroTest : public QuickAssembleX86TestBase {
- protected:
-  typedef void (X86Mir2Lir::*AsmFn)(MIR*);
-
-  void TestVectorFn(InstructionSet target,
-                    Instruction::Code opcode,
-                    AsmFn f,
-                    std::string inst_string) {
-    X86Mir2Lir *m2l = Prepare(target);
-
-    // Create a vector MIR.
-    MIR* mir = cu_->mir_graph->NewMIR();
-    mir->dalvikInsn.opcode = opcode;
-    mir->dalvikInsn.vA = 0;  // Destination and source.
-    mir->dalvikInsn.vB = 1;  // Source.
-    int vector_size = 128;
-    int vector_type = kDouble;
-    mir->dalvikInsn.vC = (vector_type << 16) | vector_size;  // Type size.
-    (m2l->*f)(mir);
-    m2l->AssembleLIR();
-
-    std::string gcc_asm = inst_string + " %xmm1, %xmm0\n";
-    // Need a "base" std::vector.
-    std::vector<uint8_t> buffer(m2l->code_buffer_.begin(), m2l->code_buffer_.end());
-    test_helper_->Driver(buffer, gcc_asm, inst_string);
-
-    Release();
-  }
-
-  // Tests are member functions as many of the assembler functions are protected or private,
-  // and it would be inelegant to define ART_FRIEND_TEST for all the tests.
-
-  void TestAddpd() {
-    TestVectorFn(kX86,
-                 static_cast<Instruction::Code>(kMirOpPackedAddition),
-                 &X86Mir2Lir::GenAddVector,
-                 "addpd");
-    TestVectorFn(kX86_64,
-                 static_cast<Instruction::Code>(kMirOpPackedAddition),
-                 &X86Mir2Lir::GenAddVector,
-                 "addpd");
-  }
-
-  void TestSubpd() {
-    TestVectorFn(kX86,
-                 static_cast<Instruction::Code>(kMirOpPackedSubtract),
-                 &X86Mir2Lir::GenSubtractVector,
-                 "subpd");
-    TestVectorFn(kX86_64,
-                 static_cast<Instruction::Code>(kMirOpPackedSubtract),
-                 &X86Mir2Lir::GenSubtractVector,
-                 "subpd");
-  }
-
-  void TestMulpd() {
-    TestVectorFn(kX86,
-                 static_cast<Instruction::Code>(kMirOpPackedMultiply),
-                 &X86Mir2Lir::GenMultiplyVector,
-                 "mulpd");
-    TestVectorFn(kX86_64,
-                 static_cast<Instruction::Code>(kMirOpPackedMultiply),
-                 &X86Mir2Lir::GenMultiplyVector,
-                 "mulpd");
-  }
-};
-
-TEST_F(QuickAssembleX86MacroTest, CheckTools) {
-  ASSERT_TRUE(CheckTools(kX86)) << "x86 tools not found.";
-  ASSERT_TRUE(CheckTools(kX86_64)) << "x86_64 tools not found.";
-}
-
-#define DECLARE_TEST(name)             \
-  TEST_F(QuickAssembleX86MacroTest, name) { \
-    Test ## name();                    \
-  }
-
-DECLARE_TEST(Addpd)
-DECLARE_TEST(Subpd)
-DECLARE_TEST(Mulpd)
-
-}  // namespace art
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
deleted file mode 100755
index 4ff7993..0000000
--- a/compiler/dex/quick/x86/target_x86.cc
+++ /dev/null
@@ -1,2654 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_x86.h"
-
-#include <cstdarg>
-#include <inttypes.h>
-#include <string>
-
-#include "arch/x86/instruction_set_features_x86.h"
-#include "art_method.h"
-#include "backend_x86.h"
-#include "base/logging.h"
-#include "dex/compiler_ir.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "dex/reg_storage_eq.h"
-#include "driver/compiler_driver.h"
-#include "mirror/array-inl.h"
-#include "mirror/string.h"
-#include "oat.h"
-#include "oat_quick_method_header.h"
-#include "x86_lir.h"
-
-namespace art {
-
-static constexpr RegStorage core_regs_arr_32[] = {
-    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
-};
-static constexpr RegStorage core_regs_arr_64[] = {
-    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
-    rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15
-};
-static constexpr RegStorage core_regs_arr_64q[] = {
-    rs_r0q, rs_r1q, rs_r2q, rs_r3q, rs_rX86_SP_64, rs_r5q, rs_r6q, rs_r7q,
-    rs_r8q, rs_r9q, rs_r10q, rs_r11q, rs_r12q, rs_r13q, rs_r14q, rs_r15q
-};
-static constexpr RegStorage sp_regs_arr_32[] = {
-    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
-};
-static constexpr RegStorage sp_regs_arr_64[] = {
-    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
-    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
-};
-static constexpr RegStorage dp_regs_arr_32[] = {
-    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
-};
-static constexpr RegStorage dp_regs_arr_64[] = {
-    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
-    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
-};
-static constexpr RegStorage xp_regs_arr_32[] = {
-    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
-};
-static constexpr RegStorage xp_regs_arr_64[] = {
-    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
-    rs_xr8, rs_xr9, rs_xr10, rs_xr11, rs_xr12, rs_xr13, rs_xr14, rs_xr15
-};
-static constexpr RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32};
-static constexpr RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_32};
-static constexpr RegStorage reserved_regs_arr_64q[] = {rs_rX86_SP_64};
-static constexpr RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX};
-static constexpr RegStorage core_temps_arr_64[] = {
-    rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
-    rs_r8, rs_r9, rs_r10, rs_r11
-};
-
-// How to add a register so it is available for promotion:
-// 1) Remove the register from the array defining the temps
-// 2) Update ClobberCallerSave
-// 3) Update JNI compiler ABI:
-// 3.1) add reg in JniCallingConvention method
-// 3.2) update CoreSpillMask/FpSpillMask
-// 4) Update entrypoints
-// 4.1) Update constants in asm_support_x86_64.h for new frame size
-// 4.2) Remove entry in SmashCallerSaves
-// 4.3) Update jni_entrypoints to spill/unspill new callee save reg
-// 4.4) Update quick_entrypoints to spill/unspill new callee save reg
-// 5) Update runtime ABI
-// 5.1) Update quick_method_frame_info with new required spills
-// 5.2) Update QuickArgumentVisitor with new offsets to gprs and xmms
-// Note that you cannot use registers corresponding to incoming args,
-// per the ABI, and QCG needs one additional XMM temp for the bulk
-// copy performed in preparation for a call.
-static constexpr RegStorage core_temps_arr_64q[] = {
-    rs_r0q, rs_r1q, rs_r2q, rs_r6q, rs_r7q,
-    rs_r8q, rs_r9q, rs_r10q, rs_r11q
-};
-static constexpr RegStorage sp_temps_arr_32[] = {
-    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
-};
-static constexpr RegStorage sp_temps_arr_64[] = {
-    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
-    rs_fr8, rs_fr9, rs_fr10, rs_fr11
-};
-static constexpr RegStorage dp_temps_arr_32[] = {
-    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
-};
-static constexpr RegStorage dp_temps_arr_64[] = {
-    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
-    rs_dr8, rs_dr9, rs_dr10, rs_dr11
-};
-
-static constexpr RegStorage xp_temps_arr_32[] = {
-    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
-};
-static constexpr RegStorage xp_temps_arr_64[] = {
-    rs_xr0, rs_xr1, rs_xr2, rs_xr3, rs_xr4, rs_xr5, rs_xr6, rs_xr7,
-    rs_xr8, rs_xr9, rs_xr10, rs_xr11
-};
-
-static constexpr ArrayRef<const RegStorage> empty_pool;
-static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
-static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
-static constexpr ArrayRef<const RegStorage> core_regs_64q(core_regs_arr_64q);
-static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
-static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
-static constexpr ArrayRef<const RegStorage> dp_regs_32(dp_regs_arr_32);
-static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
-static constexpr ArrayRef<const RegStorage> xp_regs_32(xp_regs_arr_32);
-static constexpr ArrayRef<const RegStorage> xp_regs_64(xp_regs_arr_64);
-static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
-static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
-static constexpr ArrayRef<const RegStorage> reserved_regs_64q(reserved_regs_arr_64q);
-static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
-static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
-static constexpr ArrayRef<const RegStorage> core_temps_64q(core_temps_arr_64q);
-static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
-static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
-static constexpr ArrayRef<const RegStorage> dp_temps_32(dp_temps_arr_32);
-static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);
-
-static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32);
-static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);
-
-RegLocation X86Mir2Lir::LocCReturn() {
-  return x86_loc_c_return;
-}
-
-RegLocation X86Mir2Lir::LocCReturnRef() {
-  return cu_->target64 ? x86_64_loc_c_return_ref : x86_loc_c_return_ref;
-}
-
-RegLocation X86Mir2Lir::LocCReturnWide() {
-  return cu_->target64 ? x86_64_loc_c_return_wide : x86_loc_c_return_wide;
-}
-
-RegLocation X86Mir2Lir::LocCReturnFloat() {
-  return x86_loc_c_return_float;
-}
-
-RegLocation X86Mir2Lir::LocCReturnDouble() {
-  return x86_loc_c_return_double;
-}
-
-// 32-bit reg storage locations for 32-bit targets.
-static const RegStorage RegStorage32FromSpecialTargetRegister_Target32[] {
-  RegStorage::InvalidReg(),  // kSelf - Thread pointer.
-  RegStorage::InvalidReg(),  // kSuspend - Used to reduce suspend checks for some targets.
-  RegStorage::InvalidReg(),  // kLr - no register as the return address is pushed on entry.
-  RegStorage::InvalidReg(),  // kPc - not exposed on X86; see kX86StartOfMethod.
-  rs_rX86_SP_32,             // kSp
-  rs_rAX,                    // kArg0
-  rs_rCX,                    // kArg1
-  rs_rDX,                    // kArg2
-  rs_rBX,                    // kArg3
-  RegStorage::InvalidReg(),  // kArg4
-  RegStorage::InvalidReg(),  // kArg5
-  RegStorage::InvalidReg(),  // kArg6
-  RegStorage::InvalidReg(),  // kArg7
-  rs_fr0,                    // kFArg0
-  rs_fr1,                    // kFArg1
-  rs_fr2,                    // kFArg2
-  rs_fr3,                    // kFArg3
-  RegStorage::InvalidReg(),  // kFArg4
-  RegStorage::InvalidReg(),  // kFArg5
-  RegStorage::InvalidReg(),  // kFArg6
-  RegStorage::InvalidReg(),  // kFArg7
-  RegStorage::InvalidReg(),  // kFArg8
-  RegStorage::InvalidReg(),  // kFArg9
-  RegStorage::InvalidReg(),  // kFArg10
-  RegStorage::InvalidReg(),  // kFArg11
-  RegStorage::InvalidReg(),  // kFArg12
-  RegStorage::InvalidReg(),  // kFArg13
-  RegStorage::InvalidReg(),  // kFArg14
-  RegStorage::InvalidReg(),  // kFArg15
-  rs_rAX,                    // kRet0
-  rs_rDX,                    // kRet1
-  rs_rAX,                    // kInvokeTgt
-  rs_rAX,                    // kHiddenArg - used to hold the method index before copying to fr0.
-  rs_fr7,                    // kHiddenFpArg
-  rs_rCX,                    // kCount
-};
-
-// 32-bit reg storage locations for 64-bit targets.
-static const RegStorage RegStorage32FromSpecialTargetRegister_Target64[] {
-  RegStorage::InvalidReg(),  // kSelf - Thread pointer.
-  RegStorage::InvalidReg(),  // kSuspend - Used to reduce suspend checks for some targets.
-  RegStorage::InvalidReg(),  // kLr - no register as the return address is pushed on entry.
-  RegStorage(kRIPReg),       // kPc
-  rs_rX86_SP_32,             // kSp
-  rs_rDI,                    // kArg0
-  rs_rSI,                    // kArg1
-  rs_rDX,                    // kArg2
-  rs_rCX,                    // kArg3
-  rs_r8,                     // kArg4
-  rs_r9,                     // kArg5
-  RegStorage::InvalidReg(),  // kArg6
-  RegStorage::InvalidReg(),  // kArg7
-  rs_fr0,                    // kFArg0
-  rs_fr1,                    // kFArg1
-  rs_fr2,                    // kFArg2
-  rs_fr3,                    // kFArg3
-  rs_fr4,                    // kFArg4
-  rs_fr5,                    // kFArg5
-  rs_fr6,                    // kFArg6
-  rs_fr7,                    // kFArg7
-  RegStorage::InvalidReg(),  // kFArg8
-  RegStorage::InvalidReg(),  // kFArg9
-  RegStorage::InvalidReg(),  // kFArg10
-  RegStorage::InvalidReg(),  // kFArg11
-  RegStorage::InvalidReg(),  // kFArg12
-  RegStorage::InvalidReg(),  // kFArg13
-  RegStorage::InvalidReg(),  // kFArg14
-  RegStorage::InvalidReg(),  // kFArg15
-  rs_rAX,                    // kRet0
-  rs_rDX,                    // kRet1
-  rs_rAX,                    // kInvokeTgt
-  rs_rAX,                    // kHiddenArg
-  RegStorage::InvalidReg(),  // kHiddenFpArg
-  rs_rCX,                    // kCount
-};
-static_assert(arraysize(RegStorage32FromSpecialTargetRegister_Target32) ==
-              arraysize(RegStorage32FromSpecialTargetRegister_Target64),
-              "Mismatch in RegStorage array sizes");
-
-// Return a target-dependent special register for 32-bit.
-RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) const {
-  DCHECK_EQ(RegStorage32FromSpecialTargetRegister_Target32[kCount], rs_rCX);
-  DCHECK_EQ(RegStorage32FromSpecialTargetRegister_Target64[kCount], rs_rCX);
-  DCHECK_LT(reg, arraysize(RegStorage32FromSpecialTargetRegister_Target32));
-  return cu_->target64 ? RegStorage32FromSpecialTargetRegister_Target64[reg]
-                       : RegStorage32FromSpecialTargetRegister_Target32[reg];
-}
-
-RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Do not use this function!!!";
-  UNREACHABLE();
-}
-
-/*
- * Decode the register id.
- */
-ResourceMask X86Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
-  /* Double registers on x86 alias a single FP register, so this is always a single bit. */
-  return ResourceMask::Bit(
-      /* FP register starts at bit position 16 */
-      ((reg.IsFloat() || reg.StorageSize() > 8) ? kX86FPReg0 : 0) + reg.GetRegNum());
-}
-
-ResourceMask X86Mir2Lir::GetPCUseDefEncoding() const {
-  return kEncodeNone;
-}
-
-void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
-                                          ResourceMask* use_mask, ResourceMask* def_mask) {
-  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
-  DCHECK(!lir->flags.use_def_invalid);
-
-  // X86-specific resource map setup here.
-  if (flags & REG_USE_SP) {
-    use_mask->SetBit(kX86RegSP);
-  }
-
-  if (flags & REG_DEF_SP) {
-    def_mask->SetBit(kX86RegSP);
-  }
-
-  if (flags & REG_DEFA) {
-    SetupRegMask(def_mask, rs_rAX.GetReg());
-  }
-
-  if (flags & REG_DEFD) {
-    SetupRegMask(def_mask, rs_rDX.GetReg());
-  }
-  if (flags & REG_USEA) {
-    SetupRegMask(use_mask, rs_rAX.GetReg());
-  }
-
-  if (flags & REG_USEC) {
-    SetupRegMask(use_mask, rs_rCX.GetReg());
-  }
-
-  if (flags & REG_USED) {
-    SetupRegMask(use_mask, rs_rDX.GetReg());
-  }
-
-  if (flags & REG_USEB) {
-    SetupRegMask(use_mask, rs_rBX.GetReg());
-  }
-
-  // Fix up a hard-to-describe instruction: uses rAX, rCX, rDI; sets rDI.
-  if (lir->opcode == kX86RepneScasw) {
-    SetupRegMask(use_mask, rs_rAX.GetReg());
-    SetupRegMask(use_mask, rs_rCX.GetReg());
-    SetupRegMask(use_mask, rs_rDI.GetReg());
-    SetupRegMask(def_mask, rs_rDI.GetReg());
-  }
-
-  if (flags & USE_FP_STACK) {
-    use_mask->SetBit(kX86FPStack);
-    def_mask->SetBit(kX86FPStack);
-  }
-}
-
-/* For dumping instructions */
-static const char* x86RegName[] = {
-  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
-  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
-};
-
-static const char* x86CondName[] = {
-  "O",
-  "NO",
-  "B/NAE/C",
-  "NB/AE/NC",
-  "Z/EQ",
-  "NZ/NE",
-  "BE/NA",
-  "NBE/A",
-  "S",
-  "NS",
-  "P/PE",
-  "NP/PO",
-  "L/NGE",
-  "NL/GE",
-  "LE/NG",
-  "NLE/G"
-};
-
-/*
 - * Interpret a format string and build the decoded instruction string.
- * See format key in Assemble.cc.
- */
-std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
-  std::string buf;
-  size_t i = 0;
-  size_t fmt_len = strlen(fmt);
-  while (i < fmt_len) {
-    if (fmt[i] != '!') {
-      buf += fmt[i];
-      i++;
-    } else {
-      i++;
-      DCHECK_LT(i, fmt_len);
-      char operand_number_ch = fmt[i];
-      i++;
-      if (operand_number_ch == '!') {
-        buf += "!";
-      } else {
-        int operand_number = operand_number_ch - '0';
-        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
-        DCHECK_LT(i, fmt_len);
-        int operand = lir->operands[operand_number];
-        switch (fmt[i]) {
-          case 'c':
-            DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName));
-            buf += x86CondName[operand];
-            break;
-          case 'd':
-            buf += StringPrintf("%d", operand);
-            break;
-          case 'q': {
-             int64_t value = static_cast<int64_t>(static_cast<int64_t>(operand) << 32 |
-                             static_cast<uint32_t>(lir->operands[operand_number+1]));
-             buf += StringPrintf("%" PRId64, value);
-             break;
-          }
-          case 'p': {
-            const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(operand);
-            buf += StringPrintf("0x%08x", tab_rec->offset);
-            break;
-          }
-          case 'r':
-            if (RegStorage::IsFloat(operand)) {
-              int fp_reg = RegStorage::RegNum(operand);
-              buf += StringPrintf("xmm%d", fp_reg);
-            } else {
-              int reg_num = RegStorage::RegNum(operand);
-              DCHECK_LT(static_cast<size_t>(reg_num), arraysize(x86RegName));
-              buf += x86RegName[reg_num];
-            }
-            break;
-          case 't':
-            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
-                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
-                                lir->target);
-            break;
-          default:
-            buf += StringPrintf("DecodeError '%c'", fmt[i]);
-            break;
-        }
-        i++;
-      }
-    }
-  }
-  return buf;
-}
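-
-/*
- * Illustrative example (not from the original source): for an encoding
- * whose fmt is "cmp !0r,!1d" and a LIR whose operands are {rAX's reg id, 42},
- * the loop above decodes '!0r' as the register name "rax" and '!1d' as the
- * decimal immediate "42", producing "cmp rax,42".
- */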
-
-void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, const ResourceMask& mask, const char *prefix) {
-  char buf[256];
-  buf[0] = 0;
-
-  if (mask.Equals(kEncodeAll)) {
-    strcpy(buf, "all");
-  } else {
-    char num[8];
-    int i;
-
-    for (i = 0; i < kX86RegEnd; i++) {
-      if (mask.HasBit(i)) {
-        snprintf(num, arraysize(num), "%d ", i);
-        strcat(buf, num);
-      }
-    }
-
-    if (mask.HasBit(ResourceMask::kCCode)) {
-      strcat(buf, "cc ");
-    }
-    /* Memory bits */
-    if (x86LIR && (mask.HasBit(ResourceMask::kDalvikReg))) {
-      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
-               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
-               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
-    }
-    if (mask.HasBit(ResourceMask::kLiteral)) {
-      strcat(buf, "lit ");
-    }
-
-    if (mask.HasBit(ResourceMask::kHeapRef)) {
-      strcat(buf, "heap ");
-    }
-    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
-      strcat(buf, "noalias ");
-    }
-  }
-  if (buf[0]) {
-    LOG(INFO) << prefix << ": " <<  buf;
-  }
-}
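-
-/*
- * Example output (illustrative): a mask covering bit 0 (rax), the condition
- * codes, and an aliased Dalvik register might be logged as
- *   "def: 0 cc dr5"
- * where the leading number is the raw bit position below kX86RegEnd.
- */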
-
-void X86Mir2Lir::AdjustSpillMask() {
-  // x86 has no link register to spill, but we still account for the
-  // (fake) return address pushed by the call in the spill mask.
-  core_spill_mask_ |= (1 << rs_rRET.GetRegNum());
-  num_core_spills_++;
-}
-
-RegStorage X86Mir2Lir::AllocateByteRegister() {
-  RegStorage reg = AllocTypedTemp(false, kCoreReg);
-  if (!cu_->target64) {
-    DCHECK_LT(reg.GetRegNum(), rs_rX86_SP_32.GetRegNum());
-  }
-  return reg;
-}
-
-RegStorage X86Mir2Lir::Get128BitRegister(RegStorage reg) {
-  return GetRegInfo(reg)->Master()->GetReg();
-}
-
-bool X86Mir2Lir::IsByteRegister(RegStorage reg) const {
-  return cu_->target64 || reg.GetRegNum() < rs_rX86_SP_32.GetRegNum();
-}
-
-/* Clobber all regs that might be used by an external C call */
-void X86Mir2Lir::ClobberCallerSave() {
-  if (cu_->target64) {
-    Clobber(rs_rAX);
-    Clobber(rs_rCX);
-    Clobber(rs_rDX);
-    Clobber(rs_rSI);
-    Clobber(rs_rDI);
-
-    Clobber(rs_r8);
-    Clobber(rs_r9);
-    Clobber(rs_r10);
-    Clobber(rs_r11);
-
-    Clobber(rs_fr8);
-    Clobber(rs_fr9);
-    Clobber(rs_fr10);
-    Clobber(rs_fr11);
-  } else {
-    Clobber(rs_rAX);
-    Clobber(rs_rCX);
-    Clobber(rs_rDX);
-    Clobber(rs_rBX);
-  }
-
-  Clobber(rs_fr0);
-  Clobber(rs_fr1);
-  Clobber(rs_fr2);
-  Clobber(rs_fr3);
-  Clobber(rs_fr4);
-  Clobber(rs_fr5);
-  Clobber(rs_fr6);
-  Clobber(rs_fr7);
-}
-
-RegLocation X86Mir2Lir::GetReturnWideAlt() {
-  RegLocation res = LocCReturnWide();
-  DCHECK_EQ(res.reg.GetLowReg(), rs_rAX.GetReg());
-  DCHECK_EQ(res.reg.GetHighReg(), rs_rDX.GetReg());
-  Clobber(rs_rAX);
-  Clobber(rs_rDX);
-  MarkInUse(rs_rAX);
-  MarkInUse(rs_rDX);
-  MarkWide(res.reg);
-  return res;
-}
-
-RegLocation X86Mir2Lir::GetReturnAlt() {
-  RegLocation res = LocCReturn();
-  res.reg.SetReg(rs_rDX.GetReg());
-  Clobber(rs_rDX);
-  MarkInUse(rs_rDX);
-  return res;
-}
-
-/* To be used when explicitly managing register use */
-void X86Mir2Lir::LockCallTemps() {
-  LockTemp(TargetReg32(kArg0));
-  LockTemp(TargetReg32(kArg1));
-  LockTemp(TargetReg32(kArg2));
-  LockTemp(TargetReg32(kArg3));
-  LockTemp(TargetReg32(kFArg0));
-  LockTemp(TargetReg32(kFArg1));
-  LockTemp(TargetReg32(kFArg2));
-  LockTemp(TargetReg32(kFArg3));
-  if (cu_->target64) {
-    LockTemp(TargetReg32(kArg4));
-    LockTemp(TargetReg32(kArg5));
-    LockTemp(TargetReg32(kFArg4));
-    LockTemp(TargetReg32(kFArg5));
-    LockTemp(TargetReg32(kFArg6));
-    LockTemp(TargetReg32(kFArg7));
-  }
-}
-
-/* To be used when explicitly managing register use */
-void X86Mir2Lir::FreeCallTemps() {
-  FreeTemp(TargetReg32(kArg0));
-  FreeTemp(TargetReg32(kArg1));
-  FreeTemp(TargetReg32(kArg2));
-  FreeTemp(TargetReg32(kArg3));
-  FreeTemp(TargetReg32(kHiddenArg));
-  FreeTemp(TargetReg32(kFArg0));
-  FreeTemp(TargetReg32(kFArg1));
-  FreeTemp(TargetReg32(kFArg2));
-  FreeTemp(TargetReg32(kFArg3));
-  if (cu_->target64) {
-    FreeTemp(TargetReg32(kArg4));
-    FreeTemp(TargetReg32(kArg5));
-    FreeTemp(TargetReg32(kFArg4));
-    FreeTemp(TargetReg32(kFArg5));
-    FreeTemp(TargetReg32(kFArg6));
-    FreeTemp(TargetReg32(kFArg7));
-  }
-}
-
-bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
-    switch (opcode) {
-      case kX86LockCmpxchgMR:
-      case kX86LockCmpxchgAR:
-      case kX86LockCmpxchg64M:
-      case kX86LockCmpxchg64A:
-      case kX86LockCmpxchg64AR:
-      case kX86LockAdd32MI8:
-      case kX86XchgMR:
-      case kX86Mfence:
-        // Atomic memory instructions provide full barrier.
-        return true;
-      default:
-        break;
-    }
-
-    // Be conservative if we cannot prove that the instruction provides a full barrier.
-    return false;
-}
-
-bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
-  const X86InstructionSetFeatures* features =
-    cu_->compiler_driver->GetInstructionSetFeatures()->AsX86InstructionSetFeatures();
-  if (!features->IsSmp()) {
-    return false;
-  }
-  // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
-  LIR* mem_barrier = last_lir_insn_;
-
-  bool ret = false;
-  /*
-   * According to the JSR-133 Cookbook, for x86 only StoreLoad/AnyAny barriers need memory fence.
-   * All other barriers (LoadAny, AnyStore, StoreStore) are nops due to the x86 memory model.
-   * For those cases, all we need to ensure is that there is a scheduling barrier in place.
-   */
-  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
-  bool use_locked_add = features->PrefersLockedAddSynchronization();
-  if (barrier_kind == kAnyAny) {
-    // If no LIR already exists that can be used as a barrier, then generate one.
-    if (mem_barrier == nullptr) {
-      if (use_locked_add) {
-        mem_barrier = NewLIR3(kX86LockAdd32MI8, rs_rSP.GetReg(), 0, 0);
-      } else {
-        mem_barrier = NewLIR0(kX86Mfence);
-      }
-      ret = true;
-    }
-
-    // If last instruction does not provide full barrier, then insert a barrier.
-    if (!ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode))) {
-      if (use_locked_add) {
-        mem_barrier = NewLIR3(kX86LockAdd32MI8, rs_rSP.GetReg(), 0, 0);
-      } else {
-        mem_barrier = NewLIR0(kX86Mfence);
-      }
-      ret = true;
-    }
-  } else if (barrier_kind == kNTStoreStore) {
-    if (use_locked_add) {
-      mem_barrier = NewLIR3(kX86LockAdd32MI8, rs_rSP.GetReg(), 0, 0);
-    } else {
-      mem_barrier = NewLIR0(kX86Sfence);
-    }
-    ret = true;
-  }
-
-  // Now ensure that a scheduling barrier is in place.
-  if (mem_barrier == nullptr) {
-    GenBarrier();
-  } else {
-    // Mark as a scheduling barrier.
-    DCHECK(!mem_barrier->flags.use_def_invalid);
-    mem_barrier->u.m.def_mask = &kEncodeAll;
-  }
-  return ret;
-}
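-
-/*
- * Worked example (illustrative): for a volatile store followed by
- * GenMemBarrier(kAnyAny), the last LIR is the store itself. A plain mov
- * does not provide a full barrier, so either "lock add [esp], 0" (when the
- * CPU prefers locked-add synchronization) or an mfence is appended, and the
- * resulting LIR is marked as a scheduling barrier via def_mask = kEncodeAll.
- */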
-
-void X86Mir2Lir::CompilerInitializeRegAlloc() {
-  if (cu_->target64) {
-    reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
-                                              dp_regs_64, reserved_regs_64, reserved_regs_64q,
-                                              core_temps_64, core_temps_64q,
-                                              sp_temps_64, dp_temps_64));
-  } else {
-    reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, sp_regs_32,
-                                              dp_regs_32, reserved_regs_32, empty_pool,
-                                              core_temps_32, empty_pool,
-                                              sp_temps_32, dp_temps_32));
-  }
-
-  // Target-specific adjustments.
-
-  // Add in XMM registers.
-  const ArrayRef<const RegStorage> *xp_regs = cu_->target64 ? &xp_regs_64 : &xp_regs_32;
-  for (RegStorage reg : *xp_regs) {
-    RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
-    reginfo_map_[reg.GetReg()] = info;
-  }
-  const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32;
-  for (RegStorage reg : *xp_temps) {
-    RegisterInfo* xp_reg_info = GetRegInfo(reg);
-    xp_reg_info->SetIsTemp(true);
-  }
-
-  // Special Handling for x86_64 RIP addressing.
-  if (cu_->target64) {
-    RegisterInfo* info = new (arena_) RegisterInfo(RegStorage(kRIPReg), kEncodeNone);
-    reginfo_map_[kRIPReg] = info;
-  }
-
-  // Alias single precision xmm to double xmms.
-  // TODO: as needed, add larger vector sizes - alias all to the largest.
-  for (RegisterInfo* info : reg_pool_->sp_regs_) {
-    int sp_reg_num = info->GetReg().GetRegNum();
-    RegStorage xp_reg = RegStorage::Solo128(sp_reg_num);
-    RegisterInfo* xp_reg_info = GetRegInfo(xp_reg);
-    // 128-bit xmm vector register's master storage should refer to itself.
-    DCHECK_EQ(xp_reg_info, xp_reg_info->Master());
-
-    // Redirect 32-bit vector's master storage to 128-bit vector.
-    info->SetMaster(xp_reg_info);
-
-    RegStorage dp_reg = RegStorage::FloatSolo64(sp_reg_num);
-    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
-    // Redirect 64-bit vector's master storage to 128-bit vector.
-    dp_reg_info->SetMaster(xp_reg_info);
-    // Singles should show a single 32-bit mask bit, at first referring to the low half.
-    DCHECK_EQ(info->StorageMask(), 0x1U);
-  }
-
-  if (cu_->target64) {
-    // Alias 32bit W registers to corresponding 64bit X registers.
-    for (RegisterInfo* info : reg_pool_->core_regs_) {
-      int x_reg_num = info->GetReg().GetRegNum();
-      RegStorage x_reg = RegStorage::Solo64(x_reg_num);
-      RegisterInfo* x_reg_info = GetRegInfo(x_reg);
-      // 64bit X register's master storage should refer to itself.
-      DCHECK_EQ(x_reg_info, x_reg_info->Master());
-      // Redirect 32bit W master storage to 64bit X.
-      info->SetMaster(x_reg_info);
-      // 32bit W should show a single 32-bit mask bit, at first referring to the low half.
-      DCHECK_EQ(info->StorageMask(), 0x1U);
-    }
-  }
-
-  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
-  // TODO: adjust for x86/hard float calling convention.
-  reg_pool_->next_core_reg_ = 2;
-  reg_pool_->next_sp_reg_ = 2;
-  reg_pool_->next_dp_reg_ = 1;
-}
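-
-/*
- * Aliasing sketch (illustrative): after initialization, the 128-bit xmm0
- * info is its own master, while the fr0 (single) and dr0 (double) views
- * point at it, so clobbering any view invalidates all of them. On x86-64
- * the 32-bit core register views are redirected to their 64-bit masters in
- * the same way.
- */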
-
-int X86Mir2Lir::VectorRegisterSize() {
-  return 128;
-}
-
-int X86Mir2Lir::NumReservableVectorRegisters(bool long_or_fp) {
-  int num_vector_temps = cu_->target64 ? xp_temps_64.size() : xp_temps_32.size();
-
-  // Leave a few temps for use by the backend as scratch registers.
-  return long_or_fp ? num_vector_temps - 2 : num_vector_temps - 1;
-}
-
-static dwarf::Reg DwarfCoreReg(bool is_x86_64, int num) {
-  return is_x86_64 ? dwarf::Reg::X86_64Core(num) : dwarf::Reg::X86Core(num);
-}
-
-static dwarf::Reg DwarfFpReg(bool is_x86_64, int num) {
-  return is_x86_64 ? dwarf::Reg::X86_64Fp(num) : dwarf::Reg::X86Fp(num);
-}
-
-void X86Mir2Lir::SpillCoreRegs() {
-  if (num_core_spills_ == 0) {
-    return;
-  }
-  // Spill mask, not including the fake return address register.
-  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
-  int offset =
-      frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
-  OpSize size = cu_->target64 ? k64 : k32;
-  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
-  for (int reg = 0; mask != 0u; mask >>= 1, reg++) {
-    if ((mask & 0x1) != 0u) {
-      DCHECK_NE(offset, 0) << "offset 0 should be for method";
-      RegStorage r_src = cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg);
-      StoreBaseDisp(rs_rSP, offset, r_src, size, kNotVolatile);
-      cfi_.RelOffset(DwarfCoreReg(cu_->target64, reg), offset);
-      offset += GetInstructionSetPointerSize(cu_->instruction_set);
-    }
-  }
-}
-
-void X86Mir2Lir::UnSpillCoreRegs() {
-  if (num_core_spills_ == 0) {
-    return;
-  }
-  // Spill mask, not including the fake return address register.
-  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
-  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
-  OpSize size = cu_->target64 ? k64 : k32;
-  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
-  for (int reg = 0; mask != 0u; mask >>= 1, reg++) {
-    if ((mask & 0x1) != 0u) {
-      RegStorage r_dest = cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg);
-      LoadBaseDisp(rs_rSP, offset, r_dest, size, kNotVolatile);
-      cfi_.Restore(DwarfCoreReg(cu_->target64, reg));
-      offset += GetInstructionSetPointerSize(cu_->instruction_set);
-    }
-  }
-}
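-
-/*
- * Worked example (illustrative): with two callee-save core registers
- * spilled plus the fake return-address slot, num_core_spills_ == 3, so on
- * x86-64 the first real register lives at frame_size_ - 24 and the second
- * at frame_size_ - 16; the loops above walk the mask LSB-first and emit a
- * matching CFI record for every slot they touch.
- */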
-
-void X86Mir2Lir::SpillFPRegs() {
-  if (num_fp_spills_ == 0) {
-    return;
-  }
-  uint32_t mask = fp_spill_mask_;
-  int offset = frame_size_ -
-      (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
-  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
-  for (int reg = 0; mask != 0u; mask >>= 1, reg++) {
-    if ((mask & 0x1) != 0u) {
-      StoreBaseDisp(rs_rSP, offset, RegStorage::FloatSolo64(reg), k64, kNotVolatile);
-      cfi_.RelOffset(DwarfFpReg(cu_->target64, reg), offset);
-      offset += sizeof(double);
-    }
-  }
-}
-
-void X86Mir2Lir::UnSpillFPRegs() {
-  if (num_fp_spills_ == 0) {
-    return;
-  }
-  uint32_t mask = fp_spill_mask_;
-  int offset = frame_size_ -
-      (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
-  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
-  for (int reg = 0; mask != 0u; mask >>= 1, reg++) {
-    if ((mask & 0x1) != 0u) {
-      LoadBaseDisp(rs_rSP, offset, RegStorage::FloatSolo64(reg),
-                   k64, kNotVolatile);
-      cfi_.Restore(DwarfFpReg(cu_->target64, reg));
-      offset += sizeof(double);
-    }
-  }
-}
-
-bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
-  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
-}
-
-RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
-  // Prefer XMM registers.  This avoids a problem with iget/iput to a floating-point
-  // field when the cached temporary for the same VR is a core register.
-  if (size == kSingle || size == kDouble) {
-    return kFPReg;
-  }
-
-  // X86_64 can handle any size.
-  if (cu_->target64) {
-    return RegClassBySize(size);
-  }
-
-  if (UNLIKELY(is_volatile)) {
-    // On x86, atomic 64-bit load/store requires an fp register.
-    // Smaller aligned load/store is atomic for both core and fp registers.
-    if (size == k64 || size == kDouble) {
-      return kFPReg;
-    }
-  }
-  return RegClassBySize(size);
-}
-
-X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
-    : Mir2Lir(cu, mir_graph, arena),
-      in_to_reg_storage_x86_64_mapper_(this), in_to_reg_storage_x86_mapper_(this),
-      pc_rel_base_reg_(RegStorage::InvalidReg()),
-      pc_rel_base_reg_used_(false),
-      setup_pc_rel_base_reg_(nullptr),
-      method_address_insns_(arena->Adapter()),
-      class_type_address_insns_(arena->Adapter()),
-      call_method_insns_(arena->Adapter()),
-      dex_cache_access_insns_(arena->Adapter()),
-      const_vectors_(nullptr) {
-  method_address_insns_.reserve(100);
-  class_type_address_insns_.reserve(100);
-  call_method_insns_.reserve(100);
-  for (int i = 0; i < kX86Last; i++) {
-    DCHECK_EQ(X86Mir2Lir::EncodingMap[i].opcode, i)
-        << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
-        << " is wrong: expecting " << i << ", seeing "
-        << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
-  }
-}
-
-Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
-                          ArenaAllocator* const arena) {
-  return new X86Mir2Lir(cu, mir_graph, arena);
-}
-
-// Not used in x86(-64)
-RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
-  UNREACHABLE();
-}
-
-LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
-  // First load the pointer from fs:[suspend-trigger] into eax.
-  // Then use a test instruction to indirect via that address.
-  if (cu_->target64) {
-    NewLIR2(kX86Mov64RT, rs_rAX.GetReg(),
-        Thread::ThreadSuspendTriggerOffset<8>().Int32Value());
-  } else {
-    NewLIR2(kX86Mov32RT, rs_rAX.GetReg(),
-        Thread::ThreadSuspendTriggerOffset<4>().Int32Value());
-  }
-  return NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rAX.GetReg(), 0);
-}
-
-uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
-  DCHECK(!IsPseudoLirOp(opcode));
-  return X86Mir2Lir::EncodingMap[opcode].flags;
-}
-
-const char* X86Mir2Lir::GetTargetInstName(int opcode) {
-  DCHECK(!IsPseudoLirOp(opcode));
-  return X86Mir2Lir::EncodingMap[opcode].name;
-}
-
-const char* X86Mir2Lir::GetTargetInstFmt(int opcode) {
-  DCHECK(!IsPseudoLirOp(opcode));
-  return X86Mir2Lir::EncodingMap[opcode].fmt;
-}
-
-void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
-  // Can we do this directly to memory?
-  rl_dest = UpdateLocWide(rl_dest);
-  if ((rl_dest.location == kLocDalvikFrame) ||
-      (rl_dest.location == kLocCompilerTemp)) {
-    int32_t val_lo = Low32Bits(value);
-    int32_t val_hi = High32Bits(value);
-    int r_base = rs_rX86_SP_32.GetReg();
-    int displacement = SRegOffset(rl_dest.s_reg_low);
-
-    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-    LIR * store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
-    AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
-                              false /* is_load */, true /* is64bit */);
-    store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi);
-    AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2,
-                              false /* is_load */, true /* is64bit */);
-    return;
-  }
-
-  // Just use the standard code to do the generation.
-  Mir2Lir::GenConstWide(rl_dest, value);
-}
-
-// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc
-void X86Mir2Lir::DumpRegLocation(RegLocation loc) {
-  LOG(INFO)  << "location: " << loc.location << ','
-             << (loc.wide ? " w" : "  ")
-             << (loc.defined ? " D" : "  ")
-             << (loc.is_const ? " c" : "  ")
-             << (loc.fp ? " F" : "  ")
-             << (loc.core ? " C" : "  ")
-             << (loc.ref ? " r" : "  ")
-             << (loc.high_word ? " h" : "  ")
-             << (loc.home ? " H" : "  ")
-             << ", low: " << static_cast<int>(loc.reg.GetLowReg())
-             << ", high: " << static_cast<int>(loc.reg.GetHighReg())
-             << ", s_reg: " << loc.s_reg_low
-             << ", orig: " << loc.orig_sreg;
-}
-
-void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
-                                   SpecialTargetRegister symbolic_reg) {
-  /*
-   * For x86, just generate a 32 bit move immediate instruction, that will be filled
-   * in at 'link time'.  For now, put a unique value based on target to ensure that
-   * code deduplication works.
-   */
-  int target_method_idx = target_method.dex_method_index;
-  const DexFile* target_dex_file = target_method.dex_file;
-  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
-  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);
-
-  // Generate the move instruction with the unique pointer and save index, dex_file, and type.
-  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI,
-                     TargetReg(symbolic_reg, kNotWide).GetReg(),
-                     static_cast<int>(target_method_id_ptr), target_method_idx,
-                     WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
-  AppendLIR(move);
-  method_address_insns_.push_back(move);
-}
-
-void X86Mir2Lir::LoadClassType(const DexFile& dex_file, uint32_t type_idx,
-                               SpecialTargetRegister symbolic_reg) {
-  /*
-   * For x86, just generate a 32 bit move immediate instruction, that will be filled
-   * in at 'link time'.  For now, put a unique value based on target to ensure that
-   * code deduplication works.
-   */
-  const DexFile::TypeId& id = dex_file.GetTypeId(type_idx);
-  uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);
-
-  // Generate the move instruction with the unique pointer and save index and type.
-  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI,
-                     TargetReg(symbolic_reg, kNotWide).GetReg(),
-                     static_cast<int>(ptr), type_idx,
-                     WrapPointer(const_cast<DexFile*>(&dex_file)));
-  AppendLIR(move);
-  class_type_address_insns_.push_back(move);
-}
-
-LIR* X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
-  /*
-   * For x86, just generate a 32 bit call relative instruction, that will be filled
-   * in at 'link time'.
-   */
-  int target_method_idx = target_method.dex_method_index;
-  const DexFile* target_dex_file = target_method.dex_file;
-
-  // Generate the call instruction with the unique pointer and save index, dex_file, and type.
-  // NOTE: Method deduplication takes linker patches into account, so we can just pass 0
-  // as a placeholder for the offset.
-  LIR* call = RawLIR(current_dalvik_offset_, kX86CallI, 0,
-                     target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
-  AppendLIR(call);
-  call_method_insns_.push_back(call);
-  return call;
-}
-
-static LIR* GenInvokeNoInlineCall(Mir2Lir* mir_to_lir, InvokeType type) {
-  QuickEntrypointEnum trampoline;
-  switch (type) {
-    case kInterface:
-      trampoline = kQuickInvokeInterfaceTrampolineWithAccessCheck;
-      break;
-    case kDirect:
-      trampoline = kQuickInvokeDirectTrampolineWithAccessCheck;
-      break;
-    case kStatic:
-      trampoline = kQuickInvokeStaticTrampolineWithAccessCheck;
-      break;
-    case kSuper:
-      trampoline = kQuickInvokeSuperTrampolineWithAccessCheck;
-      break;
-    case kVirtual:
-      trampoline = kQuickInvokeVirtualTrampolineWithAccessCheck;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected invoke type";
-      trampoline = kQuickInvokeInterfaceTrampolineWithAccessCheck;
-  }
-  return mir_to_lir->InvokeTrampoline(kOpBlx, RegStorage::InvalidReg(), trampoline);
-}
-
-LIR* X86Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info) {
-  LIR* call_insn;
-  if (method_info.FastPath()) {
-    if (method_info.DirectCode() == static_cast<uintptr_t>(-1)) {
-      // We can have the linker fixup a call relative.
-      call_insn = CallWithLinkerFixup(method_info.GetTargetMethod(), method_info.GetSharpType());
-    } else {
-      call_insn = OpMem(kOpBlx, TargetReg(kArg0, kRef),
-                        ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-                            cu_->target64 ? 8 : 4).Int32Value());
-    }
-  } else {
-    call_insn = GenInvokeNoInlineCall(this, method_info.GetSharpType());
-  }
-  return call_insn;
-}
-
-void X86Mir2Lir::InstallLiteralPools() {
-  // These are handled differently for x86.
-  DCHECK(code_literal_list_ == nullptr);
-  DCHECK(method_literal_list_ == nullptr);
-  DCHECK(class_literal_list_ == nullptr);
-
-  if (const_vectors_ != nullptr) {
-    // Vector literals must be 16-byte aligned. The header that is placed
-    // in the code section causes misalignment, so we take it into account.
-    // Otherwise we can rely on x86 method code being aligned to 16 bytes.
-    DCHECK_EQ(GetInstructionSetAlignment(cu_->instruction_set), 16u);
-    uint32_t bytes_to_fill =
-        (0x10 - ((code_buffer_.size() + sizeof(OatQuickMethodHeader)) & 0xF)) & 0xF;
-    while (bytes_to_fill > 0) {
-      code_buffer_.push_back(0);
-      bytes_to_fill--;
-    }
-
-    for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
-      Push32(&code_buffer_, p->operands[0]);
-      Push32(&code_buffer_, p->operands[1]);
-      Push32(&code_buffer_, p->operands[2]);
-      Push32(&code_buffer_, p->operands[3]);
-    }
-  }
-
-  patches_.reserve(method_address_insns_.size() + class_type_address_insns_.size() +
-                   call_method_insns_.size() + dex_cache_access_insns_.size());
-
-  // Handle the fixups for methods.
-  for (LIR* p : method_address_insns_) {
-      DCHECK_EQ(p->opcode, kX86Mov32RI);
-      uint32_t target_method_idx = p->operands[2];
-      const DexFile* target_dex_file = UnwrapPointer<DexFile>(p->operands[3]);
-
-      // The offset to patch is the last 4 bytes of the instruction.
-      int patch_offset = p->offset + p->flags.size - 4;
-      patches_.push_back(LinkerPatch::MethodPatch(patch_offset,
-                                                  target_dex_file, target_method_idx));
-  }
-
-  // Handle the fixups for class types.
-  for (LIR* p : class_type_address_insns_) {
-      DCHECK_EQ(p->opcode, kX86Mov32RI);
-
-      const DexFile* class_dex_file = UnwrapPointer<DexFile>(p->operands[3]);
-      uint32_t target_type_idx = p->operands[2];
-
-      // The offset to patch is the last 4 bytes of the instruction.
-      int patch_offset = p->offset + p->flags.size - 4;
-      patches_.push_back(LinkerPatch::TypePatch(patch_offset,
-                                                class_dex_file, target_type_idx));
-  }
-
-  // And now the PC-relative calls to methods.
-  for (LIR* p : call_method_insns_) {
-      DCHECK_EQ(p->opcode, kX86CallI);
-      uint32_t target_method_idx = p->operands[1];
-      const DexFile* target_dex_file = UnwrapPointer<DexFile>(p->operands[2]);
-
-      // The offset to patch is the last 4 bytes of the instruction.
-      int patch_offset = p->offset + p->flags.size - 4;
-      patches_.push_back(LinkerPatch::RelativeCodePatch(patch_offset,
-                                                        target_dex_file, target_method_idx));
-  }
-
-  // PC-relative references to dex cache arrays.
-  for (LIR* p : dex_cache_access_insns_) {
-    DCHECK(p->opcode == kX86Mov32RM || p->opcode == kX86Mov64RM);
-    const DexFile* dex_file = UnwrapPointer<DexFile>(p->operands[3]);
-    uint32_t offset = p->operands[4];
-    // The offset to patch is the last 4 bytes of the instruction.
-    int patch_offset = p->offset + p->flags.size - 4;
-    DCHECK(!p->flags.is_nop);
-    patches_.push_back(LinkerPatch::DexCacheArrayPatch(patch_offset, dex_file,
-                                                       p->target->offset, offset));
-  }
-
-  // And do the normal processing.
-  Mir2Lir::InstallLiteralPools();
-}
-
-bool X86Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
-  RegLocation rl_src = info->args[0];
-  RegLocation rl_srcPos = info->args[1];
-  RegLocation rl_dst = info->args[2];
-  RegLocation rl_dstPos = info->args[3];
-  RegLocation rl_length = info->args[4];
-  if (rl_srcPos.is_const && (mir_graph_->ConstantValue(rl_srcPos) < 0)) {
-    return false;
-  }
-  if (rl_dstPos.is_const && (mir_graph_->ConstantValue(rl_dstPos) < 0)) {
-    return false;
-  }
-  ClobberCallerSave();
-  LockCallTemps();  // Using fixed registers.
-  RegStorage tmp_reg = cu_->target64 ? rs_r11 : rs_rBX;
-  LoadValueDirectFixed(rl_src, rs_rAX);
-  LoadValueDirectFixed(rl_dst, rs_rCX);
-  LIR* src_dst_same  = OpCmpBranch(kCondEq, rs_rAX, rs_rCX, nullptr);
-  LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX, 0, nullptr);
-  LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
-  LoadValueDirectFixed(rl_length, rs_rDX);
-  // If the copy length is > 128 characters (256 bytes) or negative, take the slow path.
-  LIR* len_too_big  = OpCmpImmBranch(kCondHi, rs_rDX, 128, nullptr);
-  LoadValueDirectFixed(rl_src, rs_rAX);
-  LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
-  LIR* src_bad_len  = nullptr;
-  LIR* src_bad_off = nullptr;
-  LIR* srcPos_negative  = nullptr;
-  if (!rl_srcPos.is_const) {
-    LoadValueDirectFixed(rl_srcPos, tmp_reg);
-    srcPos_negative  = OpCmpImmBranch(kCondLt, tmp_reg, 0, nullptr);
-    // src_pos < src_len
-    src_bad_off = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr);
-    // src_len - src_pos < copy_len
-    OpRegRegReg(kOpSub, tmp_reg, rs_rAX, tmp_reg);
-    src_bad_len = OpCmpBranch(kCondLt, tmp_reg, rs_rDX, nullptr);
-  } else {
-    int32_t pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg);
-    if (pos_val == 0) {
-      src_bad_len  = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
-    } else {
-      // src_pos < src_len
-      src_bad_off = OpCmpImmBranch(kCondLt, rs_rAX, pos_val, nullptr);
-      // src_len - src_pos < copy_len
-      OpRegRegImm(kOpSub, tmp_reg, rs_rAX, pos_val);
-      src_bad_len = OpCmpBranch(kCondLt, tmp_reg, rs_rDX, nullptr);
-    }
-  }
-  LIR* dstPos_negative = nullptr;
-  LIR* dst_bad_len = nullptr;
-  LIR* dst_bad_off = nullptr;
-  LoadValueDirectFixed(rl_dst, rs_rAX);
-  LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
-  if (!rl_dstPos.is_const) {
-    LoadValueDirectFixed(rl_dstPos, tmp_reg);
-    dstPos_negative = OpCmpImmBranch(kCondLt, tmp_reg, 0, nullptr);
-    // dst_pos < dst_len
-    dst_bad_off = OpCmpBranch(kCondLt, rs_rAX, tmp_reg, nullptr);
-    // dst_len - dst_pos < copy_len
-    OpRegRegReg(kOpSub, tmp_reg, rs_rAX, tmp_reg);
-    dst_bad_len = OpCmpBranch(kCondLt, tmp_reg, rs_rDX, nullptr);
-  } else {
-    int32_t pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg);
-    if (pos_val == 0) {
-      dst_bad_len = OpCmpBranch(kCondLt, rs_rAX, rs_rDX, nullptr);
-    } else {
-      // dst_pos < dst_len
-      dst_bad_off = OpCmpImmBranch(kCondLt, rs_rAX, pos_val, nullptr);
-      // dst_len - dst_pos < copy_len
-      OpRegRegImm(kOpSub, tmp_reg, rs_rAX, pos_val);
-      dst_bad_len = OpCmpBranch(kCondLt, tmp_reg, rs_rDX, nullptr);
-    }
-  }
-  // Everything is checked now.
-  LoadValueDirectFixed(rl_src, rs_rAX);
-  LoadValueDirectFixed(rl_dst, tmp_reg);
-  LoadValueDirectFixed(rl_srcPos, rs_rCX);
-  NewLIR5(kX86Lea32RA, rs_rAX.GetReg(), rs_rAX.GetReg(),
-       rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
-  // RAX now holds the address of the first src element to be copied.
-
-  LoadValueDirectFixed(rl_dstPos, rs_rCX);
-  NewLIR5(kX86Lea32RA, tmp_reg.GetReg(), tmp_reg.GetReg(),
-       rs_rCX.GetReg(), 1, mirror::Array::DataOffset(2).Int32Value());
-  // tmp_reg (RBX or R11) now holds the address of the first dst element to be copied.
-
-  // Check if the number of elements to be copied is odd or even. If odd
-  // then copy the first element (so that the remaining number of elements
-  // is even).
-  LoadValueDirectFixed(rl_length, rs_rCX);
-  OpRegImm(kOpAnd, rs_rCX, 1);
-  LIR* jmp_to_begin_loop  = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
-  OpRegImm(kOpSub, rs_rDX, 1);
-  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
-  StoreBaseIndexedDisp(tmp_reg, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
-
-  // Since the remaining number of elements is even, we copy
-  // two elements at a time.
-  LIR* beginLoop = NewLIR0(kPseudoTargetLabel);
-  LIR* jmp_to_ret  = OpCmpImmBranch(kCondEq, rs_rDX, 0, nullptr);
-  OpRegImm(kOpSub, rs_rDX, 2);
-  LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSingle);
-  StoreBaseIndexedDisp(tmp_reg, rs_rDX, 1, 0, rs_rCX, kSingle);
-  OpUnconditionalBranch(beginLoop);
-  LIR *check_failed = NewLIR0(kPseudoTargetLabel);
-  LIR* launchpad_branch  = OpUnconditionalBranch(nullptr);
-  LIR *return_point = NewLIR0(kPseudoTargetLabel);
-  jmp_to_ret->target = return_point;
-  jmp_to_begin_loop->target = beginLoop;
-  src_dst_same->target = check_failed;
-  len_too_big->target = check_failed;
-  src_null_branch->target = check_failed;
-  if (srcPos_negative != nullptr) {
-    srcPos_negative->target = check_failed;
-  }
-  if (src_bad_off != nullptr) {
-    src_bad_off->target = check_failed;
-  }
-  if (src_bad_len != nullptr) {
-    src_bad_len->target = check_failed;
-  }
-  dst_null_branch->target = check_failed;
-  if (dstPos_negative != nullptr) {
-    dstPos_negative->target = check_failed;
-  }
-  if (dst_bad_off != nullptr) {
-    dst_bad_off->target = check_failed;
-  }
-  if (dst_bad_len != nullptr) {
-    dst_bad_len->target = check_failed;
-  }
-  AddIntrinsicSlowPath(info, launchpad_branch, return_point);
-  ClobberCallerSave();  // We must clobber everything because the slow path will return here.
-  return true;
-}
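-
-/*
- * Check summary (illustrative): the inline copy runs only when src != dst,
- * neither array is null, the unsigned length is <= 128 chars, and for each
- * array pos >= 0 and pos + length <= array.length; every failing branch is
- * wired to check_failed, which jumps to the intrinsic slow path.
- */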
-
-/*
- * Fast string.index_of(I) & (II).  Inline check for simple case of char <= 0xffff,
- * otherwise bails to standard library code.
- */
-bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
-  RegLocation rl_obj = info->args[0];
-  RegLocation rl_char = info->args[1];
-  RegLocation rl_start;  // Note: only present in the III flavor of IndexOf.
-  // RBX is promotable in 64-bit mode.
-  RegStorage rs_tmp = cu_->target64 ? rs_r11 : rs_rBX;
-  int start_value = -1;
-
-  uint32_t char_value =
-    rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;
-
-  if (char_value > 0xFFFF) {
-    // We have to punt to the real String.indexOf.
-    return false;
-  }
-
-  // Okay, we are committed to inlining this.
-  // EAX: 16 bit character being searched.
-  // ECX: count: number of words to be searched.
-  // EDI: String being searched.
-  // EDX: temporary during execution.
-  // EBX or R11: temporary during execution (depending on mode).
-  // REP SCASW: search instruction.
-
-  FlushAllRegs();
-
-  RegLocation rl_return = GetReturn(kCoreReg);
-  RegLocation rl_dest = InlineTarget(info);
-
-  // Is the string non-null?
-  LoadValueDirectFixed(rl_obj, rs_rDX);
-  GenNullCheck(rs_rDX, info->opt_flags);
-  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
-
-  LIR *slowpath_branch = nullptr, *length_compare = nullptr;
-
-  // We need the value in EAX.
-  if (rl_char.is_const) {
-    LoadConstantNoClobber(rs_rAX, char_value);
-  } else {
-    // Does the character fit in 16 bits? Compare it at runtime.
-    LoadValueDirectFixed(rl_char, rs_rAX);
-    slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
-  }
-
-  // From here down, we know that we are looking for a char that fits in 16 bits.
-  // Location of reference to data array within the String object.
-  int value_offset = mirror::String::ValueOffset().Int32Value();
-  // Location of count within the String object.
-  int count_offset = mirror::String::CountOffset().Int32Value();
-
-  // Load the number of words to search into rCX.
-  Load32Disp(rs_rDX, count_offset, rs_rCX);
-
-  // Possible signal here due to null pointer dereference.
-  // Note that the signal handler will expect the top word of
-  // the stack to be the ArtMethod*.  If the PUSH edi instruction
-  // below is ahead of the load above then this will not be true
-  // and the signal handler will not work.
-  MarkPossibleNullPointerException(0);
-
-  if (!cu_->target64) {
-    // EDI is promotable in 32-bit mode.
-    NewLIR1(kX86Push32R, rs_rDI.GetReg());
-    cfi_.AdjustCFAOffset(4);
-    // Record cfi only if it is not already spilled.
-    if (!CoreSpillMaskContains(rs_rDI.GetReg())) {
-      cfi_.RelOffset(DwarfCoreReg(cu_->target64, rs_rDI.GetReg()), 0);
-    }
-  }
-
-  if (zero_based) {
-    // Start index is not present.
-    // We have to handle an empty string.  Use special instruction JECXZ.
-    length_compare = NewLIR0(kX86Jecxz8);
-
-    // Copy the number of words to search in a temporary register.
-    // We will use the register at the end to calculate result.
-    OpRegReg(kOpMov, rs_tmp, rs_rCX);
-  } else {
-    // Start index is present.
-    rl_start = info->args[2];
-
-    // We have to offset by the start index.
-    if (rl_start.is_const) {
-      start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
-      start_value = std::max(start_value, 0);
-
-      // Is the start > count?
-      length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);
-      OpRegImm(kOpMov, rs_rDI, start_value);
-
-      // Copy the number of words to search in a temporary register.
-      // We will use the register at the end to calculate result.
-      OpRegReg(kOpMov, rs_tmp, rs_rCX);
-
-      if (start_value != 0) {
-        // Decrease the number of words to search by the start index.
-        OpRegImm(kOpSub, rs_rCX, start_value);
-      }
-    } else {
-      // Handle "start index < 0" case.
-      if (!cu_->target64 && rl_start.location != kLocPhysReg) {
-        // Load the start index from stack, remembering that we pushed EDI.
-        int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
-        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-        Load32Disp(rs_rX86_SP_32, displacement, rs_rDI);
-        // Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
-        DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
-        int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - 1;
-        AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
-      } else {
-        LoadValueDirectFixed(rl_start, rs_rDI);
-      }
-      OpRegReg(kOpXor, rs_tmp, rs_tmp);
-      OpRegReg(kOpCmp, rs_rDI, rs_tmp);
-      OpCondRegReg(kOpCmov, kCondLt, rs_rDI, rs_tmp);
-
-      // The length of the string should be greater than the start index.
-      length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rDI, nullptr);
-
-      // Copy the number of words to search in a temporary register.
-      // We will use the register at the end to calculate result.
-      OpRegReg(kOpMov, rs_tmp, rs_rCX);
-
-      // Decrease the number of words to search by the start index.
-      OpRegReg(kOpSub, rs_rCX, rs_rDI);
-    }
-  }
-
-  // Load the address of the string data into EDI.
-  // If a start index is present, the scaled index in EDI is added in as well.
-  if (zero_based || (rl_start.is_const && start_value == 0)) {
-    OpRegRegImm(kOpAdd, rs_rDI, rs_rDX, value_offset);
-  } else {
-    OpRegImm(kOpLsl, rs_rDI, 1);
-    OpRegReg(kOpAdd, rs_rDI, rs_rDX);
-    OpRegImm(kOpAdd, rs_rDI, value_offset);
-  }
-
-  // EDI now contains the start of the string to be searched.
-  // We are all prepared to do the search for the character.
-  NewLIR0(kX86RepneScasw);
-
-  // Did we find a match?
-  LIR* failed_branch = OpCondBranch(kCondNe, nullptr);
-
-  // Yes, we matched.  Compute the index of the result.
-  OpRegReg(kOpSub, rs_tmp, rs_rCX);
-  NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_tmp.GetReg(), -1);
-
-  LIR *all_done = NewLIR1(kX86Jmp8, 0);
-
-  // Failed to match; return -1.
-  LIR *not_found = NewLIR0(kPseudoTargetLabel);
-  length_compare->target = not_found;
-  failed_branch->target = not_found;
-  LoadConstantNoClobber(rl_return.reg, -1);
-
-  // And join up at the end.
-  all_done->target = NewLIR0(kPseudoTargetLabel);
-
-  if (!cu_->target64) {
-    NewLIR1(kX86Pop32R, rs_rDI.GetReg());
-    cfi_.AdjustCFAOffset(-4);
-    if (!CoreSpillMaskContains(rs_rDI.GetReg())) {
-      cfi_.Restore(DwarfCoreReg(cu_->target64, rs_rDI.GetReg()));
-    }
-  }
-
-  // Out of line code returns here.
-  if (slowpath_branch != nullptr) {
-    LIR *return_point = NewLIR0(kPseudoTargetLabel);
-    AddIntrinsicSlowPath(info, slowpath_branch, return_point);
-    ClobberCallerSave();  // We must clobber everything because the slow path will return here.
-  }
-
-  StoreValue(rl_dest, rl_return);
-  return true;
-}
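-
-/*
- * Sketch of the emitted sequence (illustrative, zero-based 32-bit flavor):
- * eax = char to find, ecx = string count, edi = &string data; repne scasw
- * scans forward, decrementing ecx per compared char. On a hit, the index is
- * (original count - remaining ecx) - 1, computed by the sub/lea pair above;
- * otherwise the result is -1.
- */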
-
-void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
-  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
-    case kMirOpReserveVectorRegisters:
-      ReserveVectorRegisters(mir);
-      break;
-    case kMirOpReturnVectorRegisters:
-      ReturnVectorRegisters(mir);
-      break;
-    case kMirOpConstVector:
-      GenConst128(mir);
-      break;
-    case kMirOpMoveVector:
-      GenMoveVector(mir);
-      break;
-    case kMirOpPackedMultiply:
-      GenMultiplyVector(mir);
-      break;
-    case kMirOpPackedAddition:
-      GenAddVector(mir);
-      break;
-    case kMirOpPackedSubtract:
-      GenSubtractVector(mir);
-      break;
-    case kMirOpPackedShiftLeft:
-      GenShiftLeftVector(mir);
-      break;
-    case kMirOpPackedSignedShiftRight:
-      GenSignedShiftRightVector(mir);
-      break;
-    case kMirOpPackedUnsignedShiftRight:
-      GenUnsignedShiftRightVector(mir);
-      break;
-    case kMirOpPackedAnd:
-      GenAndVector(mir);
-      break;
-    case kMirOpPackedOr:
-      GenOrVector(mir);
-      break;
-    case kMirOpPackedXor:
-      GenXorVector(mir);
-      break;
-    case kMirOpPackedAddReduce:
-      GenAddReduceVector(mir);
-      break;
-    case kMirOpPackedReduce:
-      GenReduceVector(mir);
-      break;
-    case kMirOpPackedSet:
-      GenSetVector(mir);
-      break;
-    case kMirOpMemBarrier:
-      GenMemBarrier(static_cast<MemBarrierKind>(mir->dalvikInsn.vA));
-      break;
-    case kMirOpPackedArrayGet:
-      GenPackedArrayGet(bb, mir);
-      break;
-    case kMirOpPackedArrayPut:
-      GenPackedArrayPut(bb, mir);
-      break;
-    default:
-      break;
-  }
-}
-
-void X86Mir2Lir::ReserveVectorRegisters(MIR* mir) {
-  for (uint32_t i = mir->dalvikInsn.vA; i <= mir->dalvikInsn.vB; i++) {
-    RegStorage xp_reg = RegStorage::Solo128(i);
-    RegisterInfo *xp_reg_info = GetRegInfo(xp_reg);
-    Clobber(xp_reg);
-
-    for (RegisterInfo *info = xp_reg_info->GetAliasChain();
-                       info != nullptr;
-                       info = info->GetAliasChain()) {
-      ArenaVector<RegisterInfo*>* regs =
-          info->GetReg().IsSingle() ? &reg_pool_->sp_regs_ : &reg_pool_->dp_regs_;
-      auto it = std::find(regs->begin(), regs->end(), info);
-      DCHECK(it != regs->end());
-      regs->erase(it);
-    }
-  }
-}
-
-void X86Mir2Lir::ReturnVectorRegisters(MIR* mir) {
-  for (uint32_t i = mir->dalvikInsn.vA; i <= mir->dalvikInsn.vB; i++) {
-    RegStorage xp_reg = RegStorage::Solo128(i);
-    RegisterInfo *xp_reg_info = GetRegInfo(xp_reg);
-
-    for (RegisterInfo *info = xp_reg_info->GetAliasChain();
-                       info != nullptr;
-                       info = info->GetAliasChain()) {
-      if (info->GetReg().IsSingle()) {
-        reg_pool_->sp_regs_.push_back(info);
-      } else {
-        reg_pool_->dp_regs_.push_back(info);
-      }
-    }
-  }
-}
-
-void X86Mir2Lir::GenConst128(MIR* mir) {
-  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
-  Clobber(rs_dest);
-
-  uint32_t *args = mir->dalvikInsn.arg;
-  int reg = rs_dest.GetReg();
-  // Check for all 0 case.
-  if (args[0] == 0 && args[1] == 0 && args[2] == 0 && args[3] == 0) {
-    NewLIR2(kX86XorpsRR, reg, reg);
-    return;
-  }
-
-  // Append the mov const vector to reg opcode.
-  AppendOpcodeWithConst(kX86MovdqaRM, reg, mir);
-}
-
-void X86Mir2Lir::AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir) {
-  // To deal with correct memory ordering, reverse order of constants.
-  int32_t constants[4];
-  constants[3] = mir->dalvikInsn.arg[0];
-  constants[2] = mir->dalvikInsn.arg[1];
-  constants[1] = mir->dalvikInsn.arg[2];
-  constants[0] = mir->dalvikInsn.arg[3];
-
-  // Search if there is already a constant in pool with this value.
-  LIR *data_target = ScanVectorLiteral(constants);
-  if (data_target == nullptr) {
-    data_target = AddVectorLiteral(constants);
-  }
-
-  // Load the proper value from the literal area.
-  // We don't know the proper offset for the value, so pick one that will force
-  // a 4-byte offset.  We will fix this up in the assembler later to have the
-  // right value.
-  LIR* load;
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
-  if (cu_->target64) {
-    load = NewLIR3(opcode, reg, kRIPReg, kDummy32BitOffset);
-  } else {
-    // Get the PC to a register and get the anchor.
-    LIR* anchor;
-    RegStorage r_pc = GetPcAndAnchor(&anchor);
-
-    load = NewLIR3(opcode, reg, r_pc.GetReg(), kDummy32BitOffset);
-    load->operands[4] = WrapPointer(anchor);
-    if (IsTemp(r_pc)) {
-      FreeTemp(r_pc);
-    }
-  }
-  load->flags.fixup = kFixupLoad;
-  load->target = data_target;
-}
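-
-/*
- * Illustrative result: on x86-64 this emits, e.g., "movdqa xmm1, [rip+off]"
- * where off is kDummy32BitOffset until assembly, at which point the
- * kFixupLoad fixup rewrites it into the real distance to the 16-byte
- * literal; 32-bit x86 uses the materialized PC in r_pc plus the anchor
- * label instead of RIP.
- */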
-
-void X86Mir2Lir::GenMoveVector(MIR* mir) {
-  // We only support 128 bit registers.
-  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
-  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
-  Clobber(rs_dest);
-  RegStorage rs_src = RegStorage::Solo128(mir->dalvikInsn.vB);
-  NewLIR2(kX86MovdqaRR, rs_dest.GetReg(), rs_src.GetReg());
-}
-
-void X86Mir2Lir::GenMultiplyVectorSignedByte(RegStorage rs_dest_src1, RegStorage rs_src2) {
-  /*
-   * Emulate a kSignedByte multiply by separating the 16 byte values in the two XMM
-   * registers into halves, multiplying 8 at a time, and recombining the results
-   * back into one XMM register.
-   *
-   *   let xmm1, xmm2 be real srcs (keep low bits of 16bit lanes)
-   *       xmm3 is tmp             (operate on high bits of 16bit lanes)
-   *
-   *    xmm3 = xmm1
-   *    xmm1 = xmm1 .* xmm2
-   *    xmm1 = xmm1 & 0x00ff00ff00ff00ff00ff00ff00ff00ff  // xmm1 now has low bits
-   *    xmm3 = xmm3 .>> 8
-   *    xmm2 = xmm2 & 0xff00ff00ff00ff00ff00ff00ff00ff00
-   *    xmm2 = xmm2 .* xmm3                               // xmm2 now has high bits
-   *    xmm1 = xmm1 | xmm2                                // combine results
-   */
-
-  // Copy xmm1.
-  RegStorage rs_src1_high_tmp = Get128BitRegister(AllocTempDouble());
-  RegStorage rs_dest_high_tmp = Get128BitRegister(AllocTempDouble());
-  NewLIR2(kX86MovdqaRR, rs_src1_high_tmp.GetReg(), rs_src2.GetReg());
-  NewLIR2(kX86MovdqaRR, rs_dest_high_tmp.GetReg(), rs_dest_src1.GetReg());
-
-  // Multiply the low bytes of each 16-bit lane: dest *= src2.
-  NewLIR2(kX86PmullwRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
-
-  // xmm1 now has low bits.
-  AndMaskVectorRegister(rs_dest_src1, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF);
-
-  // Prepare high bits for multiplication.
-  NewLIR2(kX86PsrlwRI, rs_src1_high_tmp.GetReg(), 0x8);
-  AndMaskVectorRegister(rs_dest_high_tmp,  0xFF00FF00, 0xFF00FF00, 0xFF00FF00, 0xFF00FF00);
-
-  // Multiply high bits and xmm2 now has high bits.
-  NewLIR2(kX86PmullwRR, rs_src1_high_tmp.GetReg(), rs_dest_high_tmp.GetReg());
-
-  // Combine back into dest XMM register.
-  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src1_high_tmp.GetReg());
-}
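-
-/*
- * Worked example (illustrative), one 16-bit lane: dest = 0x0503 and
- * src2 = 0x0204. pmullw(dest, src2) gives 0x1A0C; masking with 0x00FF
- * keeps the low-byte product 0x0C (3 * 4). The copy of src2 shifted
- * right by 8 (0x0002) times the copy of dest masked with 0xFF00 (0x0500)
- * gives the high-byte product 0x0A00 (5 * 2), and por recombines 0x0A0C.
- */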
-
-void X86Mir2Lir::GenMultiplyVectorLong(RegStorage rs_dest_src1, RegStorage rs_src2) {
-  /*
-   * We need to emulate the packed long multiply.
-   * For kMirOpPackedMultiply xmm1, xmm0:
-   * - xmm1 is src/dest
-   * - xmm0 is src
-   * - Get xmm2 and xmm3 as temp
-   * - Idea is to multiply the lower 32 of each operand with the higher 32 of the other.
-   * - Then add the two results.
-   * - Move it to the upper 32 of the destination
-   * - Then multiply the lower 32-bits of the operands and add the result to the destination.
-   *
-   * (op     dest   src )
-   * movdqa  %xmm2, %xmm1
-   * movdqa  %xmm3, %xmm0
-   * psrlq   %xmm3, $0x20
-   * pmuludq %xmm3, %xmm2
-   * psrlq   %xmm1, $0x20
-   * pmuludq %xmm1, %xmm0
-   * paddq   %xmm1, %xmm3
-   * psllq   %xmm1, $0x20
-   * pmuludq %xmm2, %xmm0
-   * paddq   %xmm1, %xmm2
-   *
-   * When both the operands are the same, then we need to calculate the lower-32 * higher-32
-   * calculation only once. Thus we don't need the xmm3 temp above. That sequence becomes:
-   *
-   * (op     dest   src )
-   * movdqa  %xmm2, %xmm1
-   * psrlq   %xmm1, $0x20
-   * pmuludq %xmm1, %xmm0
-   * paddq   %xmm1, %xmm1
-   * psllq   %xmm1, $0x20
-   * pmuludq %xmm2, %xmm0
-   * paddq   %xmm1, %xmm2
-   *
-   */
-
-  bool both_operands_same = (rs_dest_src1.GetReg() == rs_src2.GetReg());
-
-  RegStorage rs_tmp_vector_1;
-  RegStorage rs_tmp_vector_2;
-  rs_tmp_vector_1 = Get128BitRegister(AllocTempDouble());
-  NewLIR2(kX86MovdqaRR, rs_tmp_vector_1.GetReg(), rs_dest_src1.GetReg());
-
-  if (both_operands_same == false) {
-    rs_tmp_vector_2 = Get128BitRegister(AllocTempDouble());
-    NewLIR2(kX86MovdqaRR, rs_tmp_vector_2.GetReg(), rs_src2.GetReg());
-    NewLIR2(kX86PsrlqRI, rs_tmp_vector_2.GetReg(), 0x20);
-    NewLIR2(kX86PmuludqRR, rs_tmp_vector_2.GetReg(), rs_tmp_vector_1.GetReg());
-  }
-
-  NewLIR2(kX86PsrlqRI, rs_dest_src1.GetReg(), 0x20);
-  NewLIR2(kX86PmuludqRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
-
-  if (both_operands_same == false) {
-    NewLIR2(kX86PaddqRR, rs_dest_src1.GetReg(), rs_tmp_vector_2.GetReg());
-  } else {
-    NewLIR2(kX86PaddqRR, rs_dest_src1.GetReg(), rs_dest_src1.GetReg());
-  }
-
-  NewLIR2(kX86PsllqRI, rs_dest_src1.GetReg(), 0x20);
-  NewLIR2(kX86PmuludqRR, rs_tmp_vector_1.GetReg(), rs_src2.GetReg());
-  NewLIR2(kX86PaddqRR, rs_dest_src1.GetReg(), rs_tmp_vector_1.GetReg());
-}
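-
-/*
- * The decomposition being used (illustrative): for 64-bit lanes a and b,
- *   a * b mod 2^64 == a_lo * b_lo + ((a_lo * b_hi + a_hi * b_lo) << 32)
- * where pmuludq supplies the unsigned 32x32->64 products; the a_hi * b_hi
- * term is dropped because it only contributes to bits >= 64.
- */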
-
-void X86Mir2Lir::GenMultiplyVector(MIR* mir) {
-  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
-  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
-  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
-  Clobber(rs_dest_src1);
-  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
-  int opcode = 0;
-  switch (opsize) {
-    case k32:
-      opcode = kX86PmulldRR;
-      break;
-    case kSignedHalf:
-      opcode = kX86PmullwRR;
-      break;
-    case kSingle:
-      opcode = kX86MulpsRR;
-      break;
-    case kDouble:
-      opcode = kX86MulpdRR;
-      break;
-    case kSignedByte:
-      // HW doesn't support 16x16 byte multiplication so emulate it.
-      GenMultiplyVectorSignedByte(rs_dest_src1, rs_src2);
-      return;
-    case k64:
-      GenMultiplyVectorLong(rs_dest_src1, rs_src2);
-      return;
-    default:
-      LOG(FATAL) << "Unsupported vector multiply " << opsize;
-      break;
-  }
-  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
-}
-
-void X86Mir2Lir::GenAddVector(MIR* mir) {
-  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
-  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
-  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
-  Clobber(rs_dest_src1);
-  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
-  int opcode = 0;
-  switch (opsize) {
-    case k32:
-      opcode = kX86PadddRR;
-      break;
-    case k64:
-      opcode = kX86PaddqRR;
-      break;
-    case kSignedHalf:
-    case kUnsignedHalf:
-      opcode = kX86PaddwRR;
-      break;
-    case kUnsignedByte:
-    case kSignedByte:
-      opcode = kX86PaddbRR;
-      break;
-    case kSingle:
-      opcode = kX86AddpsRR;
-      break;
-    case kDouble:
-      opcode = kX86AddpdRR;
-      break;
-    default:
-      LOG(FATAL) << "Unsupported vector addition " << opsize;
-      break;
-  }
-  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
-}
-
-void X86Mir2Lir::GenSubtractVector(MIR* mir) {
-  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
-  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
-  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
-  Clobber(rs_dest_src1);
-  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
-  int opcode = 0;
-  switch (opsize) {
-    case k32:
-      opcode = kX86PsubdRR;
-      break;
-    case k64:
-      opcode = kX86PsubqRR;
-      break;
-    case kSignedHalf:
-    case kUnsignedHalf:
-      opcode = kX86PsubwRR;
-      break;
-    case kUnsignedByte:
-    case kSignedByte:
-      opcode = kX86PsubbRR;
-      break;
-    case kSingle:
-      opcode = kX86SubpsRR;
-      break;
-    case kDouble:
-      opcode = kX86SubpdRR;
-      break;
-    default:
-      LOG(FATAL) << "Unsupported vector subtraction " << opsize;
-      break;
-  }
-  NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
-}
-
-void X86Mir2Lir::GenShiftByteVector(MIR* mir) {
-  // The destination does not need to be clobbered here because that has already
-  // been done by the general packed shift handler (the caller of this method).
-  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
-
-  int opcode = 0;
-  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
-    case kMirOpPackedShiftLeft:
-      opcode = kX86PsllwRI;
-      break;
-    case kMirOpPackedSignedShiftRight:
-    case kMirOpPackedUnsignedShiftRight:
-      // TODO Add support for emulated byte shifts.
-    default:
-      LOG(FATAL) << "Unsupported shift operation on byte vector " << opcode;
-      break;
-  }
-
-  // Clear the xmm register and return if shifting by the byte width or more.
-  int imm = mir->dalvikInsn.vB;
-  if (imm >= 8) {
-    NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_dest_src1.GetReg());
-    return;
-  }
-
-  // Shift lower values.
-  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
-
-  /*
-   * The shift above operates on whole 16-bit words, so bits from each
-   * low byte spill into the neighboring high byte. To emulate a byte
-   * level shift, we throw away the low N bits of every byte, which is
-   * where the spilled bits land, and we are done.
-   */
-  uint8_t byte_mask = 0xFF << imm;
-  uint32_t int_mask = byte_mask;
-  int_mask = int_mask << 8 | byte_mask;
-  int_mask = int_mask << 8 | byte_mask;
-  int_mask = int_mask << 8 | byte_mask;
-
-  // AND the destination with the mask.
-  AndMaskVectorRegister(rs_dest_src1, int_mask, int_mask, int_mask, int_mask);
-}
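-
-/*
- * Worked example (illustrative): for a left shift by imm == 3,
- * byte_mask == 0xFF << 3 == 0xF8 and int_mask == 0xF8F8F8F8. Masking
- * clears the low three bits of every byte, exactly the positions into
- * which bits from the neighboring byte leaked during the 16-bit shift.
- */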
-
-void X86Mir2Lir::GenShiftLeftVector(MIR* mir) {
-  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
-  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
-  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
-  Clobber(rs_dest_src1);
-  int imm = mir->dalvikInsn.vB;
-  int opcode = 0;
-  switch (opsize) {
-    case k32:
-      opcode = kX86PslldRI;
-      break;
-    case k64:
-      opcode = kX86PsllqRI;
-      break;
-    case kSignedHalf:
-    case kUnsignedHalf:
-      opcode = kX86PsllwRI;
-      break;
-    case kSignedByte:
-    case kUnsignedByte:
-      GenShiftByteVector(mir);
-      return;
-    default:
-      LOG(FATAL) << "Unsupported vector shift left " << opsize;
-      break;
-  }
-  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
-}
-
-void X86Mir2Lir::GenSignedShiftRightVector(MIR* mir) {
-  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
-  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
-  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
-  Clobber(rs_dest_src1);
-  int imm = mir->dalvikInsn.vB;
-  int opcode = 0;
-  switch (opsize) {
-    case k32:
-      opcode = kX86PsradRI;
-      break;
-    case kSignedHalf:
-    case kUnsignedHalf:
-      opcode = kX86PsrawRI;
-      break;
-    case kSignedByte:
-    case kUnsignedByte:
-      GenShiftByteVector(mir);
-      return;
-    case k64:
-      // TODO Implement emulated shift algorithm.
-    default:
-      LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
-      UNREACHABLE();
-  }
-  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
-}
-
-void X86Mir2Lir::GenUnsignedShiftRightVector(MIR* mir) {
-  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
-  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
-  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
-  Clobber(rs_dest_src1);
-  int imm = mir->dalvikInsn.vB;
-  int opcode = 0;
-  switch (opsize) {
-    case k32:
-      opcode = kX86PsrldRI;
-      break;
-    case k64:
-      opcode = kX86PsrlqRI;
-      break;
-    case kSignedHalf:
-    case kUnsignedHalf:
-      opcode = kX86PsrlwRI;
-      break;
-    case kSignedByte:
-    case kUnsignedByte:
-      GenShiftByteVector(mir);
-      return;
-    default:
-      LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
-      break;
-  }
-  NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
-}
-
-void X86Mir2Lir::GenAndVector(MIR* mir) {
-  // We only support 128 bit registers.
-  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
-  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
-  Clobber(rs_dest_src1);
-  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
-  NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
-}
-
-void X86Mir2Lir::GenOrVector(MIR* mir) {
-  // We only support 128-bit registers.
-  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
-  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
-  Clobber(rs_dest_src1);
-  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
-  NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
-}
-
-void X86Mir2Lir::GenXorVector(MIR* mir) {
-  // We only support 128-bit registers.
-  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
-  RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
-  Clobber(rs_dest_src1);
-  RegStorage rs_src2 = RegStorage::Solo128(mir->dalvikInsn.vB);
-  NewLIR2(kX86PxorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
-}
-
-void X86Mir2Lir::AndMaskVectorRegister(RegStorage rs_src1, uint32_t m0, uint32_t m1, uint32_t m2,
-                                       uint32_t m3) {
-  MaskVectorRegister(kX86PandRM, rs_src1, m0, m1, m2, m3);
-}
-
-void X86Mir2Lir::MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m0, uint32_t m1,
-                                    uint32_t m2, uint32_t m3) {
-  // Create temporary MIR as container for 128-bit binary mask.
-  MIR const_mir;
-  MIR* const_mirp = &const_mir;
-  const_mirp->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpConstVector);
-  const_mirp->dalvikInsn.arg[0] = m0;
-  const_mirp->dalvikInsn.arg[1] = m1;
-  const_mirp->dalvikInsn.arg[2] = m2;
-  const_mirp->dalvikInsn.arg[3] = m3;
-
-  // Mask vector with const from literal pool.
-  AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp);
-}
-
-void X86Mir2Lir::GenAddReduceVector(MIR* mir) {
-  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
-  RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
-  bool is_wide = opsize == k64 || opsize == kDouble;
-
-  // Get the location of the virtual register. Since this bytecode is overloaded
-  // for different types (and sizes), we need different logic for each path.
-  // By design, the bytecode uses the same VR for both source and destination.
-  RegLocation rl_src, rl_dest, rl_result;
-  if (is_wide) {
-    rl_src = mir_graph_->GetSrcWide(mir, 0);
-    rl_dest = mir_graph_->GetDestWide(mir);
-  } else {
-    rl_src = mir_graph_->GetSrc(mir, 0);
-    rl_dest = mir_graph_->GetDest(mir);
-  }
-
-  // We need a temp for the integral (byte, short and int) cases.
-  RegStorage temp;
-
-  // There is a different path depending on type and size.
-  if (opsize == kSingle) {
-    // Handle float case.
-    // TODO Add support for fast math (not value safe) and do horizontal add in that case.
-
-    rl_src = LoadValue(rl_src, kFPReg);
-    rl_result = EvalLoc(rl_dest, kFPReg, true);
-
-    // Since we are doing an add-reduce, move the reg holding the VR
-    // into the result so that it is included in the sum.
-    OpRegCopy(rl_result.reg, rl_src.reg);
-    NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), vector_src.GetReg());
-
-    // Since FP addition must preserve the order of operations for value
-    // safety, rotate each lane into the low 32 bits and add it to the result.
-    for (int i = 0; i < 3; i++) {
-      NewLIR3(kX86ShufpsRRI, vector_src.GetReg(), vector_src.GetReg(), 0x39);
-      NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), vector_src.GetReg());
-    }
-
-    StoreValue(rl_dest, rl_result);
-  } else if (opsize == kDouble) {
-    // TODO Handle the double case; currently unsupported.
-    rl_src = LoadValueWide(rl_src, kFPReg);
-    rl_result = EvalLocWide(rl_dest, kFPReg, true);
-    LOG(FATAL) << "Unsupported vector add reduce for double.";
-  } else if (opsize == k64) {
-    /*
-     * Handle long case:
-     * 1) Reduce the vector register to lower half (with addition).
-     * 1-1) Get an xmm temp and fill it with vector register.
-     * 1-2) Shift the xmm temp by 8-bytes.
-     * 1-3) Add the xmm temp to vector register that is being reduced.
-     * 2) Allocate temp GP / GP pair.
-     * 2-1) In 64-bit case, use movq to move result to a 64-bit GP.
-     * 2-2) In 32-bit case, use movd twice to move to 32-bit GP pair.
-     * 3) Finish the add reduction by doing what add-long/2addr does,
-     * but instead of having a VR as one of the sources, we have our temp GP.
-     */
-    RegStorage rs_tmp_vector = Get128BitRegister(AllocTempDouble());
-    NewLIR2(kX86MovdqaRR, rs_tmp_vector.GetReg(), vector_src.GetReg());
-    NewLIR2(kX86PsrldqRI, rs_tmp_vector.GetReg(), 8);
-    NewLIR2(kX86PaddqRR, vector_src.GetReg(), rs_tmp_vector.GetReg());
-    FreeTemp(rs_tmp_vector);
-
-    // We would like to be able to reuse the add-long implementation, so set up a fake
-    // register location to pass to it.
-    RegLocation temp_loc = mir_graph_->GetBadLoc();
-    temp_loc.core = 1;
-    temp_loc.wide = 1;
-    temp_loc.location = kLocPhysReg;
-    temp_loc.reg = AllocTempWide();
-
-    if (cu_->target64) {
-      DCHECK(!temp_loc.reg.IsPair());
-      NewLIR2(kX86MovqrxRR, temp_loc.reg.GetReg(), vector_src.GetReg());
-    } else {
-      NewLIR2(kX86MovdrxRR, temp_loc.reg.GetLowReg(), vector_src.GetReg());
-      NewLIR2(kX86PsrlqRI, vector_src.GetReg(), 0x20);
-      NewLIR2(kX86MovdrxRR, temp_loc.reg.GetHighReg(), vector_src.GetReg());
-    }
-
-    GenArithOpLong(Instruction::ADD_LONG_2ADDR, rl_dest, temp_loc, temp_loc, mir->optimization_flags);
-  } else if (opsize == kSignedByte || opsize == kUnsignedByte) {
-    RegStorage rs_tmp = Get128BitRegister(AllocTempDouble());
-    NewLIR2(kX86PxorRR, rs_tmp.GetReg(), rs_tmp.GetReg());
-    NewLIR2(kX86PsadbwRR, vector_src.GetReg(), rs_tmp.GetReg());
-    NewLIR3(kX86PshufdRRI, rs_tmp.GetReg(), vector_src.GetReg(), 0x4e);
-    NewLIR2(kX86PaddbRR, vector_src.GetReg(), rs_tmp.GetReg());
-    // Move the reduced value to a GPR.
-    temp = AllocTemp();
-    NewLIR2(kX86MovdrxRR, temp.GetReg(), vector_src.GetReg());
-  } else {
-    // Handle the int and short cases together.
-
-    // Pick the element size and the extract and horizontal-add opcodes
-    // according to the operand size.
-    int vec_bytes = (mir->dalvikInsn.vC & 0xFFFF) / 8;
-    int vec_unit_size;
-    int horizontal_add_opcode;
-    int extract_opcode;
-
-    if (opsize == kSignedHalf || opsize == kUnsignedHalf) {
-      extract_opcode = kX86PextrwRRI;
-      horizontal_add_opcode = kX86PhaddwRR;
-      vec_unit_size = 2;
-    } else if (opsize == k32) {
-      vec_unit_size = 4;
-      horizontal_add_opcode = kX86PhadddRR;
-      extract_opcode = kX86PextrdRRI;
-    } else {
-      LOG(FATAL) << "Unsupported vector add reduce " << opsize;
-      return;
-    }
-
-    int elems = vec_bytes / vec_unit_size;
-
-    while (elems > 1) {
-      NewLIR2(horizontal_add_opcode, vector_src.GetReg(), vector_src.GetReg());
-      elems >>= 1;
-    }
-
-    // Handle this as arithmetic unary case.
-    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-
-    // Extract to a GP register because this is integral typed.
-    temp = AllocTemp();
-    NewLIR3(extract_opcode, temp.GetReg(), vector_src.GetReg(), 0);
-  }
-
-  if (opsize != k64 && opsize != kSingle && opsize != kDouble) {
-    // The logic below is very similar to the handling of ADD_INT_2ADDR,
-    // except that the rhs is not a VR but the physical register allocated
-    // above. The source VR is not loaded because rl_result is assumed to
-    // share its physical register / memory location.
-    rl_result = UpdateLocTyped(rl_dest);
-    if (rl_result.location == kLocPhysReg) {
-      // Ensure res is in a core reg.
-      rl_result = EvalLoc(rl_dest, kCoreReg, true);
-      OpRegReg(kOpAdd, rl_result.reg, temp);
-      StoreFinalValue(rl_dest, rl_result);
-    } else {
-      // Do the addition directly to memory.
-      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-      OpMemReg(kOpAdd, rl_result, temp.GetReg());
-    }
-  }
-}
-
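-// Minimal sketch (plain C++, hypothetical helper) of the value-safe float
-// reduction above: the three shufps(0x39) rotations feed lanes 1, 2 and 3
-// into the low slot one by one, so the scalar equivalent is a strictly
-// in-order accumulation rather than a pairwise horizontal add.
-static inline float AddReduceFloat4InOrder(const float lanes[4], float vr_value) {
-  float acc = vr_value;         // OpRegCopy of the VR into the result
-  for (int i = 0; i < 4; i++) {
-    acc += lanes[i];            // addss after each rotation
-  }
-  return acc;
-}
-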
-void X86Mir2Lir::GenReduceVector(MIR* mir) {
-  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
-  RegLocation rl_dest = mir_graph_->GetDest(mir);
-  RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
-  RegLocation rl_result;
-  bool is_wide = false;
-
-  // There is a different path depending on type and size.
-  if (opsize == kSingle) {
-    // Handle float case.
-    // TODO Add support for fast math (not value safe) and do horizontal add in that case.
-
-    int extract_index = mir->dalvikInsn.arg[0];
-
-    rl_result = EvalLoc(rl_dest, kFPReg, true);
-    NewLIR2(kX86PxorRR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
-
-    if (LIKELY(extract_index != 0)) {
-      // We know the index of the element we want to extract, and the values in
-      // the vector register must stay intact for future use. So we proceed as follows:
-      // 1. Generate a shuffle mask that swaps the zeroth and the required element;
-      // 2. Shuffle the vector register with this mask;
-      // 3. Extract the zeroth element, where the required value now lies;
-      // 4. Shuffle with the same mask again to restore the original values.
-      // The mask is derived from the identity mask 0b11100100 by swapping the 0th
-      // and the extracted element's indices.
-      int shuffle[4] = {0b00, 0b01, 0b10, 0b11};
-      shuffle[0] = extract_index;
-      shuffle[extract_index] = 0;
-      int mask = 0;
-      for (int i = 0; i < 4; i++) {
-        mask |= (shuffle[i] << (2 * i));
-      }
-      NewLIR3(kX86ShufpsRRI, vector_src.GetReg(), vector_src.GetReg(), mask);
-      NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), vector_src.GetReg());
-      NewLIR3(kX86ShufpsRRI, vector_src.GetReg(), vector_src.GetReg(), mask);
-    } else {
-      // The zeroth element is extracted directly; no shuffling is needed.
-      NewLIR2(kX86AddssRR, rl_result.reg.GetReg(), vector_src.GetReg());
-    }
-
-    StoreFinalValue(rl_dest, rl_result);
-  } else if (opsize == kDouble) {
-    // TODO Handle double case.
-    LOG(FATAL) << "Unsupported add reduce for double.";
-  } else if (opsize == k64) {
-    /*
-     * Handle long case:
-     * 1) Reduce the vector register to lower half (with addition).
-     * 1-1) Get an xmm temp and fill it with vector register.
-     * 1-2) Shift the xmm temp by 8-bytes.
-     * 1-3) Add the xmm temp to vector register that is being reduced.
-     * 2) Evaluate destination to a GP / GP pair.
-     * 2-1) In 64-bit case, use movq to move result to a 64-bit GP.
-     * 2-2) In 32-bit case, use movd twice to move to 32-bit GP pair.
-     * 3) Store the result to the final destination.
-     */
-    NewLIR2(kX86PsrldqRI, vector_src.GetReg(), 8);
-    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-    if (cu_->target64) {
-      DCHECK(!rl_result.reg.IsPair());
-      NewLIR2(kX86MovqrxRR, rl_result.reg.GetReg(), vector_src.GetReg());
-    } else {
-      NewLIR2(kX86MovdrxRR, rl_result.reg.GetLowReg(), vector_src.GetReg());
-      NewLIR2(kX86PsrlqRI, vector_src.GetReg(), 0x20);
-      NewLIR2(kX86MovdrxRR, rl_result.reg.GetHighReg(), vector_src.GetReg());
-    }
-
-    StoreValueWide(rl_dest, rl_result);
-  } else {
-    int extract_index = mir->dalvikInsn.arg[0];
-    int extr_opcode = 0;
-    rl_result = UpdateLocTyped(rl_dest);
-
-    // Handle the remaining integral types.
-    switch (opsize) {
-      case k32:
-        extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrdRRI : kX86PextrdMRI;
-        break;
-      case kSignedHalf:
-      case kUnsignedHalf:
-        extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrwRRI : kX86PextrwMRI;
-        break;
-      case kSignedByte:
-        extr_opcode = (rl_result.location == kLocPhysReg) ? kX86PextrbRRI : kX86PextrbMRI;
-        break;
-      default:
-        LOG(FATAL) << "Unsupported vector reduce " << opsize;
-        UNREACHABLE();
-    }
-
-    if (rl_result.location == kLocPhysReg) {
-      NewLIR3(extr_opcode, rl_result.reg.GetReg(), vector_src.GetReg(), extract_index);
-      StoreFinalValue(rl_dest, rl_result);
-    } else {
-      int displacement = SRegOffset(rl_result.s_reg_low);
-      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-      LIR *l = NewLIR4(extr_opcode, rs_rX86_SP_32.GetReg(), displacement, vector_src.GetReg(),
-                       extract_index);
-      AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is_wide /* is_64bit */);
-    }
-  }
-}
-
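-// Hypothetical standalone sketch of the shuffle-mask construction above:
-// start from the identity mask 0b11100100 (lanes 3,2,1,0) and swap lane 0
-// with the extracted lane, so applying the same mask twice restores the
-// register. For extract_index == 2 this yields 0b11000110.
-static inline int BuildSwapWithLane0Mask(int extract_index) {
-  int shuffle[4] = {0, 1, 2, 3};    // identity: lane i selects element i
-  shuffle[0] = extract_index;
-  shuffle[extract_index] = 0;
-  int mask = 0;
-  for (int i = 0; i < 4; i++) {
-    mask |= shuffle[i] << (2 * i);  // two bits per lane selector
-  }
-  return mask;
-}
-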
-void X86Mir2Lir::LoadVectorRegister(RegStorage rs_dest, RegStorage rs_src,
-                                    OpSize opsize, int op_mov) {
-  if (!cu_->target64 && opsize == k64) {
-    // Logic assumes that longs are loaded in GP register pairs.
-    NewLIR2(kX86MovdxrRR, rs_dest.GetReg(), rs_src.GetLowReg());
-    RegStorage r_tmp = AllocTempDouble();
-    NewLIR2(kX86MovdxrRR, r_tmp.GetReg(), rs_src.GetHighReg());
-    NewLIR2(kX86PunpckldqRR, rs_dest.GetReg(), r_tmp.GetReg());
-    FreeTemp(r_tmp);
-  } else {
-    NewLIR2(op_mov, rs_dest.GetReg(), rs_src.GetReg());
-  }
-}
-
-void X86Mir2Lir::GenSetVector(MIR* mir) {
-  DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
-  OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
-  RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
-  Clobber(rs_dest);
-  int op_shuffle = 0, op_shuffle_high = 0, op_mov = kX86MovdxrRR;
-  RegisterClass reg_type = kCoreReg;
-  bool is_wide = false;
-
-  switch (opsize) {
-    case k32:
-      op_shuffle = kX86PshufdRRI;
-      break;
-    case kSingle:
-      op_shuffle = kX86PshufdRRI;
-      op_mov = kX86MovdqaRR;
-      reg_type = kFPReg;
-      break;
-    case k64:
-      op_shuffle = kX86PunpcklqdqRR;
-      op_mov = kX86MovqxrRR;
-      is_wide = true;
-      break;
-    case kSignedByte:
-    case kUnsignedByte:
-      // The source will have been widened to a
-      // double-word before this shuffle is used.
-      op_shuffle = kX86PshufdRRI;
-      break;
-    case kSignedHalf:
-    case kUnsignedHalf:
-      // Handles low quadword.
-      op_shuffle = kX86PshuflwRRI;
-      // Handles upper quadword.
-      op_shuffle_high = kX86PshufdRRI;
-      break;
-    default:
-      LOG(FATAL) << "Unsupported vector set " << opsize;
-      break;
-  }
-
-  // Load the value from the VR into a physical register.
-  RegLocation rl_src;
-  if (!is_wide) {
-    rl_src = mir_graph_->GetSrc(mir, 0);
-    rl_src = LoadValue(rl_src, reg_type);
-  } else {
-    rl_src = mir_graph_->GetSrcWide(mir, 0);
-    rl_src = LoadValueWide(rl_src, reg_type);
-  }
-  RegStorage reg_to_shuffle = rl_src.reg;
-
-  // Load the value into the XMM register.
-  LoadVectorRegister(rs_dest, reg_to_shuffle, opsize, op_mov);
-
-  if (opsize == kSignedByte || opsize == kUnsignedByte) {
-    // In the byte case, first duplicate it to be a word,
-    // then duplicate it to be a double-word.
-    NewLIR2(kX86PunpcklbwRR, rs_dest.GetReg(), rs_dest.GetReg());
-    NewLIR2(kX86PunpcklwdRR, rs_dest.GetReg(), rs_dest.GetReg());
-  }
-
-  // Now shuffle the value across the destination.
-  if (op_shuffle == kX86PunpcklqdqRR) {
-    NewLIR2(op_shuffle, rs_dest.GetReg(), rs_dest.GetReg());
-  } else {
-    NewLIR3(op_shuffle, rs_dest.GetReg(), rs_dest.GetReg(), 0);
-  }
-
-  // And then repeat as needed.
-  if (op_shuffle_high != 0) {
-    NewLIR3(op_shuffle_high, rs_dest.GetReg(), rs_dest.GetReg(), 0);
-  }
-}
-
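-// Sketch (plain C++, hypothetical helper; <cstdint> types assumed) of the
-// byte broadcast performed above: self-interleaving widens byte -> word ->
-// double-word, which is what punpcklbw/punpcklwd against the register itself
-// achieve before the final pshufd(0) splats the double-word across all lanes.
-static inline uint32_t BroadcastByteToDword(uint8_t b) {
-  uint16_t w = static_cast<uint16_t>((b << 8) | b);   // punpcklbw rs, rs
-  return (static_cast<uint32_t>(w) << 16) | w;        // punpcklwd rs, rs
-}
-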
-void X86Mir2Lir::GenPackedArrayGet(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayGet not supported.";
-}
-
-void X86Mir2Lir::GenPackedArrayPut(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayPut not supported.";
-}
-
-LIR* X86Mir2Lir::ScanVectorLiteral(int32_t* constants) {
-  for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
-    if (constants[0] == p->operands[0] && constants[1] == p->operands[1] &&
-        constants[2] == p->operands[2] && constants[3] == p->operands[3]) {
-      return p;
-    }
-  }
-  return nullptr;
-}
-
-LIR* X86Mir2Lir::AddVectorLiteral(int32_t* constants) {
-  LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
-  new_value->operands[0] = constants[0];
-  new_value->operands[1] = constants[1];
-  new_value->operands[2] = constants[2];
-  new_value->operands[3] = constants[3];
-  new_value->next = const_vectors_;
-  if (const_vectors_ == nullptr) {
-    estimated_native_code_size_ += 12;  // Maximum padding needed to align to a 16-byte boundary.
-  }
-  estimated_native_code_size_ += 16;  // Space for one vector.
-  const_vectors_ = new_value;
-  return new_value;
-}
-
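-// Hypothetical usage of the two routines above (interning a 128-bit
-// constant): equal vectors share one 16-byte literal-pool slot, and the
-// first insertion also reserves up to 12 bytes to 16-byte-align the pool.
-//
-//   LIR* lit = ScanVectorLiteral(constants);
-//   if (lit == nullptr) {
-//     lit = AddVectorLiteral(constants);
-//   }
-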
-// ------------ ABI support: mapping of args to physical registers -------------
-RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(ShortyArg arg) {
-  const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5};
-  const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
-  const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3,
-                                                             kFArg4, kFArg5, kFArg6, kFArg7};
-  const size_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
-
-  if (arg.IsFP()) {
-    if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
-      return m2l_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++],
-                             arg.IsWide() ? kWide : kNotWide);
-    }
-  } else {
-    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
-      return m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++],
-                             arg.IsRef() ? kRef : (arg.IsWide() ? kWide : kNotWide));
-    }
-  }
-  return RegStorage::InvalidReg();
-}
-
-RegStorage X86Mir2Lir::InToRegStorageX86Mapper::GetNextReg(ShortyArg arg) {
-  const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3};
-  const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
-  const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3};
-  const size_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
-
-  RegStorage result = RegStorage::InvalidReg();
-  if (arg.IsFP()) {
-    if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
-      return m2l_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++],
-                             arg.IsWide() ? kWide : kNotWide);
-    }
-  } else if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
-    result = m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++],
-                             arg.IsRef() ? kRef : kNotWide);
-    if (arg.IsWide()) {
-      // This must be a long, as double is handled above.
-      // Ensure that we don't split a long across the last register and the stack.
-      if (cur_core_reg_ == coreArgMappingToPhysicalRegSize) {
-        // Leave the last core register unused and force the whole long to the stack.
-        cur_core_reg_++;
-        result = RegStorage::InvalidReg();
-      } else if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
-        result = RegStorage::MakeRegPair(
-            result, m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], kNotWide));
-      }
-    }
-  }
-  return result;
-}
-
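-// Worked example for the 32-bit mapper above (hypothetical shorty "IIJ",
-// core arg regs {kArg1, kArg2, kArg3}): the two ints take kArg1 and kArg2;
-// the long would then start at kArg3, the last core register, so the mapper
-// burns kArg3 and returns InvalidReg, forcing the whole long onto the stack
-// rather than splitting it between a register and memory.
-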
-// ------------ End of ABI support: mapping of args to physical registers ------------
-
-bool X86Mir2Lir::GenInlinedCharAt(CallInfo* info) {
-  // Location of reference to data array
-  int value_offset = mirror::String::ValueOffset().Int32Value();
-  // Location of count
-  int count_offset = mirror::String::CountOffset().Int32Value();
-
-  RegLocation rl_obj = info->args[0];
-  RegLocation rl_idx = info->args[1];
-  rl_obj = LoadValue(rl_obj, kRefReg);
-  rl_idx = LoadValue(rl_idx, kCoreReg);
-  RegStorage reg_max;
-  GenNullCheck(rl_obj.reg, info->opt_flags);
-  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
-  LIR* range_check_branch = nullptr;
-  if (range_check) {
-    // On x86, we can compare to memory directly.
-    // Set up a launch pad to allow retry in case of bounds violation.
-    if (rl_idx.is_const) {
-      LIR* comparison;
-      range_check_branch = OpCmpMemImmBranch(
-          kCondLs, RegStorage::InvalidReg(), rl_obj.reg, count_offset,
-          mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr, &comparison);
-      MarkPossibleNullPointerExceptionAfter(0, comparison);
-    } else {
-      OpRegMem(kOpCmp, rl_idx.reg, rl_obj.reg, count_offset);
-      MarkPossibleNullPointerException(0);
-      range_check_branch = OpCondBranch(kCondUge, nullptr);
-    }
-  }
-  RegLocation rl_dest = InlineTarget(info);
-  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  LoadBaseIndexedDisp(rl_obj.reg, rl_idx.reg, 1, value_offset, rl_result.reg, kUnsignedHalf);
-  FreeTemp(rl_idx.reg);
-  FreeTemp(rl_obj.reg);
-  StoreValue(rl_dest, rl_result);
-  if (range_check) {
-    DCHECK(range_check_branch != nullptr);
-    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've already null checked.
-    AddIntrinsicSlowPath(info, range_check_branch);
-  }
-  return true;
-}
-
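-// Address math behind the inlined load above (assuming the uncompressed
-// inline char[] String layout this code targets): the character at index idx
-// lives at obj + value_offset + 2 * idx, which LoadBaseIndexedDisp expresses
-// as base = obj, index = idx, scale = 1 (i.e. idx << 1), disp = value_offset.
-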
-bool X86Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
-  RegLocation rl_dest = InlineTarget(info);
-
-  // Early exit if the result is unused.
-  if (rl_dest.orig_sreg < 0) {
-    return true;
-  }
-
-  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
-
-  if (cu_->target64) {
-    OpRegThreadMem(kOpMov, rl_result.reg, Thread::PeerOffset<8>());
-  } else {
-    OpRegThreadMem(kOpMov, rl_result.reg, Thread::PeerOffset<4>());
-  }
-
-  StoreValue(rl_dest, rl_result);
-  return true;
-}
-
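-// What the intrinsic above boils down to: a single segment-relative load of
-// the managed Thread peer (segment registers per the conventional ART x86
-// thread-register assignment, assumed here):
-//   32-bit:  mov eax, fs:[peer_offset]
-//   64-bit:  mov rax, gs:[peer_offset]
-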
-/**
- * Lock temp registers for explicit usage. Registers will be freed in destructor.
- */
-X86Mir2Lir::ExplicitTempRegisterLock::ExplicitTempRegisterLock(X86Mir2Lir* mir_to_lir,
-                                                               int n_regs, ...) :
-    temp_regs_(n_regs),
-    mir_to_lir_(mir_to_lir) {
-  va_list regs;
-  va_start(regs, n_regs);
-  for (int i = 0; i < n_regs; i++) {
-    RegStorage reg = *(va_arg(regs, RegStorage*));
-    RegisterInfo* info = mir_to_lir_->GetRegInfo(reg);
-
-    // Make sure we don't have a promoted register here.
-    DCHECK(info->IsTemp());
-
-    temp_regs_.push_back(reg);
-    mir_to_lir_->FlushReg(reg);
-
-    if (reg.IsPair()) {
-      RegStorage partner = info->Partner();
-      temp_regs_.push_back(partner);
-      mir_to_lir_->FlushReg(partner);
-    }
-
-    mir_to_lir_->Clobber(reg);
-    mir_to_lir_->LockTemp(reg);
-  }
-
-  va_end(regs);
-}
-
-/*
- * Free all locked registers.
- */
-X86Mir2Lir::ExplicitTempRegisterLock::~ExplicitTempRegisterLock() {
-  // Free all locked temps.
-  for (auto it : temp_regs_) {
-    mir_to_lir_->FreeTemp(it);
-  }
-}
-
-int X86Mir2Lir::GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) {
-  if (count < 4) {
-    // It does not make sense to use this utility if we have no chance to use
-    // a 128-bit move.
-    return count;
-  }
-  GenDalvikArgsFlushPromoted(info, first);
-
-  // The rest can be copied together.
-  int current_src_offset = SRegOffset(info->args[first].s_reg_low);
-  int current_dest_offset = StackVisitor::GetOutVROffset(first, cu_->instruction_set);
-
-  // Only Dalvik regs are accessed in this loop; no next_call_insn() calls.
-  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-  while (count > 0) {
-    // This is based on the knowledge that the stack itself is 16-byte aligned.
-    bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
-    bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
-    size_t bytes_to_move;
-
-    /*
-     * The amount to move defaults to 32-bit. If there are 4 registers left to move, do a
-     * 128-bit move, because we won't get another chance to reach alignment. If there are
-     * more than 4 registers left, consider a 128-bit move only if either src or dest is
-     * already aligned, because otherwise a smaller move may get us to alignment first.
-     */
-    if (count == 4 || (count > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
-      // Moving 128-bits via xmm register.
-      bytes_to_move = sizeof(uint32_t) * 4;
-
-      // Allocate a free xmm temp. Since we are working through the calling sequence,
-      // we expect to have an xmm temporary available. AllocTempDouble will abort if
-      // there are no free registers.
-      RegStorage temp = AllocTempDouble();
-
-      LIR* ld1 = nullptr;
-      LIR* ld2 = nullptr;
-      LIR* st1 = nullptr;
-      LIR* st2 = nullptr;
-
-      /*
-       * The logic is similar for both loads and stores. If we have 16-byte alignment,
-       * do an aligned move. If we have 8-byte alignment, then do the move in two
-       * parts. This approach prevents possible cache line splits. Finally, fall back
-       * to doing an unaligned move. In most cases we likely won't split the cache
-       * line but we cannot prove it and thus take a conservative approach.
-       */
-      bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
-      bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
-
-      if (src_is_16b_aligned) {
-        ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovA128FP);
-      } else if (src_is_8b_aligned) {
-        ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovLo128FP);
-        ld2 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset + (bytes_to_move >> 1),
-                          kMovHi128FP);
-      } else {
-        ld1 = OpMovRegMem(temp, TargetPtrReg(kSp), current_src_offset, kMovU128FP);
-      }
-
-      if (dest_is_16b_aligned) {
-        st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovA128FP);
-      } else if (dest_is_8b_aligned) {
-        st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovLo128FP);
-        st2 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset + (bytes_to_move >> 1),
-                          temp, kMovHi128FP);
-      } else {
-        st1 = OpMovMemReg(TargetPtrReg(kSp), current_dest_offset, temp, kMovU128FP);
-      }
-
-      // TODO If we could keep track of aliasing information for memory accesses that are wider
-      // than 64-bit, we wouldn't need to set up a barrier.
-      if (ld1 != nullptr) {
-        if (ld2 != nullptr) {
-          // For 64-bit load we can actually set up the aliasing information.
-          AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
-          AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true,
-                                  true);
-        } else {
-          // Set barrier for 128-bit load.
-          ld1->u.m.def_mask = &kEncodeAll;
-        }
-      }
-      if (st1 != nullptr) {
-        if (st2 != nullptr) {
-          // For 64-bit store we can actually set up the aliasing information.
-          AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
-          AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false,
-                                  true);
-        } else {
-          // Set barrier for 128-bit store.
-          st1->u.m.def_mask = &kEncodeAll;
-        }
-      }
-
-      // Free the temporary used for the data movement.
-      FreeTemp(temp);
-    } else {
-      // Moving 32-bits via general purpose register.
-      bytes_to_move = sizeof(uint32_t);
-
-      // Instead of allocating a new temp, simply reuse one of the registers being used
-      // for argument passing.
-      RegStorage temp = TargetReg(kArg3, kNotWide);
-
-      // Now load the argument VR and store to the outs.
-      Load32Disp(TargetPtrReg(kSp), current_src_offset, temp);
-      Store32Disp(TargetPtrReg(kSp), current_dest_offset, temp);
-    }
-
-    current_src_offset += bytes_to_move;
-    current_dest_offset += bytes_to_move;
-    count -= (bytes_to_move >> 2);
-  }
-  DCHECK_EQ(count, 0);
-  return count;
-}
-
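-// Standalone sketch (hypothetical helper) of the copy-width policy above:
-// with exactly 4 slots left, always take the 128-bit path since there is no
-// later chance to reach alignment; with more than 4, take it only when one
-// side is already 16-byte aligned, because a 32-bit copy may align us first.
-static inline bool Use128BitMove(int slots_left, bool src_16b_aligned, bool dest_16b_aligned) {
-  return slots_left == 4 || (slots_left > 4 && (src_16b_aligned || dest_16b_aligned));
-}
-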
-}  // namespace art
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
deleted file mode 100644
index 61354df..0000000
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ /dev/null
@@ -1,1167 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_x86.h"
-
-#include "base/logging.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "dex/dataflow_iterator-inl.h"
-#include "dex/quick/dex_file_method_inliner.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
-#include "dex/reg_storage_eq.h"
-#include "driver/compiler_driver.h"
-#include "x86_lir.h"
-
-namespace art {
-
-/* This file contains codegen for the X86 ISA */
-
-LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
-  int opcode;
-  /* Must be both DOUBLE or both not DOUBLE. */
-  DCHECK(r_dest.IsFloat() || r_src.IsFloat());
-  DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
-  if (r_dest.IsDouble()) {
-    opcode = kX86MovsdRR;
-  } else {
-    if (r_dest.IsSingle()) {
-      if (r_src.IsSingle()) {
-        opcode = kX86MovssRR;
-      } else {  // Fpr <- Gpr
-        opcode = kX86MovdxrRR;
-      }
-    } else {  // Gpr <- Fpr
-      DCHECK(r_src.IsSingle()) << "Raw: 0x" << std::hex << r_src.GetRawBits();
-      opcode = kX86MovdrxRR;
-    }
-  }
-  DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
-  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
-  if (r_dest == r_src) {
-    res->flags.is_nop = true;
-  }
-  return res;
-}
-
-bool X86Mir2Lir::InexpensiveConstantInt(int32_t value ATTRIBUTE_UNUSED) {
-  return true;
-}
-
-bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
-  return value == 0;
-}
-
-bool X86Mir2Lir::InexpensiveConstantLong(int64_t value ATTRIBUTE_UNUSED) {
-  return true;
-}
-
-bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) {
-  return value == 0;
-}
-
-/*
- * Load an immediate using a shortcut if possible; otherwise
- * grab from the per-translation literal pool.  If target is
- * a high register, build constant into a low register and copy.
- *
- * No additional register-clobbering operation is performed. Use this version when
- * 1) r_dest is freshly returned from AllocTemp or
- * 2) The codegen is under fixed register usage
- */
-LIR* X86Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
-  RegStorage r_dest_save = r_dest;
-  if (r_dest.IsFloat()) {
-    if (value == 0) {
-      return NewLIR2(kX86XorpsRR, r_dest.GetReg(), r_dest.GetReg());
-    }
-    r_dest = AllocTemp();
-  }
-
-  LIR *res;
-  if (value == 0) {
-    res = NewLIR2(kX86Xor32RR, r_dest.GetReg(), r_dest.GetReg());
-  } else {
-    // Note: there is no byte-immediate form of a 32-bit immediate move.
-    // A 64-bit immediate is not supported by the LIR structure.
-    res = NewLIR2(kX86Mov32RI, r_dest.GetReg(), value);
-  }
-
-  if (r_dest_save.IsFloat()) {
-    NewLIR2(kX86MovdxrRR, r_dest_save.GetReg(), r_dest.GetReg());
-    FreeTemp(r_dest);
-  }
-
-  return res;
-}
-
-LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
-  LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly */);
-  res->target = target;
-  return res;
-}
-
-LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
-  LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */,
-                        X86ConditionEncoding(cc));
-  branch->target = target;
-  return branch;
-}
-
-LIR* X86Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
-  X86OpCode opcode = kX86Bkpt;
-  switch (op) {
-    case kOpNeg: opcode = r_dest_src.Is64Bit() ? kX86Neg64R : kX86Neg32R; break;
-    case kOpNot: opcode = r_dest_src.Is64Bit() ? kX86Not64R : kX86Not32R; break;
-    case kOpRev: opcode = r_dest_src.Is64Bit() ? kX86Bswap64R : kX86Bswap32R; break;
-    case kOpBlx: opcode = kX86CallR; break;
-    default:
-      LOG(FATAL) << "Bad case in OpReg " << op;
-  }
-  return NewLIR1(opcode, r_dest_src.GetReg());
-}
-
-LIR* X86Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
-  X86OpCode opcode = kX86Bkpt;
-  bool byte_imm = IS_SIMM8(value);
-  DCHECK(!r_dest_src1.IsFloat());
-  if (r_dest_src1.Is64Bit()) {
-    switch (op) {
-      case kOpAdd: opcode = byte_imm ? kX86Add64RI8 : kX86Add64RI; break;
-      case kOpSub: opcode = byte_imm ? kX86Sub64RI8 : kX86Sub64RI; break;
-      case kOpLsl: opcode = kX86Sal64RI; break;
-      case kOpLsr: opcode = kX86Shr64RI; break;
-      case kOpAsr: opcode = kX86Sar64RI; break;
-      case kOpCmp: opcode = byte_imm ? kX86Cmp64RI8 : kX86Cmp64RI; break;
-      default:
-        LOG(FATAL) << "Bad case in OpRegImm (64-bit) " << op;
-    }
-  } else {
-    switch (op) {
-      case kOpLsl: opcode = kX86Sal32RI; break;
-      case kOpLsr: opcode = kX86Shr32RI; break;
-      case kOpAsr: opcode = kX86Sar32RI; break;
-      case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
-      case kOpOr:  opcode = byte_imm ? kX86Or32RI8  : kX86Or32RI;  break;
-      case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
-      // case kOpSbb: opcode = kX86Sbb32RI; break;
-      case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
-      case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
-      case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
-      case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
-      case kOpMov:
-        /*
-         * Moving the constant zero into a register can be specialized as an xor of the
-         * register. However, xor sets eflags while the move does not. For that reason,
-         * always emit the move; callers that can tolerate clobbered flags should call
-         * LoadConstantNoClobber instead.
-         */
-        opcode = kX86Mov32RI;
-        break;
-      case kOpMul:
-        opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
-        return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), value);
-      case kOp2Byte:
-        opcode = kX86Mov32RI;
-        value = static_cast<int8_t>(value);
-        break;
-      case kOp2Short:
-        opcode = kX86Mov32RI;
-        value = static_cast<int16_t>(value);
-        break;
-      case kOp2Char:
-        opcode = kX86Mov32RI;
-        value = static_cast<uint16_t>(value);
-        break;
-      case kOpNeg:
-        opcode = kX86Mov32RI;
-        value = -value;
-        break;
-      default:
-        LOG(FATAL) << "Bad case in OpRegImm " << op;
-    }
-  }
-  return NewLIR2(opcode, r_dest_src1.GetReg(), value);
-}
-
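-// Sketch of the immediate-width selection above (hypothetical helper): most
-// x86 ALU instructions have a sign-extended imm8 form, so values in
-// [-128, 127] get the shorter encoding, which is what IS_SIMM8 tests.
-static inline bool FitsInSignExtendedImm8(int value) {
-  return value >= -128 && value <= 127;
-}
-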
-LIR* X86Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
-  bool is64Bit = r_dest_src1.Is64Bit();
-  X86OpCode opcode = kX86Nop;
-  bool src2_must_be_cx = false;
-  switch (op) {
-      // X86 unary opcodes
-    case kOpMvn:
-      OpRegCopy(r_dest_src1, r_src2);
-      return OpReg(kOpNot, r_dest_src1);
-    case kOpNeg:
-      OpRegCopy(r_dest_src1, r_src2);
-      return OpReg(kOpNeg, r_dest_src1);
-    case kOpRev:
-      OpRegCopy(r_dest_src1, r_src2);
-      return OpReg(kOpRev, r_dest_src1);
-    case kOpRevsh:
-      OpRegCopy(r_dest_src1, r_src2);
-      OpReg(kOpRev, r_dest_src1);
-      return OpRegImm(kOpAsr, r_dest_src1, 16);
-      // X86 binary opcodes
-    case kOpSub: opcode = is64Bit ? kX86Sub64RR : kX86Sub32RR; break;
-    case kOpSbc: opcode = is64Bit ? kX86Sbb64RR : kX86Sbb32RR; break;
-    case kOpLsl: opcode = is64Bit ? kX86Sal64RC : kX86Sal32RC; src2_must_be_cx = true; break;
-    case kOpLsr: opcode = is64Bit ? kX86Shr64RC : kX86Shr32RC; src2_must_be_cx = true; break;
-    case kOpAsr: opcode = is64Bit ? kX86Sar64RC : kX86Sar32RC; src2_must_be_cx = true; break;
-    case kOpMov: opcode = is64Bit ? kX86Mov64RR : kX86Mov32RR; break;
-    case kOpCmp: opcode = is64Bit ? kX86Cmp64RR : kX86Cmp32RR; break;
-    case kOpAdd: opcode = is64Bit ? kX86Add64RR : kX86Add32RR; break;
-    case kOpAdc: opcode = is64Bit ? kX86Adc64RR : kX86Adc32RR; break;
-    case kOpAnd: opcode = is64Bit ? kX86And64RR : kX86And32RR; break;
-    case kOpOr:  opcode = is64Bit ? kX86Or64RR : kX86Or32RR; break;
-    case kOpXor: opcode = is64Bit ? kX86Xor64RR : kX86Xor32RR; break;
-    case kOp2Byte:
-      // TODO: there are several instances of this check.  A utility function perhaps?
-      // TODO: Similar to Arm's reg < 8 check.  Perhaps add attribute checks to RegStorage?
-      // Use shifts instead of a byte operand if the source can't be byte accessed.
-      if (r_src2.GetRegNum() >= rs_rX86_SP_32.GetRegNum()) {
-        NewLIR2(is64Bit ? kX86Mov64RR : kX86Mov32RR, r_dest_src1.GetReg(), r_src2.GetReg());
-        NewLIR2(is64Bit ? kX86Sal64RI : kX86Sal32RI, r_dest_src1.GetReg(), is64Bit ? 56 : 24);
-        return NewLIR2(is64Bit ? kX86Sar64RI : kX86Sar32RI, r_dest_src1.GetReg(),
-                       is64Bit ? 56 : 24);
-      } else {
-        opcode = is64Bit ? kX86Bkpt : kX86Movsx8RR;
-      }
-      break;
-    case kOp2Short: opcode = is64Bit ? kX86Bkpt : kX86Movsx16RR; break;
-    case kOp2Char: opcode = is64Bit ? kX86Bkpt : kX86Movzx16RR; break;
-    case kOpMul: opcode = is64Bit ? kX86Bkpt : kX86Imul32RR; break;
-    default:
-      LOG(FATAL) << "Bad case in OpRegReg " << op;
-      break;
-  }
-  CHECK(!src2_must_be_cx || r_src2.GetReg() == rs_rCX.GetReg());
-  return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
-}
-
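-// Scalar sketch (hypothetical helper) of the kOp2Byte fallback above: in
-// 32-bit mode, registers numbered at or above ESP have no byte encoding, so
-// instead of movsx8 the value is sign-extended with a shift-up /
-// arithmetic-shift-down pair.
-static inline int32_t SignExtendByteViaShifts(int32_t v) {
-  uint32_t shifted_up = static_cast<uint32_t>(v) << 24;  // sal $24
-  // sar $24 (right shift of a negative value is arithmetic on the
-  // compilers ART targets).
-  return static_cast<int32_t>(shifted_up) >> 24;
-}
-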
-LIR* X86Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
-  DCHECK(!r_base.IsFloat());
-  X86OpCode opcode = kX86Nop;
-  int dest = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
-  switch (move_type) {
-    case kMov8GP:
-      CHECK(!r_dest.IsFloat());
-      opcode = kX86Mov8RM;
-      break;
-    case kMov16GP:
-      CHECK(!r_dest.IsFloat());
-      opcode = kX86Mov16RM;
-      break;
-    case kMov32GP:
-      CHECK(!r_dest.IsFloat());
-      opcode = kX86Mov32RM;
-      break;
-    case kMov32FP:
-      CHECK(r_dest.IsFloat());
-      opcode = kX86MovssRM;
-      break;
-    case kMov64FP:
-      CHECK(r_dest.IsFloat());
-      opcode = kX86MovsdRM;
-      break;
-    case kMovU128FP:
-      CHECK(r_dest.IsFloat());
-      opcode = kX86MovupsRM;
-      break;
-    case kMovA128FP:
-      CHECK(r_dest.IsFloat());
-      opcode = kX86MovapsRM;
-      break;
-    case kMovLo128FP:
-      CHECK(r_dest.IsFloat());
-      opcode = kX86MovlpsRM;
-      break;
-    case kMovHi128FP:
-      CHECK(r_dest.IsFloat());
-      opcode = kX86MovhpsRM;
-      break;
-    case kMov64GP:
-    case kMovLo64FP:
-    case kMovHi64FP:
-    default:
-      LOG(FATAL) << "Bad case in OpMovRegMem";
-      break;
-  }
-
-  return NewLIR3(opcode, dest, r_base.GetReg(), offset);
-}
-
-LIR* X86Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
-  DCHECK(!r_base.IsFloat());
-  int src = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg();
-
-  X86OpCode opcode = kX86Nop;
-  switch (move_type) {
-    case kMov8GP:
-      CHECK(!r_src.IsFloat());
-      opcode = kX86Mov8MR;
-      break;
-    case kMov16GP:
-      CHECK(!r_src.IsFloat());
-      opcode = kX86Mov16MR;
-      break;
-    case kMov32GP:
-      CHECK(!r_src.IsFloat());
-      opcode = kX86Mov32MR;
-      break;
-    case kMov32FP:
-      CHECK(r_src.IsFloat());
-      opcode = kX86MovssMR;
-      break;
-    case kMov64FP:
-      CHECK(r_src.IsFloat());
-      opcode = kX86MovsdMR;
-      break;
-    case kMovU128FP:
-      CHECK(r_src.IsFloat());
-      opcode = kX86MovupsMR;
-      break;
-    case kMovA128FP:
-      CHECK(r_src.IsFloat());
-      opcode = kX86MovapsMR;
-      break;
-    case kMovLo128FP:
-      CHECK(r_src.IsFloat());
-      opcode = kX86MovlpsMR;
-      break;
-    case kMovHi128FP:
-      CHECK(r_src.IsFloat());
-      opcode = kX86MovhpsMR;
-      break;
-    case kMov64GP:
-    case kMovLo64FP:
-    case kMovHi64FP:
-    default:
-      LOG(FATAL) << "Bad case in OpMovMemReg";
-      break;
-  }
-
-  return NewLIR3(opcode, r_base.GetReg(), offset, src);
-}
-
-LIR* X86Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
-  // The only conditional reg-to-reg operation supported is Cmov.
-  DCHECK_EQ(op, kOpCmov);
-  DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
-  return NewLIR3(r_dest.Is64Bit() ? kX86Cmov64RRC : kX86Cmov32RRC, r_dest.GetReg(),
-                 r_src.GetReg(), X86ConditionEncoding(cc));
-}
-
-LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
-  bool is64Bit = r_dest.Is64Bit();
-  X86OpCode opcode = kX86Nop;
-  switch (op) {
-      // X86 binary opcodes
-    case kOpSub: opcode = is64Bit ? kX86Sub64RM : kX86Sub32RM; break;
-    case kOpMov: opcode = is64Bit ? kX86Mov64RM : kX86Mov32RM; break;
-    case kOpCmp: opcode = is64Bit ? kX86Cmp64RM : kX86Cmp32RM; break;
-    case kOpAdd: opcode = is64Bit ? kX86Add64RM : kX86Add32RM; break;
-    case kOpAnd: opcode = is64Bit ? kX86And64RM : kX86And32RM; break;
-    case kOpOr:  opcode = is64Bit ? kX86Or64RM : kX86Or32RM; break;
-    case kOpXor: opcode = is64Bit ? kX86Xor64RM : kX86Xor32RM; break;
-    case kOp2Byte: opcode = kX86Movsx8RM; break;
-    case kOp2Short: opcode = kX86Movsx16RM; break;
-    case kOp2Char: opcode = kX86Movzx16RM; break;
-    case kOpMul:
-    default:
-      LOG(FATAL) << "Bad case in OpRegMem " << op;
-      break;
-  }
-  LIR *l = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), offset);
-  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
-    DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
-    AnnotateDalvikRegAccess(l, offset >> 2, true /* is_load */, false /* is_64bit */);
-  }
-  return l;
-}
-
-LIR* X86Mir2Lir::OpMemReg(OpKind op, RegLocation rl_dest, int r_value) {
-  DCHECK_NE(rl_dest.location, kLocPhysReg);
-  int displacement = SRegOffset(rl_dest.s_reg_low);
-  bool is64Bit = rl_dest.wide != 0;
-  X86OpCode opcode = kX86Nop;
-  switch (op) {
-    case kOpSub: opcode = is64Bit ? kX86Sub64MR : kX86Sub32MR; break;
-    case kOpMov: opcode = is64Bit ? kX86Mov64MR : kX86Mov32MR; break;
-    case kOpCmp: opcode = is64Bit ? kX86Cmp64MR : kX86Cmp32MR; break;
-    case kOpAdd: opcode = is64Bit ? kX86Add64MR : kX86Add32MR; break;
-    case kOpAnd: opcode = is64Bit ? kX86And64MR : kX86And32MR; break;
-    case kOpOr:  opcode = is64Bit ? kX86Or64MR : kX86Or32MR; break;
-    case kOpXor: opcode = is64Bit ? kX86Xor64MR : kX86Xor32MR; break;
-    case kOpLsl: opcode = is64Bit ? kX86Sal64MC : kX86Sal32MC; break;
-    case kOpLsr: opcode = is64Bit ? kX86Shr64MC : kX86Shr32MC; break;
-    case kOpAsr: opcode = is64Bit ? kX86Sar64MC : kX86Sar32MC; break;
-    default:
-      LOG(FATAL) << "Bad case in OpMemReg " << op;
-      break;
-  }
-  LIR *l = NewLIR3(opcode, rs_rX86_SP_32.GetReg(), displacement, r_value);
-  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
-    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
-    AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is64Bit /* is_64bit */);
-  }
-  return l;
-}
-
-LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegLocation rl_value) {
-  DCHECK_NE(rl_value.location, kLocPhysReg);
-  bool is64Bit = r_dest.Is64Bit();
-  int displacement = SRegOffset(rl_value.s_reg_low);
-  X86OpCode opcode = kX86Nop;
-  switch (op) {
-    case kOpSub: opcode = is64Bit ? kX86Sub64RM : kX86Sub32RM; break;
-    case kOpMov: opcode = is64Bit ? kX86Mov64RM : kX86Mov32RM; break;
-    case kOpCmp: opcode = is64Bit ? kX86Cmp64RM : kX86Cmp32RM; break;
-    case kOpAdd: opcode = is64Bit ? kX86Add64RM : kX86Add32RM; break;
-    case kOpAnd: opcode = is64Bit ? kX86And64RM : kX86And32RM; break;
-    case kOpOr:  opcode = is64Bit ? kX86Or64RM : kX86Or32RM; break;
-    case kOpXor: opcode = is64Bit ? kX86Xor64RM : kX86Xor32RM; break;
-    case kOpMul: opcode = is64Bit ? kX86Bkpt : kX86Imul32RM; break;
-    default:
-      LOG(FATAL) << "Bad case in OpRegMem " << op;
-      break;
-  }
-  LIR *l = NewLIR3(opcode, r_dest.GetReg(), rs_rX86_SP_32.GetReg(), displacement);
-  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
-    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
-  }
-  return l;
-}
-
-LIR* X86Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
-                             RegStorage r_src2) {
-  bool is64Bit = r_dest.Is64Bit();
-  if (r_dest != r_src1 && r_dest != r_src2) {
-    if (op == kOpAdd) {  // lea special case, except can't encode rbp as base
-      if (r_src1 == r_src2) {
-        OpRegCopy(r_dest, r_src1);
-        return OpRegImm(kOpLsl, r_dest, 1);
-      } else if (r_src1 != rs_rBP) {
-        return NewLIR5(is64Bit ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
-                       r_src1.GetReg() /* base */, r_src2.GetReg() /* index */,
-                       0 /* scale */, 0 /* disp */);
-      } else {
-        return NewLIR5(is64Bit ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
-                       r_src2.GetReg() /* base */, r_src1.GetReg() /* index */,
-                       0 /* scale */, 0 /* disp */);
-      }
-    } else {
-      OpRegCopy(r_dest, r_src1);
-      return OpRegReg(op, r_dest, r_src2);
-    }
-  } else if (r_dest == r_src1) {
-    return OpRegReg(op, r_dest, r_src2);
-  } else {  // r_dest == r_src2
-    switch (op) {
-      case kOpSub:  // non-commutative
-        OpReg(kOpNeg, r_dest);
-        op = kOpAdd;
-        break;
-      case kOpSbc:
-      case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
-        RegStorage t_reg = AllocTemp();
-        OpRegCopy(t_reg, r_src1);
-        OpRegReg(op, t_reg, r_src2);
-        LIR* res = OpRegCopyNoInsert(r_dest, t_reg);
-        AppendLIR(res);
-        FreeTemp(t_reg);
-        return res;
-      }
-      case kOpAdd:  // commutative
-      case kOpOr:
-      case kOpAdc:
-      case kOpAnd:
-      case kOpXor:
-      case kOpMul:
-        break;
-      default:
-        LOG(FATAL) << "Bad case in OpRegRegReg " << op;
-    }
-    return OpRegReg(op, r_dest, r_src1);
-  }
-}
-
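-// Encoding note for the lea special case above (x86 ModRM/SIB detail): lea
-// gives a three-operand, flags-preserving add, but with mod == 00 a base of
-// rbp/ebp means "disp32 only", so rbp cannot sit in the base slot of the
-// zero-displacement form; the operand swap above routes rbp to the index
-// slot instead.
-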
-LIR* X86Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src, int value) {
-  if (op == kOpMul && !cu_->target64) {
-    X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
-    return NewLIR3(opcode, r_dest.GetReg(), r_src.GetReg(), value);
-  } else if (op == kOpAnd && !cu_->target64) {
-    if (value == 0xFF && r_src.Low4()) {
-      return NewLIR2(kX86Movzx8RR, r_dest.GetReg(), r_src.GetReg());
-    } else if (value == 0xFFFF) {
-      return NewLIR2(kX86Movzx16RR, r_dest.GetReg(), r_src.GetReg());
-    }
-  }
-  if (r_dest != r_src) {
-    if ((false) && op == kOpLsl && value >= 0 && value <= 3) {  // lea shift special case
-      // TODO: fix bug in LEA encoding when disp == 0
-      return NewLIR5(kX86Lea32RA, r_dest.GetReg(),  r5sib_no_base /* base */,
-                     r_src.GetReg() /* index */, value /* scale */, 0 /* disp */);
-    } else if (op == kOpAdd) {  // lea add special case
-      return NewLIR5(r_dest.Is64Bit() ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
-                     r_src.GetReg() /* base */, rs_rX86_SP_32.GetReg()/*r4sib_no_index*/ /* index */,
-                     0 /* scale */, value /* disp */);
-    }
-    OpRegCopy(r_dest, r_src);
-  }
-  return OpRegImm(op, r_dest, value);
-}
-
-LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
-  DCHECK_EQ(kX86, cu_->instruction_set);
-  X86OpCode opcode = kX86Bkpt;
-  switch (op) {
-    case kOpBlx: opcode = kX86CallT;  break;
-    case kOpBx: opcode = kX86JmpT;  break;
-    default:
-      LOG(FATAL) << "Bad opcode: " << op;
-      break;
-  }
-  return NewLIR1(opcode, thread_offset.Int32Value());
-}
-
-LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
-  DCHECK_EQ(kX86_64, cu_->instruction_set);
-  X86OpCode opcode = kX86Bkpt;
-  switch (op) {
-    case kOpBlx: opcode = kX86CallT;  break;
-    case kOpBx: opcode = kX86JmpT;  break;
-    default:
-      LOG(FATAL) << "Bad opcode: " << op;
-      break;
-  }
-  return NewLIR1(opcode, thread_offset.Int32Value());
-}
-
-LIR* X86Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
-  X86OpCode opcode = kX86Bkpt;
-  switch (op) {
-    case kOpBlx: opcode = kX86CallM;  break;
-    default:
-      LOG(FATAL) << "Bad opcode: " << op;
-      break;
-  }
-  return NewLIR2(opcode, r_base.GetReg(), disp);
-}
-
-LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
-  int32_t val_lo = Low32Bits(value);
-  int32_t val_hi = High32Bits(value);
-  int32_t low_reg_val = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
-  LIR *res;
-  bool is_fp = r_dest.IsFloat();
-  // TODO: clean this up once we fully recognize 64-bit storage containers.
-  if (is_fp) {
-    DCHECK(r_dest.IsDouble());
-    if (value == 0) {
-      return NewLIR2(kX86XorpdRR, low_reg_val, low_reg_val);
-    } else if (pc_rel_base_reg_.Valid() || cu_->target64) {
-      // We will load the value from the literal area.
-      LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
-      if (data_target == nullptr) {
-        data_target = AddWideData(&literal_list_, val_lo, val_hi);
-      }
-
-      // Load the proper value from the literal area.
-      // We don't know the proper offset for the value, so pick one that will
-      // force a 4-byte offset.  The assembler will fix this up later to hold
-      // the right value.
-      ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
-      if (cu_->target64) {
-        res = NewLIR3(kX86MovsdRM, low_reg_val, kRIPReg, 256 /* bogus */);
-      } else {
-        // Get the PC to a register and get the anchor.
-        LIR* anchor;
-        RegStorage r_pc = GetPcAndAnchor(&anchor);
-
-        res = LoadBaseDisp(r_pc, kDummy32BitOffset, RegStorage::FloatSolo64(low_reg_val),
-                           kDouble, kNotVolatile);
-        res->operands[4] = WrapPointer(anchor);
-        if (IsTemp(r_pc)) {
-          FreeTemp(r_pc);
-        }
-      }
-      res->target = data_target;
-      res->flags.fixup = kFixupLoad;
-    } else {
-      if (r_dest.IsPair()) {
-        if (val_lo == 0) {
-          res = NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
-        } else {
-          res = LoadConstantNoClobber(RegStorage::FloatSolo32(low_reg_val), val_lo);
-        }
-        if (val_hi != 0) {
-          RegStorage r_dest_hi = AllocTempDouble();
-          LoadConstantNoClobber(r_dest_hi, val_hi);
-          NewLIR2(kX86PunpckldqRR, low_reg_val, r_dest_hi.GetReg());
-          FreeTemp(r_dest_hi);
-        }
-      } else {
-        RegStorage r_temp = AllocTypedTempWide(false, kCoreReg);
-        res = LoadConstantWide(r_temp, value);
-        OpRegCopyWide(r_dest, r_temp);
-        FreeTemp(r_temp);
-      }
-    }
-  } else {
-    if (r_dest.IsPair()) {
-      res = LoadConstantNoClobber(r_dest.GetLow(), val_lo);
-      LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
-    } else {
-      if (value == 0) {
-        res = NewLIR2(kX86Xor64RR, r_dest.GetReg(), r_dest.GetReg());
-      } else if (value >= INT_MIN && value <= INT_MAX) {
-        res = NewLIR2(kX86Mov64RI32, r_dest.GetReg(), val_lo);
-      } else {
-        res = NewLIR3(kX86Mov64RI64, r_dest.GetReg(), val_hi, val_lo);
-      }
-    }
-  }
-  return res;
-}
-
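-// Summary of the 64-bit GPR immediate selection above (worked cases):
-//   value == 0                   -> xor r64, r64     (no immediate at all)
-//   INT_MIN <= value <= INT_MAX  -> mov r64, imm32   (sign-extended imm32)
-//   otherwise                    -> mov r64, imm64   (full-width movabs form)
-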
-LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                     int displacement, RegStorage r_dest, OpSize size) {
-  LIR *load = nullptr;
-  LIR *load2 = nullptr;
-  bool is_array = r_index.Valid();
-  bool pair = r_dest.IsPair();
-  bool is64bit = ((size == k64) || (size == kDouble));
-  X86OpCode opcode = kX86Nop;
-  switch (size) {
-    case k64:
-    case kDouble:
-      if (r_dest.IsFloat()) {
-        opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
-      } else if (!pair) {
-        opcode = is_array ? kX86Mov64RA  : kX86Mov64RM;
-      } else {
-        opcode = is_array ? kX86Mov32RA  : kX86Mov32RM;
-      }
-      // TODO: this double load may be from an address that is not 8-byte aligned.
-      DCHECK_ALIGNED(displacement, 4);
-      break;
-    case kWord:
-      if (cu_->target64) {
-        opcode = is_array ? kX86Mov64RA  : kX86Mov64RM;
-        CHECK_EQ(is_array, false);
-        CHECK_EQ(r_dest.IsFloat(), false);
-        break;
-      }
-      FALLTHROUGH_INTENDED;  // else fall-through to k32 case
-    case k32:
-    case kSingle:
-    case kReference:  // TODO: update for reference decompression on 64-bit targets.
-      opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
-      if (r_dest.IsFloat()) {
-        opcode = is_array ? kX86MovssRA : kX86MovssRM;
-        DCHECK(r_dest.IsFloat());
-      }
-      DCHECK_ALIGNED(displacement, 4);
-      break;
-    case kUnsignedHalf:
-      opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
-      DCHECK_ALIGNED(displacement, 2);
-      break;
-    case kSignedHalf:
-      opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
-      DCHECK_ALIGNED(displacement, 2);
-      break;
-    case kUnsignedByte:
-      opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
-      break;
-    case kSignedByte:
-      opcode = is_array ? kX86Movsx8RA : kX86Movsx8RM;
-      break;
-    default:
-      LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
-  }
-
-  if (!is_array) {
-    if (!pair) {
-      load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
-    } else {
-      DCHECK(!r_dest.IsFloat());  // Make sure we're not still using a pair here.
-      if (r_base == r_dest.GetLow()) {
-        load = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
-                        displacement + HIWORD_OFFSET);
-        load2 = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
-      } else {
-        load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
-        load2 = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
-                        displacement + HIWORD_OFFSET);
-      }
-    }
-    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
-      DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
-      AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
-                              true /* is_load */, is64bit);
-      if (pair) {
-        AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
-                                true /* is_load */, is64bit);
-      }
-    }
-  } else {
-    if (!pair) {
-      load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
-                     displacement + LOWORD_OFFSET);
-    } else {
-      DCHECK(!r_dest.IsFloat());  // Make sure we're not still using a pair here.
-      if (r_base == r_dest.GetLow()) {
-        if (r_dest.GetHigh() == r_index) {
-          // We can't use either register for the first load.
-          RegStorage temp = AllocTemp();
-          load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
-                          displacement + HIWORD_OFFSET);
-          load2 = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
-                         displacement + LOWORD_OFFSET);
-          OpRegCopy(r_dest.GetHigh(), temp);
-          FreeTemp(temp);
-        } else {
-          load = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
-                          displacement + HIWORD_OFFSET);
-          load2 = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
-                         displacement + LOWORD_OFFSET);
-        }
-      } else {
-        if (r_dest.GetLow() == r_index) {
-          // We can't use either register for the first load.
-          RegStorage temp = AllocTemp();
-          load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
-                         displacement + LOWORD_OFFSET);
-          load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
-                          displacement + HIWORD_OFFSET);
-          OpRegCopy(r_dest.GetLow(), temp);
-          FreeTemp(temp);
-        } else {
-          load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
-                         displacement + LOWORD_OFFSET);
-          load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
-                          displacement + HIWORD_OFFSET);
-        }
-      }
-    }
-  }
-
-  // Always return first load generated as this might cause a fault if base is null.
-  return load;
-}
-
-/* Load value from base + scaled index. */
-LIR* X86Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
-                                 int scale, OpSize size) {
-  return LoadBaseIndexedDisp(r_base, r_index, scale, 0, r_dest, size);
-}
-
-LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
-                              OpSize size, VolatileKind is_volatile) {
-  // LoadBaseDisp() will emit the correct insn for an atomic load on x86,
-  // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
-
-  LIR* load = LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest,
-                                  size);
-
-  if (UNLIKELY(is_volatile == kVolatile)) {
-    GenMemBarrier(kLoadAny);  // Only a scheduling barrier.
-  }
-
-  return load;
-}
-
-LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                      int displacement, RegStorage r_src, OpSize size,
-                                      int opt_flags) {
-  LIR *store = nullptr;
-  LIR *store2 = nullptr;
-  bool is_array = r_index.Valid();
-  bool pair = r_src.IsPair();
-  bool is64bit = (size == k64) || (size == kDouble);
-  bool consider_non_temporal = false;
-
-  X86OpCode opcode = kX86Nop;
-  switch (size) {
-    case k64:
-      consider_non_temporal = true;
-      FALLTHROUGH_INTENDED;
-    case kDouble:
-      if (r_src.IsFloat()) {
-        opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
-      } else if (!pair) {
-        opcode = is_array ? kX86Mov64AR  : kX86Mov64MR;
-      } else {
-        opcode = is_array ? kX86Mov32AR  : kX86Mov32MR;
-      }
-      // TODO: handle the case where the double store is to an unaligned address.
-      DCHECK_ALIGNED(displacement, 4);
-      break;
-    case kWord:
-      if (cu_->target64) {
-        opcode = is_array ? kX86Mov64AR  : kX86Mov64MR;
-        CHECK_EQ(is_array, false);
-        CHECK_EQ(r_src.IsFloat(), false);
-        consider_non_temporal = true;
-        break;
-      }
-      FALLTHROUGH_INTENDED;  // else fall-through to k32 case
-    case k32:
-    case kSingle:
-    case kReference:
-      opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
-      if (r_src.IsFloat()) {
-        opcode = is_array ? kX86MovssAR : kX86MovssMR;
-        DCHECK(r_src.IsSingle());
-      }
-      DCHECK_ALIGNED(displacement, 4);
-      consider_non_temporal = true;
-      break;
-    case kUnsignedHalf:
-    case kSignedHalf:
-      opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
-      DCHECK_ALIGNED(displacement, 2);
-      break;
-    case kUnsignedByte:
-    case kSignedByte:
-      opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
-      break;
-    default:
-      LOG(FATAL) << "Bad case in StoreBaseIndexedDisp";
-  }
-
-  // Handle the non-temporal store hint here.
-  if (consider_non_temporal && ((opt_flags & MIR_STORE_NON_TEMPORAL) != 0)) {
-    switch (opcode) {
-      // We currently only handle 32/64 bit moves here.
-      case kX86Mov64AR:
-        opcode = kX86Movnti64AR;
-        break;
-      case kX86Mov64MR:
-        opcode = kX86Movnti64MR;
-        break;
-      case kX86Mov32AR:
-        opcode = kX86Movnti32AR;
-        break;
-      case kX86Mov32MR:
-        opcode = kX86Movnti32MR;
-        break;
-      default:
-        // Do nothing here.
-        break;
-    }
-  }
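-  // For example, a 32-bit array store carrying the MIR_STORE_NON_TEMPORAL hint is
-  // rewritten here from kX86Mov32AR to kX86Movnti32AR.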
-
-  if (!is_array) {
-    if (!pair) {
-      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetReg());
-    } else {
-      DCHECK(!r_src.IsFloat());  // Make sure we're not still using a pair here.
-      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetLowReg());
-      store2 = NewLIR3(opcode, r_base.GetReg(), displacement + HIWORD_OFFSET, r_src.GetHighReg());
-    }
-    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
-      DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
-      AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
-                              false /* is_load */, is64bit);
-      if (pair) {
-        AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
-                                false /* is_load */, is64bit);
-      }
-    }
-  } else {
-    if (!pair) {
-      store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
-                      displacement + LOWORD_OFFSET, r_src.GetReg());
-    } else {
-      DCHECK(!r_src.IsFloat());  // Make sure we're not still using a pair here.
-      store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
-                      displacement + LOWORD_OFFSET, r_src.GetLowReg());
-      store2 = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
-                       displacement + HIWORD_OFFSET, r_src.GetHighReg());
-    }
-  }
-  return store;
-}
-
-/* Store value to base + scaled index. */
-LIR* X86Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
-                                  int scale, OpSize size) {
-  return StoreBaseIndexedDisp(r_base, r_index, scale, 0, r_src, size);
-}
-
-LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
-                               VolatileKind is_volatile) {
-  if (UNLIKELY(is_volatile == kVolatile)) {
-    GenMemBarrier(kAnyStore);  // Only a scheduling barrier.
-  }
-
-  // StoreBaseDisp() will emit the correct insn for an atomic store on x86,
-  // assuming r_src is correctly prepared using RegClassForFieldLoadStore().
-  // x86 only allows the low four registers (EAX, ECX, EDX, EBX) to be used as byte
-  // registers; if the input src is not one of them, allocate a temp.
-  bool allocated_temp = false;
-  if (size == kUnsignedByte || size == kSignedByte) {
-    if (!cu_->target64 && !r_src.Low4()) {
-      RegStorage r_input = r_src;
-      r_src = AllocateByteRegister();
-      OpRegCopy(r_src, r_input);
-      allocated_temp = true;
-    }
-  }
-
-  LIR* store = StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size);
-
-  if (UNLIKELY(is_volatile == kVolatile)) {
-    // A volatile load might follow the volatile store so insert a StoreLoad barrier.
-    // This does require a fence, even on x86.
-    GenMemBarrier(kAnyAny);
-  }
-
-  if (allocated_temp) {
-    FreeTemp(r_src);
-  }
-
-  return store;
-}
-
-LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond,
-                                   // Comparison performed directly with memory.
-                                   RegStorage temp_reg ATTRIBUTE_UNUSED,
-                                   RegStorage base_reg,
-                                   int offset,
-                                   int check_value,
-                                   LIR* target,
-                                   LIR** compare) {
-  LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
-      offset, check_value);
-  if (compare != nullptr) {
-    *compare = inst;
-  }
-  LIR* branch = OpCondBranch(cond, target);
-  return branch;
-}
-
-void X86Mir2Lir::AnalyzeMIR(RefCounts* core_counts, MIR* mir, uint32_t weight) {
-  if (cu_->target64) {
-    Mir2Lir::AnalyzeMIR(core_counts, mir, weight);
-    return;
-  }
-
-  int opcode = mir->dalvikInsn.opcode;
-  bool uses_pc_rel_load = false;
-  switch (opcode) {
-    // Instructions referencing doubles.
-    case Instruction::CMPL_DOUBLE:
-    case Instruction::CMPG_DOUBLE:
-    case Instruction::NEG_DOUBLE:
-    case Instruction::ADD_DOUBLE:
-    case Instruction::SUB_DOUBLE:
-    case Instruction::MUL_DOUBLE:
-    case Instruction::DIV_DOUBLE:
-    case Instruction::REM_DOUBLE:
-    case Instruction::ADD_DOUBLE_2ADDR:
-    case Instruction::SUB_DOUBLE_2ADDR:
-    case Instruction::MUL_DOUBLE_2ADDR:
-    case Instruction::DIV_DOUBLE_2ADDR:
-    case Instruction::REM_DOUBLE_2ADDR:
-    case kMirOpFusedCmplDouble:
-    case kMirOpFusedCmpgDouble:
-      uses_pc_rel_load = AnalyzeFPInstruction(opcode, mir);
-      break;
-
-    // Packed switch needs the PC-relative pointer if it's large.
-    case Instruction::PACKED_SWITCH:
-      if (mir_graph_->GetTable(mir, mir->dalvikInsn.vB)[1] > kSmallSwitchThreshold) {
-        uses_pc_rel_load = true;
-      }
-      break;
-
-    case kMirOpConstVector:
-      uses_pc_rel_load = true;
-      break;
-    case kMirOpPackedMultiply:
-    case kMirOpPackedShiftLeft:
-    case kMirOpPackedSignedShiftRight:
-    case kMirOpPackedUnsignedShiftRight:
-      {
-        // Byte emulation requires constants from the literal pool.
-        OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
-        if (opsize == kSignedByte || opsize == kUnsignedByte) {
-          uses_pc_rel_load = true;
-        }
-      }
-      break;
-
-    case Instruction::INVOKE_STATIC:
-    case Instruction::INVOKE_STATIC_RANGE:
-      if (mir_graph_->GetMethodLoweringInfo(mir).IsIntrinsic()) {
-        uses_pc_rel_load = AnalyzeInvokeStaticIntrinsic(mir);
-        break;
-      }
-      FALLTHROUGH_INTENDED;
-    default:
-      Mir2Lir::AnalyzeMIR(core_counts, mir, weight);
-      break;
-  }
-
-  if (uses_pc_rel_load) {
-    DCHECK(pc_rel_temp_ != nullptr);
-    core_counts[SRegToPMap(pc_rel_temp_->s_reg_low)].count += weight;
-  }
-}
-
-bool X86Mir2Lir::AnalyzeFPInstruction(int opcode, MIR* mir) {
-  DCHECK(!cu_->target64);
-  // Look at all the uses, and see if they are double constants.
-  uint64_t attrs = MIRGraph::GetDataFlowAttributes(static_cast<Instruction::Code>(opcode));
-  int next_sreg = 0;
-  if (attrs & DF_UA) {
-    if (attrs & DF_A_WIDE) {
-      if (AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg))) {
-        return true;
-      }
-      next_sreg += 2;
-    } else {
-      next_sreg++;
-    }
-  }
-  if (attrs & DF_UB) {
-    if (attrs & DF_B_WIDE) {
-      if (AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg))) {
-        return true;
-      }
-      next_sreg += 2;
-    } else {
-      next_sreg++;
-    }
-  }
-  if (attrs & DF_UC) {
-    if (attrs & DF_C_WIDE) {
-      if (AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg))) {
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-inline bool X86Mir2Lir::AnalyzeDoubleUse(RegLocation use) {
-  // If this is a double literal, we will want it in the literal pool on 32b platforms.
-  DCHECK(!cu_->target64);
-  return use.is_const;
-}
-
-bool X86Mir2Lir::AnalyzeInvokeStaticIntrinsic(MIR* mir) {
-  // 64 bit RIP addressing doesn't need this analysis.
-  DCHECK(!cu_->target64);
-
-  // Retrieve the type of the intrinsic.
-  MethodReference method_ref = mir_graph_->GetMethodLoweringInfo(mir).GetTargetMethod();
-  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
-  DexFileMethodInliner* method_inliner =
-    cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(method_ref.dex_file);
-  InlineMethod method;
-  bool is_intrinsic = method_inliner->IsIntrinsic(method_ref.dex_method_index, &method);
-  DCHECK(is_intrinsic);
-
-  switch (method.opcode) {
-    case kIntrinsicAbsDouble:
-    case kIntrinsicMinMaxDouble:
-      return true;
-    default:
-      return false;
-  }
-}
-
-RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc) {
-  loc = UpdateLoc(loc);
-  if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
-    if (GetRegInfo(loc.reg)->IsTemp()) {
-      Clobber(loc.reg);
-      FreeTemp(loc.reg);
-      loc.reg = RegStorage::InvalidReg();
-      loc.location = kLocDalvikFrame;
-    }
-  }
-  DCHECK(CheckCorePoolSanity());
-  return loc;
-}
-
-RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc) {
-  loc = UpdateLocWide(loc);
-  if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
-    if (GetRegInfo(loc.reg)->IsTemp()) {
-      Clobber(loc.reg);
-      FreeTemp(loc.reg);
-      loc.reg = RegStorage::InvalidReg();
-      loc.location = kLocDalvikFrame;
-    }
-  }
-  DCHECK(CheckCorePoolSanity());
-  return loc;
-}
-
-LIR* X86Mir2Lir::InvokeTrampoline(OpKind op,
-                                  // Call to absolute memory location doesn't
-                                  // need a temporary target register.
-                                  RegStorage r_tgt ATTRIBUTE_UNUSED,
-                                  QuickEntrypointEnum trampoline) {
-  if (cu_->target64) {
-    return OpThreadMem(op, GetThreadOffset<8>(trampoline));
-  } else {
-    return OpThreadMem(op, GetThreadOffset<4>(trampoline));
-  }
-}
-
-void X86Mir2Lir::CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs) {
-  // Start with the default counts.
-  Mir2Lir::CountRefs(core_counts, fp_counts, num_regs);
-
-  if (pc_rel_temp_ != nullptr) {
-    // If the dex cache array base temp is used only once outside any loops (weight = 1),
-    // avoid the promotion. Otherwise boost the weight by a factor of 2: the full
-    // PC-relative load sequence is 3 instructions long, so promoting the PC base saves
-    // 2 instructions per use.
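-    // For example, a raw count of 3 becomes 6 here, while a count of 1 is zeroed so a
-    // single-use base never wins promotion.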
-    int p_map_idx = SRegToPMap(pc_rel_temp_->s_reg_low);
-    if (core_counts[p_map_idx].count == 1) {
-      core_counts[p_map_idx].count = 0;
-    } else {
-      core_counts[p_map_idx].count *= 2;
-    }
-  }
-}
-
-void X86Mir2Lir::DoPromotion() {
-  if (!cu_->target64) {
-    pc_rel_temp_ = mir_graph_->GetNewCompilerTemp(kCompilerTempBackend, false);
-  }
-
-  Mir2Lir::DoPromotion();
-
-  if (pc_rel_temp_ != nullptr) {
-    // If the dex cache array base temp was promoted, remember the register; either way,
-    // remove the temp's stack location to avoid unnecessarily bloating the stack.
-    pc_rel_base_reg_ = mir_graph_->reg_location_[pc_rel_temp_->s_reg_low].reg;
-    DCHECK(!pc_rel_base_reg_.Valid() || !pc_rel_base_reg_.IsFloat());
-    mir_graph_->RemoveLastCompilerTemp(kCompilerTempBackend, false, pc_rel_temp_);
-    pc_rel_temp_ = nullptr;
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
deleted file mode 100644
index 8cd6574..0000000
--- a/compiler/dex/quick/x86/x86_lir.h
+++ /dev/null
@@ -1,739 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_
-#define ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_
-
-#include "dex/reg_location.h"
-#include "dex/reg_storage.h"
-
-namespace art {
-
-/*
- * Runtime register conventions. We consider x86, x86-64 and x32 (the 32-bit mode of x86-64).
- * Each ABI has different conventions, and we capture those here. Changing something that is callee save and
- * making it caller save places a burden on up-calls to save/restore the callee save register,
- * however, there are few registers that are callee save in the ABI. Changing something that is
- * caller save and making it callee save places a burden on down-calls to save/restore the callee
- * save register. For these reasons we aim to match native conventions for caller and callee save.
- * On x86 only the first 4 registers can be used for byte operations; for this reason they are
- * preferred as temporary scratch registers.
- *
- * General Purpose Register:
- *  Native: x86    | x86-64 / x32 | ART x86                                         | ART x86-64
- *  r0/eax: caller | caller       | caller, Method*, scratch, return value          | caller, scratch, return value
- *  r1/ecx: caller | caller, arg4 | caller, arg1, scratch                           | caller, arg3, scratch
- *  r2/edx: caller | caller, arg3 | caller, arg2, scratch, high half of long return | caller, arg2, scratch
- *  r3/ebx: callEE | callEE       | callER, arg3, scratch                           | callee, promotable
- *  r4/esp: stack pointer
- *  r5/ebp: callee | callee       | callee, promotable                              | callee, promotable
- *  r6/esi: callEE | callER, arg2 | callee, promotable                              | caller, arg1, scratch
- *  r7/edi: callEE | callER, arg1 | callee, promotable                              | caller, Method*, scratch
- *  ---  x86-64/x32 registers
- *  Native: x86-64 / x32      | ART
- *  r8:     caller save, arg5 | caller, arg4, scratch
- *  r9:     caller save, arg6 | caller, arg5, scratch
- *  r10:    caller save       | caller, scratch
- *  r11:    caller save       | caller, scratch
- *  r12:    callee save       | callee, available for register promotion (promotable)
- *  r13:    callee save       | callee, available for register promotion (promotable)
- *  r14:    callee save       | callee, available for register promotion (promotable)
- *  r15:    callee save       | callee, available for register promotion (promotable)
- *
- * There is no rSELF; instead, on x86 fs: has the base address of Thread::Current, whereas on
- * x86-64/x32 gs: holds it.
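- * (For example, the TR-form opcodes defined below, such as kX86Mov32TR, read through
- * fs:[disp] on x86 and gs:[disp] on x86-64, using THREAD_PREFIX / THREAD_PREFIX_GS.)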
- *
- * For floating point we don't support CPUs without SSE2 support (i.e. we require CPUs newer than the PIII):
- *  Native: x86  | x86-64 / x32 | ART x86                          | ART x86-64
- *  XMM0: caller | caller, arg1 | caller, arg1, float return value | caller, arg1, float return value
- *  XMM1: caller | caller, arg2 | caller, arg2, scratch            | caller, arg2, scratch
- *  XMM2: caller | caller, arg3 | caller, arg3, scratch            | caller, arg3, scratch
- *  XMM3: caller | caller, arg4 | caller, arg4, scratch            | caller, arg4, scratch
- *  XMM4: caller | caller, arg5 | caller, scratch                  | caller, arg5, scratch
- *  XMM5: caller | caller, arg6 | caller, scratch                  | caller, arg6, scratch
- *  XMM6: caller | caller, arg7 | caller, scratch                  | caller, arg7, scratch
- *  XMM7: caller | caller, arg8 | caller, scratch                  | caller, arg8, scratch
- *  ---  x86-64/x32 registers
- *  XMM8 .. 11: caller save available as scratch registers for ART.
- *  XMM12 .. 15: callee save available as promoted registers for ART.
- *  Treating XMM12..15 as callee save applies to QCG only; for others they are caller save.
- *
- * X87 is a necessary evil outside of ART code for x86:
- *  ST0:  x86 float/double native return value, caller save
- *  ST1 .. ST7: caller save
- *
- *  Stack frame diagram (stack grows down, higher addresses at top):
- *  For a more detailed view of each region see stack.h.
- *
- * +---------------------------+
- * | IN[ins-1]                 |  {Note: resides in caller's frame}
- * |       .                   |
- * | IN[0]                     |
- * | caller's ArtMethod*       |
- * +===========================+  {Note: start of callee's frame}
- * | return address            |  {pushed by call}
- * | spill region              |  {variable sized}
- * +---------------------------+
- * | ...filler 4-bytes...      |  {Note: used as 2nd word of V[locals-1] if long}
- * +---------------------------+
- * | V[locals-1]               |
- * | V[locals-2]               |
- * |      .                    |
- * |      .                    |
- * | V[1]                      |
- * | V[0]                      |
- * +---------------------------+
- * | 0 to 12-bytes padding     |
- * +---------------------------+
- * | compiler temp region      |
- * +---------------------------+
- * | OUT[outs-1]               |
- * | OUT[outs-2]               |
- * |       .                   |
- * | OUT[0]                    |
- * | ArtMethod*                | <<== sp w/ 16-byte alignment
- * +===========================+
- */
-
-enum X86ResourceEncodingPos {
-  kX86GPReg0   = 0,
-  kX86RegSP    = 4,
-  kX86FPReg0   = 16,  // xmm0 .. xmm7/xmm15.
-  kX86FPRegEnd = 32,
-  kX86FPStack  = 33,
-  kX86RegEnd   = kX86FPStack,
-};
-
-// FIXME: for 64-bit, perhaps add an X86_64NativeRegisterPool enum?
-enum X86NativeRegisterPool {
-  r0             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
-  r0q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 0,
-  rAX            = r0,
-  r1             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
-  r1q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 1,
-  rCX            = r1,
-  r2             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
-  r2q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 2,
-  rDX            = r2,
-  r3             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 3,
-  r3q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 3,
-  rBX            = r3,
-  r4sp_32        = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 4,
-  rX86_SP_32     = r4sp_32,
-  r4sp_64        = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 4,
-  rX86_SP_64     = r4sp_64,
-  r5             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 5,
-  r5q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 5,
-  rBP            = r5,
-  r5sib_no_base  = r5,
-  r6             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 6,
-  r6q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 6,
-  rSI            = r6,
-  r7             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 7,
-  r7q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 7,
-  rDI            = r7,
-  r8             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 8,
-  r8q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 8,
-  r9             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 9,
-  r9q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 9,
-  r10            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
-  r10q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 10,
-  r11            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
-  r11q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 11,
-  r12            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
-  r12q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 12,
-  r13            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
-  r13q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 13,
-  r14            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
-  r14q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 14,
-  r15            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
-  r15q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 15,
-  // fake return address register for core spill mask.
-  rRET           = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,
-
-  // xmm registers, single precision view.
-  fr0  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 0,
-  fr1  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 1,
-  fr2  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 2,
-  fr3  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 3,
-  fr4  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 4,
-  fr5  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 5,
-  fr6  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 6,
-  fr7  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 7,
-  fr8  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 8,
-  fr9  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 9,
-  fr10 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 10,
-  fr11 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 11,
-  fr12 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 12,
-  fr13 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 13,
-  fr14 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 14,
-  fr15 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 15,
-
-  // xmm registers, double precision aliases.
-  dr0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
-  dr1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 1,
-  dr2  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
-  dr3  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 3,
-  dr4  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
-  dr5  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 5,
-  dr6  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
-  dr7  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 7,
-  dr8  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
-  dr9  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 9,
-  dr10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
-  dr11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
-  dr12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
-  dr13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
-  dr14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
-  dr15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
-
-  // xmm registers, quad precision aliases
-  xr0  = RegStorage::k128BitSolo | 0,
-  xr1  = RegStorage::k128BitSolo | 1,
-  xr2  = RegStorage::k128BitSolo | 2,
-  xr3  = RegStorage::k128BitSolo | 3,
-  xr4  = RegStorage::k128BitSolo | 4,
-  xr5  = RegStorage::k128BitSolo | 5,
-  xr6  = RegStorage::k128BitSolo | 6,
-  xr7  = RegStorage::k128BitSolo | 7,
-  xr8  = RegStorage::k128BitSolo | 8,
-  xr9  = RegStorage::k128BitSolo | 9,
-  xr10 = RegStorage::k128BitSolo | 10,
-  xr11 = RegStorage::k128BitSolo | 11,
-  xr12 = RegStorage::k128BitSolo | 12,
-  xr13 = RegStorage::k128BitSolo | 13,
-  xr14 = RegStorage::k128BitSolo | 14,
-  xr15 = RegStorage::k128BitSolo | 15,
-
-  // Special value for RIP 64 bit addressing.
-  kRIPReg = 255,
-
-  // TODO: as needed, add 256, 512 and 1024-bit xmm views.
-};
-
-constexpr RegStorage rs_r0(RegStorage::kValid | r0);
-constexpr RegStorage rs_r0q(RegStorage::kValid | r0q);
-constexpr RegStorage rs_rAX = rs_r0;
-constexpr RegStorage rs_r1(RegStorage::kValid | r1);
-constexpr RegStorage rs_r1q(RegStorage::kValid | r1q);
-constexpr RegStorage rs_rCX = rs_r1;
-constexpr RegStorage rs_r2(RegStorage::kValid | r2);
-constexpr RegStorage rs_r2q(RegStorage::kValid | r2q);
-constexpr RegStorage rs_rDX = rs_r2;
-constexpr RegStorage rs_r3(RegStorage::kValid | r3);
-constexpr RegStorage rs_r3q(RegStorage::kValid | r3q);
-constexpr RegStorage rs_rBX = rs_r3;
-constexpr RegStorage rs_rX86_SP_64(RegStorage::kValid | r4sp_64);
-constexpr RegStorage rs_rX86_SP_32(RegStorage::kValid | r4sp_32);
-static_assert(rs_rX86_SP_64.GetRegNum() == rs_rX86_SP_32.GetRegNum(), "Unexpected mismatch");
-constexpr RegStorage rs_r5(RegStorage::kValid | r5);
-constexpr RegStorage rs_r5q(RegStorage::kValid | r5q);
-constexpr RegStorage rs_rBP = rs_r5;
-constexpr RegStorage rs_r6(RegStorage::kValid | r6);
-constexpr RegStorage rs_r6q(RegStorage::kValid | r6q);
-constexpr RegStorage rs_rSI = rs_r6;
-constexpr RegStorage rs_r7(RegStorage::kValid | r7);
-constexpr RegStorage rs_r7q(RegStorage::kValid | r7q);
-constexpr RegStorage rs_rDI = rs_r7;
-constexpr RegStorage rs_rRET(RegStorage::kValid | rRET);
-constexpr RegStorage rs_r8(RegStorage::kValid | r8);
-constexpr RegStorage rs_r8q(RegStorage::kValid | r8q);
-constexpr RegStorage rs_r9(RegStorage::kValid | r9);
-constexpr RegStorage rs_r9q(RegStorage::kValid | r9q);
-constexpr RegStorage rs_r10(RegStorage::kValid | r10);
-constexpr RegStorage rs_r10q(RegStorage::kValid | r10q);
-constexpr RegStorage rs_r11(RegStorage::kValid | r11);
-constexpr RegStorage rs_r11q(RegStorage::kValid | r11q);
-constexpr RegStorage rs_r12(RegStorage::kValid | r12);
-constexpr RegStorage rs_r12q(RegStorage::kValid | r12q);
-constexpr RegStorage rs_r13(RegStorage::kValid | r13);
-constexpr RegStorage rs_r13q(RegStorage::kValid | r13q);
-constexpr RegStorage rs_r14(RegStorage::kValid | r14);
-constexpr RegStorage rs_r14q(RegStorage::kValid | r14q);
-constexpr RegStorage rs_r15(RegStorage::kValid | r15);
-constexpr RegStorage rs_r15q(RegStorage::kValid | r15q);
-
-constexpr RegStorage rs_fr0(RegStorage::kValid | fr0);
-constexpr RegStorage rs_fr1(RegStorage::kValid | fr1);
-constexpr RegStorage rs_fr2(RegStorage::kValid | fr2);
-constexpr RegStorage rs_fr3(RegStorage::kValid | fr3);
-constexpr RegStorage rs_fr4(RegStorage::kValid | fr4);
-constexpr RegStorage rs_fr5(RegStorage::kValid | fr5);
-constexpr RegStorage rs_fr6(RegStorage::kValid | fr6);
-constexpr RegStorage rs_fr7(RegStorage::kValid | fr7);
-constexpr RegStorage rs_fr8(RegStorage::kValid | fr8);
-constexpr RegStorage rs_fr9(RegStorage::kValid | fr9);
-constexpr RegStorage rs_fr10(RegStorage::kValid | fr10);
-constexpr RegStorage rs_fr11(RegStorage::kValid | fr11);
-constexpr RegStorage rs_fr12(RegStorage::kValid | fr12);
-constexpr RegStorage rs_fr13(RegStorage::kValid | fr13);
-constexpr RegStorage rs_fr14(RegStorage::kValid | fr14);
-constexpr RegStorage rs_fr15(RegStorage::kValid | fr15);
-
-constexpr RegStorage rs_dr0(RegStorage::kValid | dr0);
-constexpr RegStorage rs_dr1(RegStorage::kValid | dr1);
-constexpr RegStorage rs_dr2(RegStorage::kValid | dr2);
-constexpr RegStorage rs_dr3(RegStorage::kValid | dr3);
-constexpr RegStorage rs_dr4(RegStorage::kValid | dr4);
-constexpr RegStorage rs_dr5(RegStorage::kValid | dr5);
-constexpr RegStorage rs_dr6(RegStorage::kValid | dr6);
-constexpr RegStorage rs_dr7(RegStorage::kValid | dr7);
-constexpr RegStorage rs_dr8(RegStorage::kValid | dr8);
-constexpr RegStorage rs_dr9(RegStorage::kValid | dr9);
-constexpr RegStorage rs_dr10(RegStorage::kValid | dr10);
-constexpr RegStorage rs_dr11(RegStorage::kValid | dr11);
-constexpr RegStorage rs_dr12(RegStorage::kValid | dr12);
-constexpr RegStorage rs_dr13(RegStorage::kValid | dr13);
-constexpr RegStorage rs_dr14(RegStorage::kValid | dr14);
-constexpr RegStorage rs_dr15(RegStorage::kValid | dr15);
-
-constexpr RegStorage rs_xr0(RegStorage::kValid | xr0);
-constexpr RegStorage rs_xr1(RegStorage::kValid | xr1);
-constexpr RegStorage rs_xr2(RegStorage::kValid | xr2);
-constexpr RegStorage rs_xr3(RegStorage::kValid | xr3);
-constexpr RegStorage rs_xr4(RegStorage::kValid | xr4);
-constexpr RegStorage rs_xr5(RegStorage::kValid | xr5);
-constexpr RegStorage rs_xr6(RegStorage::kValid | xr6);
-constexpr RegStorage rs_xr7(RegStorage::kValid | xr7);
-constexpr RegStorage rs_xr8(RegStorage::kValid | xr8);
-constexpr RegStorage rs_xr9(RegStorage::kValid | xr9);
-constexpr RegStorage rs_xr10(RegStorage::kValid | xr10);
-constexpr RegStorage rs_xr11(RegStorage::kValid | xr11);
-constexpr RegStorage rs_xr12(RegStorage::kValid | xr12);
-constexpr RegStorage rs_xr13(RegStorage::kValid | xr13);
-constexpr RegStorage rs_xr14(RegStorage::kValid | xr14);
-constexpr RegStorage rs_xr15(RegStorage::kValid | xr15);
-
-constexpr RegStorage rs_rX86_RET0 = rs_rAX;
-constexpr RegStorage rs_rX86_RET1 = rs_rDX;
-
-// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
-const RegLocation x86_loc_c_return
-    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
-     RegStorage(RegStorage::k32BitSolo, rAX), INVALID_SREG, INVALID_SREG};
-const RegLocation x86_loc_c_return_wide
-    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
-     RegStorage(RegStorage::k64BitPair, rAX, rDX), INVALID_SREG, INVALID_SREG};
-const RegLocation x86_loc_c_return_ref
-    {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
-     RegStorage(RegStorage::k32BitSolo, rAX), INVALID_SREG, INVALID_SREG};
-const RegLocation x86_64_loc_c_return_ref
-    {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
-     RegStorage(RegStorage::k64BitSolo, rAX), INVALID_SREG, INVALID_SREG};
-const RegLocation x86_64_loc_c_return_wide
-    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
-     RegStorage(RegStorage::k64BitSolo, rAX), INVALID_SREG, INVALID_SREG};
-const RegLocation x86_loc_c_return_float
-    {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1,
-     RegStorage(RegStorage::k32BitSolo, fr0), INVALID_SREG, INVALID_SREG};
-const RegLocation x86_loc_c_return_double
-    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
-     RegStorage(RegStorage::k64BitSolo, dr0), INVALID_SREG, INVALID_SREG};
-
-/*
- * The following enum defines the list of X86 instructions supported by the
- * assembler. Their corresponding EncodingMap positions are defined in
- * Assemble.cc.
- */
-enum X86OpCode {
-  kX86First = 0,
-  kX8632BitData = kX86First,  // data [31..0].
-  kX86Bkpt,
-  kX86Nop,
-  // Define groups of binary operations
-  // MR - Memory Register  - opcode [base + disp], reg
-  //             - lir operands - 0: base, 1: disp, 2: reg
-  // AR - Array Register   - opcode [base + index * scale + disp], reg
-  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
-  // TR - Thread Register  - opcode fs:[disp], reg - where fs: is equal to Thread::Current()
-  //             - lir operands - 0: disp, 1: reg
-  // RR - Register Register  - opcode reg1, reg2
-  //             - lir operands - 0: reg1, 1: reg2
-  // RM - Register Memory  - opcode reg, [base + disp]
-  //             - lir operands - 0: reg, 1: base, 2: disp
-  // RA - Register Array   - opcode reg, [base + index * scale + disp]
-  //             - lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
-  // RT - Register Thread  - opcode reg, fs:[disp] - where fs: is equal to Thread::Current()
-  //             - lir operands - 0: reg, 1: disp
-  // RI - Register Immediate - opcode reg, #immediate
-  //             - lir operands - 0: reg, 1: immediate
-  // MI - Memory Immediate   - opcode [base + disp], #immediate
-  //             - lir operands - 0: base, 1: disp, 2: immediate
-  // AI - Array Immediate  - opcode [base + index * scale + disp], #immediate
-  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
-  // TI - Thread Immediate  - opcode fs:[disp], imm - where fs: is equal to Thread::Current()
-  //             - lir operands - 0: disp, 1: imm
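-  // For example, kX86Add32MR below encodes "add [base + disp], reg32" with lir
-  // operands 0: base, 1: disp, 2: reg, while kX86Add32RI encodes "add reg32, #imm"
-  // with lir operands 0: reg, 1: immediate.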
-#define BinaryOpCode(opcode) \
-  opcode ## 8MR, opcode ## 8AR, opcode ## 8TR, \
-  opcode ## 8RR, opcode ## 8RM, opcode ## 8RA, opcode ## 8RT, \
-  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, opcode ## 8TI, \
-  opcode ## 16MR, opcode ## 16AR, opcode ## 16TR, \
-  opcode ## 16RR, opcode ## 16RM, opcode ## 16RA, opcode ## 16RT, \
-  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, opcode ## 16TI, \
-  opcode ## 16RI8, opcode ## 16MI8, opcode ## 16AI8, opcode ## 16TI8, \
-  opcode ## 32MR, opcode ## 32AR, opcode ## 32TR,  \
-  opcode ## 32RR, opcode ## 32RM, opcode ## 32RA, opcode ## 32RT, \
-  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, opcode ## 32TI, \
-  opcode ## 32RI8, opcode ## 32MI8, opcode ## 32AI8, opcode ## 32TI8, \
-  opcode ## 64MR, opcode ## 64AR, opcode ## 64TR,  \
-  opcode ## 64RR, opcode ## 64RM, opcode ## 64RA, opcode ## 64RT, \
-  opcode ## 64RI, opcode ## 64MI, opcode ## 64AI, opcode ## 64TI, \
-  opcode ## 64RI8, opcode ## 64MI8, opcode ## 64AI8, opcode ## 64TI8
-  BinaryOpCode(kX86Add),
-  BinaryOpCode(kX86Or),
-  BinaryOpCode(kX86Adc),
-  BinaryOpCode(kX86Sbb),
-  BinaryOpCode(kX86And),
-  BinaryOpCode(kX86Sub),
-  BinaryOpCode(kX86Xor),
-  BinaryOpCode(kX86Cmp),
-#undef BinaryOpCode
-  kX86Imul16RRI, kX86Imul16RMI, kX86Imul16RAI,
-  kX86Imul32RRI, kX86Imul32RMI, kX86Imul32RAI,
-  kX86Imul32RRI8, kX86Imul32RMI8, kX86Imul32RAI8,
-  kX86Imul64RRI, kX86Imul64RMI, kX86Imul64RAI,
-  kX86Imul64RRI8, kX86Imul64RMI8, kX86Imul64RAI8,
-  kX86Mov8MR, kX86Mov8AR, kX86Mov8TR,
-  kX86Mov8RR, kX86Mov8RM, kX86Mov8RA, kX86Mov8RT,
-  kX86Mov8RI, kX86Mov8MI, kX86Mov8AI, kX86Mov8TI,
-  kX86Mov16MR, kX86Mov16AR, kX86Mov16TR,
-  kX86Mov16RR, kX86Mov16RM, kX86Mov16RA, kX86Mov16RT,
-  kX86Mov16RI, kX86Mov16MI, kX86Mov16AI, kX86Mov16TI,
-  kX86Mov32MR, kX86Mov32AR, kX86Movnti32MR, kX86Movnti32AR, kX86Mov32TR,
-  kX86Mov32RR, kX86Mov32RM, kX86Mov32RA, kX86Mov32RT,
-  kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI,
-  kX86Lea32RM,
-  kX86Lea32RA,
-  kX86Mov64MR, kX86Mov64AR, kX86Movnti64MR, kX86Movnti64AR, kX86Mov64TR,
-  kX86Mov64RR, kX86Mov64RM, kX86Mov64RA, kX86Mov64RT,
-  kX86Mov64RI32, kX86Mov64RI64, kX86Mov64MI, kX86Mov64AI, kX86Mov64TI,
-  kX86Lea64RM,
-  kX86Lea64RA,
-  // RRC - Register Register ConditionCode - cond_opcode reg1, reg2
-  //             - lir operands - 0: reg1, 1: reg2, 2: CC
-  kX86Cmov32RRC,
-  kX86Cmov64RRC,
-  // RMC - Register Memory ConditionCode - cond_opcode reg1, [base + disp]
-  //             - lir operands - 0: reg1, 1: base, 2: disp 3: CC
-  kX86Cmov32RMC,
-  kX86Cmov64RMC,
-
-  // RC - Register CL - opcode reg, CL
-  //          - lir operands - 0: reg, 1: CL
-  // MC - Memory CL   - opcode [base + disp], CL
-  //          - lir operands - 0: base, 1: disp, 2: CL
-  // AC - Array CL  - opcode [base + index * scale + disp], CL
-  //          - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: CL
-#define BinaryShiftOpCode(opcode) \
-  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, \
-  opcode ## 8RC, opcode ## 8MC, opcode ## 8AC, \
-  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, \
-  opcode ## 16RC, opcode ## 16MC, opcode ## 16AC, \
-  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, \
-  opcode ## 32RC, opcode ## 32MC, opcode ## 32AC, \
-  opcode ## 64RI, opcode ## 64MI, opcode ## 64AI, \
-  opcode ## 64RC, opcode ## 64MC, opcode ## 64AC
-  BinaryShiftOpCode(kX86Rol),
-  BinaryShiftOpCode(kX86Ror),
-  BinaryShiftOpCode(kX86Rcl),
-  BinaryShiftOpCode(kX86Rcr),
-  BinaryShiftOpCode(kX86Sal),
-  BinaryShiftOpCode(kX86Shr),
-  BinaryShiftOpCode(kX86Sar),
-#undef BinaryShiftOpCode
-  kX86Cmc,
-  kX86Shld32RRI,
-  kX86Shld32RRC,
-  kX86Shld32MRI,
-  kX86Shrd32RRI,
-  kX86Shrd32RRC,
-  kX86Shrd32MRI,
-  kX86Shld64RRI,
-  kX86Shld64MRI,
-  kX86Shrd64RRI,
-  kX86Shrd64MRI,
-#define UnaryOpcode(opcode, reg, mem, array) \
-  opcode ## 8 ## reg, opcode ## 8 ## mem, opcode ## 8 ## array, \
-  opcode ## 16 ## reg, opcode ## 16 ## mem, opcode ## 16 ## array, \
-  opcode ## 32 ## reg, opcode ## 32 ## mem, opcode ## 32 ## array, \
-  opcode ## 64 ## reg, opcode ## 64 ## mem, opcode ## 64 ## array
-  UnaryOpcode(kX86Test, RI, MI, AI),
-  kX86Test32RR,
-  kX86Test64RR,
-  kX86Test32RM,
-  UnaryOpcode(kX86Not, R, M, A),
-  UnaryOpcode(kX86Neg, R, M, A),
-  UnaryOpcode(kX86Mul,  DaR, DaM, DaA),
-  UnaryOpcode(kX86Imul, DaR, DaM, DaA),
-  UnaryOpcode(kX86Divmod,  DaR, DaM, DaA),
-  UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
-  kx86Cdq32Da,
-  kx86Cqo64Da,
-  kX86Bswap32R,
-  kX86Bswap64R,
-  kX86Push32R, kX86Pop32R,
-#undef UnaryOpcode
-#define Binary0fOpCode(opcode) \
-  opcode ## RR, opcode ## RM, opcode ## RA
-  Binary0fOpCode(kX86Movsd),
-  kX86MovsdMR,
-  kX86MovsdAR,
-  Binary0fOpCode(kX86Movss),
-  kX86MovssMR,
-  kX86MovssAR,
-  Binary0fOpCode(kX86Cvtsi2sd),  // int to double
-  Binary0fOpCode(kX86Cvtsi2ss),  // int to float
-  Binary0fOpCode(kX86Cvtsqi2sd),  // long to double
-  Binary0fOpCode(kX86Cvtsqi2ss),  // long to float
-  Binary0fOpCode(kX86Cvttsd2si),  // truncating double to int
-  Binary0fOpCode(kX86Cvttss2si),  // truncating float to int
-  Binary0fOpCode(kX86Cvttsd2sqi),  // truncating double to long
-  Binary0fOpCode(kX86Cvttss2sqi),  // truncating float to long
-  Binary0fOpCode(kX86Cvtsd2si),  // rounding double to int
-  Binary0fOpCode(kX86Cvtss2si),  // rounding float to int
-  Binary0fOpCode(kX86Ucomisd),  // unordered double compare
-  Binary0fOpCode(kX86Ucomiss),  // unordered float compare
-  Binary0fOpCode(kX86Comisd),   // double compare
-  Binary0fOpCode(kX86Comiss),   // float compare
-  Binary0fOpCode(kX86Orpd),     // double logical OR
-  Binary0fOpCode(kX86Orps),     // float logical OR
-  Binary0fOpCode(kX86Andpd),    // double logical AND
-  Binary0fOpCode(kX86Andps),    // float logical AND
-  Binary0fOpCode(kX86Xorpd),    // double logical XOR
-  Binary0fOpCode(kX86Xorps),    // float logical XOR
-  Binary0fOpCode(kX86Addsd),    // double ADD
-  Binary0fOpCode(kX86Addss),    // float ADD
-  Binary0fOpCode(kX86Mulsd),    // double multiply
-  Binary0fOpCode(kX86Mulss),    // float multiply
-  Binary0fOpCode(kX86Cvtsd2ss),  // double to float
-  Binary0fOpCode(kX86Cvtss2sd),  // float to double
-  Binary0fOpCode(kX86Subsd),    // double subtract
-  Binary0fOpCode(kX86Subss),    // float subtract
-  Binary0fOpCode(kX86Divsd),    // double divide
-  Binary0fOpCode(kX86Divss),    // float divide
-  Binary0fOpCode(kX86Punpcklbw),  // Interleave low-order bytes
-  Binary0fOpCode(kX86Punpcklwd),  // Interleave low-order single words (16-bits)
-  Binary0fOpCode(kX86Punpckldq),  // Interleave low-order double words (32-bit)
-  Binary0fOpCode(kX86Punpcklqdq),  // Interleave low-order quad word
-  Binary0fOpCode(kX86Sqrtsd),   // square root
-  Binary0fOpCode(kX86Pmulld),   // parallel integer multiply 32 bits x 4
-  Binary0fOpCode(kX86Pmullw),   // parallel integer multiply 16 bits x 8
-  Binary0fOpCode(kX86Pmuludq),   // parallel unsigned multiply of 32-bit integers, storing 64-bit results
-  Binary0fOpCode(kX86Mulps),    // parallel FP multiply 32 bits x 4
-  Binary0fOpCode(kX86Mulpd),    // parallel FP multiply 64 bits x 2
-  Binary0fOpCode(kX86Paddb),    // parallel integer addition 8 bits x 16
-  Binary0fOpCode(kX86Paddw),    // parallel integer addition 16 bits x 8
-  Binary0fOpCode(kX86Paddd),    // parallel integer addition 32 bits x 4
-  Binary0fOpCode(kX86Paddq),    // parallel integer addition 64 bits x 2
-  Binary0fOpCode(kX86Psadbw),   // computes sum of absolute differences for unsigned byte integers
-  Binary0fOpCode(kX86Addps),    // parallel FP addition 32 bits x 4
-  Binary0fOpCode(kX86Addpd),    // parallel FP addition 64 bits x 2
-  Binary0fOpCode(kX86Psubb),    // parallel integer subtraction 8 bits x 16
-  Binary0fOpCode(kX86Psubw),    // parallel integer subtraction 16 bits x 8
-  Binary0fOpCode(kX86Psubd),    // parallel integer subtraction 32 bits x 4
-  Binary0fOpCode(kX86Psubq),    // parallel integer subtraction 64 bits x 2
-  Binary0fOpCode(kX86Subps),    // parallel FP subtraction 32 bits x 4
-  Binary0fOpCode(kX86Subpd),    // parallel FP subtraction 64 bits x 2
-  Binary0fOpCode(kX86Pand),     // parallel AND 128 bits x 1
-  Binary0fOpCode(kX86Por),      // parallel OR 128 bits x 1
-  Binary0fOpCode(kX86Pxor),     // parallel XOR 128 bits x 1
-  Binary0fOpCode(kX86Phaddw),   // parallel horizontal addition 16 bits x 8
-  Binary0fOpCode(kX86Phaddd),   // parallel horizontal addition 32 bits x 4
-  Binary0fOpCode(kX86Haddpd),   // parallel FP horizontal addition 64 bits x 2
-  Binary0fOpCode(kX86Haddps),   // parallel FP horizontal addition 32 bits x 4
-  kX86PextrbRRI,                // Extract 8 bits from XMM into GPR
-  kX86PextrwRRI,                // Extract 16 bits from XMM into GPR
-  kX86PextrdRRI,                // Extract 32 bits from XMM into GPR
-  kX86PextrbMRI,                // Extract 8 bits from XMM into memory
-  kX86PextrwMRI,                // Extract 16 bits from XMM into memory
-  kX86PextrdMRI,                // Extract 32 bits from XMM into memory
-  kX86PshuflwRRI,               // Shuffle 16 bits in lower 64 bits of XMM.
-  kX86PshufdRRI,                // Shuffle 32 bits in XMM.
-  kX86ShufpsRRI,                // FP Shuffle 32 bits in XMM.
-  kX86ShufpdRRI,                // FP Shuffle 64 bits in XMM.
-  kX86PsrawRI,                  // signed right shift of floating point registers 16 bits x 8
-  kX86PsradRI,                  // signed right shift of floating point registers 32 bits x 4
-  kX86PsrlwRI,                  // logical right shift of floating point registers 16 bits x 8
-  kX86PsrldRI,                  // logical right shift of floating point registers 32 bits x 4
-  kX86PsrlqRI,                  // logical right shift of floating point registers 64 bits x 2
-  kX86PsrldqRI,                 // logical shift of 128-bit vector register, immediate in bytes
-  kX86PsllwRI,                  // left shift of floating point registers 16 bits x 8
-  kX86PslldRI,                  // left shift of floating point registers 32 bits x 4
-  kX86PsllqRI,                  // left shift of floating point registers 64 bits x 2
-  kX86Fild32M,                  // push 32-bit integer on x87 stack
-  kX86Fild64M,                  // push 64-bit integer on x87 stack
-  kX86Fld32M,                   // push float on x87 stack
-  kX86Fld64M,                   // push double on x87 stack
-  kX86Fstp32M,                  // pop top x87 fp stack and do 32-bit store
-  kX86Fstp64M,                  // pop top x87 fp stack and do 64-bit store
-  kX86Fst32M,                   // do 32-bit store
-  kX86Fst64M,                   // do 64-bit store
-  kX86Fprem,                    // remainder from dividing of two floating point values
-  kX86Fucompp,                  // compare floating point values and pop x87 fp stack twice
-  kX86Fstsw16R,                 // store FPU status word
-  Binary0fOpCode(kX86Movdqa),   // move 128 bits aligned
-  kX86MovdqaMR, kX86MovdqaAR,   // store 128 bit aligned from xmm1 to m128
-  Binary0fOpCode(kX86Movups),   // load unaligned packed single FP values from xmm2/m128 to xmm1
-  kX86MovupsMR, kX86MovupsAR,   // store unaligned packed single FP values from xmm1 to m128
-  Binary0fOpCode(kX86Movaps),   // load aligned packed single FP values from xmm2/m128 to xmm1
-  kX86MovapsMR, kX86MovapsAR,   // store aligned packed single FP values from xmm1 to m128
-  kX86MovlpsRM, kX86MovlpsRA,   // load packed single FP values from m64 to low quadword of xmm
-  kX86MovlpsMR, kX86MovlpsAR,   // store packed single FP values from low quadword of xmm to m64
-  kX86MovhpsRM, kX86MovhpsRA,   // load packed single FP values from m64 to high quadword of xmm
-  kX86MovhpsMR, kX86MovhpsAR,   // store packed single FP values from high quadword of xmm to m64
-  Binary0fOpCode(kX86Movdxr),   // move into xmm from gpr
-  Binary0fOpCode(kX86Movqxr),   // move into xmm from 64 bit gpr
-  kX86MovqrxRR, kX86MovqrxMR, kX86MovqrxAR,  // move into 64 bit reg from xmm
-  kX86MovdrxRR, kX86MovdrxMR, kX86MovdrxAR,  // move into reg from xmm
-  kX86MovsxdRR, kX86MovsxdRM, kX86MovsxdRA,  // move 32 bit to 64 bit with sign extension
-  kX86Set8R, kX86Set8M, kX86Set8A,  // set byte depending on condition operand
-  kX86Lfence,                   // memory barrier to serialize all previous
-                                // load-from-memory instructions
-  kX86Mfence,                   // memory barrier to serialize all previous
-                                // load-from-memory and store-to-memory instructions
-  kX86Sfence,                   // memory barrier to serialize all previous
-                                // store-to-memory instructions
-  kX86LockAdd32MI8,             // locked add used to serialize memory instructions
-  Binary0fOpCode(kX86Imul16),   // 16bit multiply
-  Binary0fOpCode(kX86Imul32),   // 32bit multiply
-  Binary0fOpCode(kX86Imul64),   // 64bit multiply
-  kX86CmpxchgRR, kX86CmpxchgMR, kX86CmpxchgAR,  // compare and exchange
-  kX86LockCmpxchgMR, kX86LockCmpxchgAR, kX86LockCmpxchg64AR,  // locked compare and exchange
-  kX86LockCmpxchg64M, kX86LockCmpxchg64A,  // locked compare and exchange
-  kX86XchgMR,  // exchange memory with register (automatically locked)
-  Binary0fOpCode(kX86Movzx8),   // zero-extend 8-bit value
-  Binary0fOpCode(kX86Movzx16),  // zero-extend 16-bit value
-  Binary0fOpCode(kX86Movsx8),   // sign-extend 8-bit value
-  Binary0fOpCode(kX86Movsx16),  // sign-extend 16-bit value
-  Binary0fOpCode(kX86Movzx8q),   // zero-extend 8-bit value to quad word
-  Binary0fOpCode(kX86Movzx16q),  // zero-extend 16-bit value to quad word
-  Binary0fOpCode(kX86Movsx8q),   // sign-extend 8-bit value to quad word
-  Binary0fOpCode(kX86Movsx16q),  // sign-extend 16-bit value to quad word
-#undef Binary0fOpCode
-  kX86Jcc8, kX86Jcc32,  // jCC rel8/32; lir operands - 0: rel, 1: CC, target assigned
-  kX86Jmp8, kX86Jmp32,  // jmp rel8/32; lir operands - 0: rel, target assigned
-  kX86JmpR,             // jmp reg; lir operands - 0: reg
-  kX86Jecxz8,           // jecxz rel8; jump relative if ECX is zero.
-  kX86JmpT,             // jmp fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp
-
-  kX86CallR,            // call reg; lir operands - 0: reg
-  kX86CallM,            // call [base + disp]; lir operands - 0: base, 1: disp
-  kX86CallA,            // call [base + index * scale + disp]
-                        // lir operands - 0: base, 1: index, 2: scale, 3: disp
-  kX86CallT,            // call fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp
-  kX86CallI,            // call <relative> - 0: disp; Used for core.oat linking only
-  kX86Ret,              // ret; no lir operands
-  kX86PcRelLoadRA,      // mov reg, [base + index * scale + PC relative displacement]
-                        // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
-  kX86PcRelAdr,         // mov reg, PC relative displacement; lir operands - 0: reg, 1: table
-  kX86RepneScasw,       // repne scasw
-  kX86Last
-};
-std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);
-
-/* Instruction assembly field_loc kind */
-enum X86EncodingKind {
-  kData,                                    // Special case for raw data.
-  kNop,                                     // Special case for variable length nop.
-  kNullary,                                 // Opcode that takes no arguments.
-  kRegOpcode,                               // Shorter form of R instruction kind (opcode+rd)
-  kReg, kMem, kArray,                       // R, M and A instruction kinds.
-  kMemReg, kArrayReg, kThreadReg,           // MR, AR and TR instruction kinds.
-  kRegReg, kRegMem, kRegArray, kRegThread,  // RR, RM, RA and RT instruction kinds.
-  kRegRegStore,                             // RR following the store modrm reg-reg encoding rather than the load.
-  kRegImm, kMemImm, kArrayImm, kThreadImm,  // RI, MI, AI and TI instruction kinds.
-  kRegRegImm, kRegMemImm, kRegArrayImm,     // RRI, RMI and RAI instruction kinds.
-  kMovRegImm,                               // Shorter form move RI.
-  kMovRegQuadImm,                           // 64 bit move RI
-  kRegRegImmStore,                          // RRI following the store modrm reg-reg encoding rather than the load.
-  kMemRegImm,                               // MRI instruction kinds.
-  kShiftRegImm, kShiftMemImm, kShiftArrayImm,  // Shift opcode with immediate.
-  kShiftRegCl, kShiftMemCl, kShiftArrayCl,     // Shift opcode with register CL.
-  kShiftRegRegCl,
-  // kRegRegReg, kRegRegMem, kRegRegArray,    // RRR, RRM, RRA instruction kinds.
-  kRegCond, kMemCond, kArrayCond,          // R, M, A instruction kinds following by a condition.
-  kRegRegCond,                             // RR instruction kind followed by a condition.
-  kRegMemCond,                             // RM instruction kind followed by a condition.
-  kJmp, kJcc, kCall,                       // Branch instruction kinds.
-  kPcRel,                                  // Operation with displacement that is PC relative
-  kUnimplemented                           // Encoding used when an instruction isn't yet implemented.
-};
-
-/* Struct used to define the EncodingMap positions for each X86 opcode */
-struct X86EncodingMap {
-  X86OpCode opcode;      // e.g. kOpAddRI
-  // The broad category the instruction conforms to, such as kRegReg. Identifies which LIR operands
-  // hold meaning for the opcode.
-  X86EncodingKind kind;
-  uint64_t flags;
-  struct {
-  uint8_t prefix1;       // Non-zero => a prefix byte.
-  uint8_t prefix2;       // Non-zero => a second prefix byte.
-  uint8_t opcode;        // 1 byte opcode.
-  uint8_t extra_opcode1;  // Possible extra opcode byte.
-  uint8_t extra_opcode2;  // Possible second extra opcode byte.
-  // 3-bit opcode that gets encoded in the register bits of the modrm byte, use determined by the
-  // encoding kind.
-  uint8_t modrm_opcode;
-  uint8_t ax_opcode;  // Non-zero => shorter encoding for AX as a destination.
-  uint8_t immediate_bytes;  // Number of bytes of immediate.
-  // Does the instruction address a byte register? In 32-bit mode the registers ah, bh, ch and dh
-  // are not used. In 64-bit mode the REX prefix is used to normalize and allow any byte register
-  // to be addressed.
-  bool r8_form;
-  } skeleton;
-  const char *name;
-  const char* fmt;
-};
-
-
-// FIXME: mem barrier type - what do we do for x86?
-#define kSY 0
-#define kST 0
-
-// Offsets of high and low halves of a 64bit value.
-#define LOWORD_OFFSET 0
-#define HIWORD_OFFSET 4
-
-// Segment override instruction prefix used for quick TLS access to Thread::Current().
-#define THREAD_PREFIX 0x64
-#define THREAD_PREFIX_GS 0x65
-
-// 64 Bit Operand Size
-#define REX_W 0x48
-// Extension of the ModR/M reg field
-#define REX_R 0x44
-// Extension of the SIB index field
-#define REX_X 0x42
-// Extension of the ModR/M r/m field, SIB base field, or Opcode reg field
-#define REX_B 0x41
-// An empty REX prefix used to normalize the byte operations so that they apply to R4 through R15
-#define REX 0x40
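-// For example, a 64-bit operation using one of r8..r15 as the ModR/M r/m register
-// combines these as REX_W | REX_B = 0x49.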
-// Mask extracting the least 3 bits of r0..r15
-#define kRegNumMask32 0x07
-// Value indicating that base or reg is not used
-#define NO_REG 0
-
-#define IS_SIMM8(v) ((-128 <= (v)) && ((v) <= 127))
-#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32767))
-#define IS_SIMM32(v) ((INT64_C(-2147483648) <= (v)) && ((v) <= INT64_C(2147483647)))
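-// For example, IS_SIMM8(127) holds but IS_SIMM8(128) does not, which is what selects
-// the short immediate form (e.g. kX86Cmp32MI8 rather than kX86Cmp32MI) for compares.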
-
-extern X86EncodingMap EncodingMap[kX86Last];
-extern X86ConditionCode X86ConditionEncoding(ConditionCode cond);
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_
diff --git a/compiler/dex/reg_location.h b/compiler/dex/reg_location.h
deleted file mode 100644
index aa8ed46..0000000
--- a/compiler/dex/reg_location.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_REG_LOCATION_H_
-#define ART_COMPILER_DEX_REG_LOCATION_H_
-
-#include "reg_storage.h"
-
-namespace art {
-
-static constexpr int16_t INVALID_SREG = -1;
-
-/*
- * Whereas an SSA name describes a definition of a Dalvik vreg, the RegLocation describes
- * the type of an SSA name (and can also be used by code generators to record where the
- * value is located, i.e. physical register, frame, spill, etc.).  For each SSA name (SReg)
- * there is a RegLocation.
- * A note on SSA names:
- *   o SSA names for Dalvik vRegs v0..vN will be assigned 0..N.  These represent the "vN_0"
- *     names.  Negative SSA names represent special values not present in the Dalvik byte code.
- *     For example, SSA name -1 represents an invalid SSA name, and SSA name -2 represents
- *     the Method pointer.  SSA names < -2 are reserved for future use.
- *   o The vN_0 names for non-argument Dalvik vRegs should in practice never be used (as they would
- *     represent the read of an undefined local variable).  The first definition of the
- *     underlying Dalvik vReg will result in a vN_1 name.
- *
- * FIXME: The orig_sreg field was added as a workaround for llvm bitcode generation.  With
- * the latest restructuring, we should be able to remove it and rely on s_reg_low throughout.
- */
-struct RegLocation {
-  RegLocationType location:3;
-  unsigned wide:1;
-  unsigned defined:1;   // Do we know the type?
-  unsigned is_const:1;  // Constant, value in mir_graph->constant_values[].
-  unsigned fp:1;        // Floating point?
-  unsigned core:1;      // Non-floating point?
-  unsigned ref:1;       // Something GC cares about.
-  unsigned high_word:1;  // High word of pair?
-  unsigned home:1;      // Does this represent the home location?
-  RegStorage reg;       // Encoded physical registers.
-  int16_t s_reg_low;    // SSA name for low Dalvik word.
-  int16_t orig_sreg;    // TODO: remove after Bitcode gen complete
-                        // and consolidate usage w/ s_reg_low.
-};
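For illustration only, a RegLocation for a wide core value promoted to a physical register could be populated as below; kLocPhysReg is assumed to be one of the RegLocationType enumerators.

// Hypothetical sketch, not code from the tree.
RegLocation loc = {};
loc.location = kLocPhysReg;  // assumed RegLocationType enumerator
loc.wide = 1;                // value occupies a Dalvik vreg pair
loc.defined = 1;
loc.core = 1;                // non-FP, non-reference
loc.s_reg_low = 5;           // SSA name of the low word
loc.orig_sreg = 5;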
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_REG_LOCATION_H_
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
deleted file mode 100644
index 46ed011..0000000
--- a/compiler/dex/reg_storage.h
+++ /dev/null
@@ -1,348 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_REG_STORAGE_H_
-#define ART_COMPILER_DEX_REG_STORAGE_H_
-
-#include "base/logging.h"
-#include "base/value_object.h"
-#include "compiler_enums.h"  // For WideKind
-
-namespace art {
-
-/*
- * 16-bit representation of the physical register container holding a Dalvik value.
- * The encoding allows up to 64 physical elements per storage class, and supports eight
- * register container shapes.
- *
- * [V] [HHHHH] [SSS] [F] [LLLLLL]
- *
- * [LLLLLL]
- *  Physical register number for the low or solo register.
- *    0..63
- *
- * [F]
- *  Describes the type of the [LLLLLL] register.
- *    0: Core
- *    1: Floating point
- *
- * [SSS]
- *  Shape of the register container.
- *    000: Invalid
- *    001: 32-bit solo register
- *    010: 64-bit solo register
- *    011: 64-bit pair consisting of two 32-bit solo registers
- *    100: 128-bit solo register
- *    101: 256-bit solo register
- *    110: 512-bit solo register
- *    111: 1024-bit solo register
- *
- * [HHHHH]
- *  Physical register number of the high register (valid only for register pair).
- *    0..31
- *
- * [V]
- *    0 -> Invalid
- *    1 -> Valid
- *
- * Note that in all non-invalid cases, we can determine if the storage is floating point
- * by testing the [F] bit (0x0040).  Note also that a register pair is effectively limited
- * to a pair of physical register numbers in the 0..31 range.
- *
- * On some target architectures, the same underlying physical register container can be given
- * different views.  For example, Arm's 32-bit single-precision floating point registers
- * s2 and s3 map to the low and high halves of double-precision d1.  Similarly, X86's xmm3
- * vector register can be viewed as 32-bit, 64-bit, 128-bit, etc.  In these cases the use of
- * one view will affect the other views.  The RegStorage class does not concern itself
- * with potential aliasing.  That will be done using the associated RegisterInfo struct.
- * Distinct RegStorage elements should be created for each view of a physical register
- * container.  The management of the aliased physical elements will be handled via RegisterInfo
- * records.
- */
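Two worked encodings of this layout, as a sketch (the hex values follow directly from the masks defined below):

// Core 64-bit pair, low = r1, high = r2:
//   kValid (0x8000) | k64BitPair (0x0180) | (2 << 10) | 1  ==  0x8981
// Single-precision float s3:
//   kValid (0x8000) | k32BitSolo (0x0080) | kFloatingPoint (0x0040) | 3  ==  0x80c3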
-
-class RegStorage : public ValueObject {
- public:
-  enum RegStorageKind {
-    kValidMask     = 0x8000,
-    kValid         = 0x8000,
-    kInvalid       = 0x0000,
-    kShapeMask     = 0x0380,
-    k32BitSolo     = 0x0080,
-    k64BitSolo     = 0x0100,
-    k64BitPair     = 0x0180,
-    k128BitSolo    = 0x0200,
-    k256BitSolo    = 0x0280,
-    k512BitSolo    = 0x0300,
-    k1024BitSolo   = 0x0380,
-    k64BitMask     = 0x0300,
-    k64Bits        = 0x0100,
-    kShapeTypeMask = 0x03c0,
-    kFloatingPoint = 0x0040,
-    kCoreRegister  = 0x0000,
-  };
-
-  static const uint16_t kRegValMask  = 0x03ff;     // Num, type and shape.
-  static const uint16_t kRegTypeMask = 0x007f;     // Num and type.
-  static const uint16_t kRegNumMask  = 0x003f;     // Num only.
-  static const uint16_t kHighRegNumMask = 0x001f;  // 0..31 for high reg
-  static const uint16_t kMaxRegs     = kRegValMask + 1;
-  // TODO: deprecate use of kInvalidRegVal and speed up GetReg().  Rely on valid bit instead.
-  static const uint16_t kInvalidRegVal = 0x03ff;
-  static const uint16_t kHighRegShift = 10;
-  static const uint16_t kHighRegMask = (kHighRegNumMask << kHighRegShift);
-
-  // Reg is [F][LLLLLL]; will override any existing shape and use rs_kind.
-  constexpr RegStorage(RegStorageKind rs_kind, int reg)
-      : reg_(
-          DCHECK_CONSTEXPR(rs_kind != k64BitPair, , 0u)
-          DCHECK_CONSTEXPR((rs_kind & ~kShapeMask) == 0, , 0u)
-          kValid | rs_kind | (reg & kRegTypeMask)) {
-  }
-  constexpr RegStorage(RegStorageKind rs_kind, int low_reg, int high_reg)
-      : reg_(
-          DCHECK_CONSTEXPR(rs_kind == k64BitPair, << static_cast<int>(rs_kind), 0u)
-          DCHECK_CONSTEXPR((low_reg & kFloatingPoint) == (high_reg & kFloatingPoint),
-                           << low_reg << ", " << high_reg, 0u)
-          DCHECK_CONSTEXPR((high_reg & kRegNumMask) <= kHighRegNumMask,
-                           << "High reg must be in 0..31: " << high_reg, false)
-          kValid | rs_kind | ((high_reg & kHighRegNumMask) << kHighRegShift) |
-                  (low_reg & kRegTypeMask)) {
-  }
-  constexpr explicit RegStorage(uint16_t val) : reg_(val) {}
-  RegStorage() : reg_(kInvalid) {}
-
-  // We do not provide a general operator overload for equality of reg storage, as this is
-  // dangerous in the case of architectures with multiple views, and the name ExactlyEquals
-  // makes the exact-match semantics explicit. A comparison between views is more likely to
-  // be intended in most cases. Such code can be found in, for example, Mir2Lir::IsSameReg.
-  //
-  // If you know what you are doing, include reg_storage_eq.h, which defines == and != for brevity.
-
-  bool ExactlyEquals(const RegStorage& rhs) const {
-    return (reg_ == rhs.GetRawBits());
-  }
-
-  bool NotExactlyEquals(const RegStorage& rhs) const {
-    return (reg_ != rhs.GetRawBits());
-  }
-
-  constexpr bool Valid() const {
-    return ((reg_ & kValidMask) == kValid);
-  }
-
-  constexpr bool Is32Bit() const {
-    return ((reg_ & kShapeMask) == k32BitSolo);
-  }
-
-  constexpr bool Is64Bit() const {
-    return ((reg_ & k64BitMask) == k64Bits);
-  }
-
-  constexpr WideKind GetWideKind() const {
-    return Is64Bit() ? kWide : kNotWide;
-  }
-
-  constexpr bool Is64BitSolo() const {
-    return ((reg_ & kShapeMask) == k64BitSolo);
-  }
-
-  constexpr bool IsPair() const {
-    return ((reg_ & kShapeMask) == k64BitPair);
-  }
-
-  constexpr bool IsFloat() const {
-    return
-        DCHECK_CONSTEXPR(Valid(), , false)
-        ((reg_ & kFloatingPoint) == kFloatingPoint);
-  }
-
-  constexpr bool IsDouble() const {
-    return
-        DCHECK_CONSTEXPR(Valid(), , false)
-        (reg_ & (kFloatingPoint | k64BitMask)) == (kFloatingPoint | k64Bits);
-  }
-
-  constexpr bool IsSingle() const {
-    return
-        DCHECK_CONSTEXPR(Valid(), , false)
-        (reg_ & (kFloatingPoint | k64BitMask)) == kFloatingPoint;
-  }
-
-  static constexpr bool IsFloat(uint16_t reg) {
-    return ((reg & kFloatingPoint) == kFloatingPoint);
-  }
-
-  static constexpr bool IsDouble(uint16_t reg) {
-    return (reg & (kFloatingPoint | k64BitMask)) == (kFloatingPoint | k64Bits);
-  }
-
-  static constexpr bool IsSingle(uint16_t reg) {
-    return (reg & (kFloatingPoint | k64BitMask)) == kFloatingPoint;
-  }
-
-  static constexpr bool Is32Bit(uint16_t reg) {
-    return ((reg & kShapeMask) == k32BitSolo);
-  }
-
-  static constexpr bool Is64Bit(uint16_t reg) {
-    return ((reg & k64BitMask) == k64Bits);
-  }
-
-  static constexpr bool Is64BitSolo(uint16_t reg) {
-    return ((reg & kShapeMask) == k64BitSolo);
-  }
-
-  // Used to retrieve either the low register of a pair, or the only register.
-  int GetReg() const {
-    DCHECK(!IsPair()) << "reg_ = 0x" << std::hex << reg_;
-    return Valid() ? (reg_ & kRegValMask) : kInvalidRegVal;
-  }
-
-  // Sets shape, type and num of solo.
-  void SetReg(int reg) {
-    DCHECK(Valid());
-    DCHECK(!IsPair());
-    reg_ = (reg_ & ~kRegValMask) | reg;
-  }
-
-  // Set the reg number and type only; the target remains a 64-bit pair.
-  void SetLowReg(int reg) {
-    DCHECK(IsPair());
-    reg_ = (reg_ & ~kRegTypeMask) | (reg & kRegTypeMask);
-  }
-
-  // Retrieve the least significant register of a pair and return as 32-bit solo.
-  int GetLowReg() const {
-    DCHECK(IsPair());
-    return ((reg_ & kRegTypeMask) | k32BitSolo);
-  }
-
-  // Create a stand-alone RegStorage from the low reg of a pair.
-  RegStorage GetLow() const {
-    DCHECK(IsPair());
-    return RegStorage(k32BitSolo, reg_ & kRegTypeMask);
-  }
-
-  // Retrieve the most significant register of a pair.
-  int GetHighReg() const {
-    DCHECK(IsPair());
-    return k32BitSolo | ((reg_ & kHighRegMask) >> kHighRegShift) | (reg_ & kFloatingPoint);
-  }
-
-  // Create a stand-alone RegStorage from the high reg of a pair.
-  RegStorage GetHigh() const {
-    DCHECK(IsPair());
-    return RegStorage(kValid | GetHighReg());
-  }
-
-  void SetHighReg(int reg) {
-    DCHECK(IsPair());
-    reg_ = (reg_ & ~kHighRegMask) | ((reg & kHighRegNumMask) << kHighRegShift);
-  }
-
-  // Return the register number of low or solo.
-  constexpr int GetRegNum() const {
-    return reg_ & kRegNumMask;
-  }
-
-  // Is register number in 0..7?
-  constexpr bool Low8() const {
-    return GetRegNum() < 8;
-  }
-
-  // Is register number in 0..3?
-  constexpr bool Low4() const {
-    return GetRegNum() < 4;
-  }
-
-  // Combine 2 32-bit solo regs into a pair.
-  static RegStorage MakeRegPair(RegStorage low, RegStorage high) {
-    DCHECK(!low.IsPair());
-    DCHECK(low.Is32Bit());
-    DCHECK(!high.IsPair());
-    DCHECK(high.Is32Bit());
-    return RegStorage(k64BitPair, low.GetReg(), high.GetReg());
-  }
-
-  static constexpr bool SameRegType(RegStorage reg1, RegStorage reg2) {
-    return ((reg1.reg_ & kShapeTypeMask) == (reg2.reg_ & kShapeTypeMask));
-  }
-
-  static constexpr bool SameRegType(int reg1, int reg2) {
-    return ((reg1 & kShapeTypeMask) == (reg2 & kShapeTypeMask));
-  }
-
-  // Create a 32-bit solo.
-  static RegStorage Solo32(int reg_num) {
-    return RegStorage(k32BitSolo, reg_num & kRegTypeMask);
-  }
-
-  // Create a floating point 32-bit solo.
-  static constexpr RegStorage FloatSolo32(int reg_num) {
-    return RegStorage(k32BitSolo, (reg_num & kRegNumMask) | kFloatingPoint);
-  }
-
-  // Create a 128-bit solo.
-  static constexpr RegStorage Solo128(int reg_num) {
-    return RegStorage(k128BitSolo, reg_num & kRegTypeMask);
-  }
-
-  // Create a 64-bit solo.
-  static constexpr RegStorage Solo64(int reg_num) {
-    return RegStorage(k64BitSolo, reg_num & kRegTypeMask);
-  }
-
-  // Create a floating point 64-bit solo.
-  static RegStorage FloatSolo64(int reg_num) {
-    return RegStorage(k64BitSolo, (reg_num & kRegNumMask) | kFloatingPoint);
-  }
-
-  static constexpr RegStorage InvalidReg() {
-    return RegStorage(kInvalid);
-  }
-
-  static constexpr uint16_t RegNum(int raw_reg_bits) {
-    return raw_reg_bits & kRegNumMask;
-  }
-
-  constexpr int GetRawBits() const {
-    return reg_;
-  }
-
-  size_t StorageSize() const {
-    switch (reg_ & kShapeMask) {
-      case kInvalid: return 0;
-      case k32BitSolo: return 4;
-      case k64BitSolo: return 8;
-      case k64BitPair: return 8;  // Is this useful?  Might want to disallow taking size of pair.
-      case k128BitSolo: return 16;
-      case k256BitSolo: return 32;
-      case k512BitSolo: return 64;
-      case k1024BitSolo: return 128;
-      default: LOG(FATAL) << "Unexpected shape"; UNREACHABLE();
-    }
-  }
-
- private:
-  uint16_t reg_;
-};
-static inline std::ostream& operator<<(std::ostream& o, const RegStorage& rhs) {
-  return o << rhs.GetRawBits();  // TODO: better output.
-}
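A minimal usage sketch of the pair helpers (illustrative only, not code from the tree):

RegStorage low = RegStorage::Solo32(0);                // r0 view
RegStorage high = RegStorage::Solo32(1);               // r1 view
RegStorage pair = RegStorage::MakeRegPair(low, high);  // 64-bit core pair
DCHECK(pair.IsPair());
DCHECK(pair.GetLow().ExactlyEquals(low));              // round-trips exactly
DCHECK(pair.GetHigh().ExactlyEquals(high));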
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_REG_STORAGE_H_
diff --git a/compiler/dex/reg_storage_eq.h b/compiler/dex/reg_storage_eq.h
deleted file mode 100644
index b688dac..0000000
--- a/compiler/dex/reg_storage_eq.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_REG_STORAGE_EQ_H_
-#define ART_COMPILER_DEX_REG_STORAGE_EQ_H_
-
-#include "reg_storage.h"
-
-namespace art {
-
-// Define == and != operators for RegStorage. These are based on exact equality of the reg storage,
-// that is, 32b and 64b views of the same physical register won't match. This is often not the
-// intended behavior, so be careful when including this header.
-
-inline bool operator==(const RegStorage& lhs, const RegStorage& rhs) {
-  return lhs.ExactlyEquals(rhs);
-}
-
-inline bool operator!=(const RegStorage& lhs, const RegStorage& rhs) {
-  return lhs.NotExactlyEquals(rhs);
-}
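A small sketch of the effect (illustrative): the operators compare raw bits, so two views of the same register number do not match.

RegStorage a = RegStorage::Solo32(3);       // core r3
RegStorage b = RegStorage::FloatSolo32(3);  // FP s3
DCHECK(a != b);  // same number, different type bit: not exactly equal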
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_REG_STORAGE_EQ_H_
-
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
deleted file mode 100644
index 6d5b351..0000000
--- a/compiler/dex/ssa_transformation.cc
+++ /dev/null
@@ -1,607 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/bit_vector-inl.h"
-#include "base/logging.h"
-#include "base/scoped_arena_containers.h"
-#include "compiler_ir.h"
-#include "dataflow_iterator-inl.h"
-
-#define NOTVISITED (-1)
-
-namespace art {
-
-void MIRGraph::ClearAllVisitedFlags() {
-  AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    bb->visited = false;
-  }
-}
-
-BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
-  if (bb != nullptr) {
-    if (bb->visited || bb->hidden) {
-      bb = nullptr;
-    }
-  }
-  return bb;
-}
-
-BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) {
-  BasicBlock* res = NeedsVisit(GetBasicBlock(bb->fall_through));
-  if (res == nullptr) {
-    res = NeedsVisit(GetBasicBlock(bb->taken));
-    if (res == nullptr) {
-      if (bb->successor_block_list_type != kNotUsed) {
-        for (SuccessorBlockInfo* sbi : bb->successor_blocks) {
-          res = NeedsVisit(GetBasicBlock(sbi->block));
-          if (res != nullptr) {
-            break;
-          }
-        }
-      }
-    }
-  }
-  return res;
-}
-
-void MIRGraph::MarkPreOrder(BasicBlock* block) {
-  block->visited = true;
-  /* Enqueue the pre_order block id */
-  if (block->id != NullBasicBlockId) {
-    dfs_order_.push_back(block->id);
-  }
-}
-
-void MIRGraph::RecordDFSOrders(BasicBlock* block) {
-  ScopedArenaAllocator allocator(&cu_->arena_stack);
-  ScopedArenaVector<BasicBlock*> succ(allocator.Adapter());
-  succ.reserve(GetNumBlocks());
-  MarkPreOrder(block);
-  succ.push_back(block);
-  while (!succ.empty()) {
-    BasicBlock* curr = succ.back();
-    BasicBlock* next_successor = NextUnvisitedSuccessor(curr);
-    if (next_successor != nullptr) {
-      MarkPreOrder(next_successor);
-      succ.push_back(next_successor);
-      continue;
-    }
-    curr->dfs_id = dfs_post_order_.size();
-    if (curr->id != NullBasicBlockId) {
-      dfs_post_order_.push_back(curr->id);
-    }
-    succ.pop_back();
-  }
-}
-
-/* Compute the DFS pre-order and post-order orderings of the blocks */
-void MIRGraph::ComputeDFSOrders() {
-  /* Clear the DFS pre-order and post-order lists. */
-  dfs_order_.clear();
-  dfs_order_.reserve(GetNumBlocks());
-  dfs_post_order_.clear();
-  dfs_post_order_.reserve(GetNumBlocks());
-
-  // Reset visited flags from all nodes
-  ClearAllVisitedFlags();
-
-  // Record dfs orders
-  RecordDFSOrders(GetEntryBlock());
-
-  num_reachable_blocks_ = dfs_order_.size();
-
-  if (num_reachable_blocks_ != GetNumBlocks()) {
-    // Kill all unreachable blocks.
-    AllNodesIterator iter(this);
-    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-      if (!bb->visited) {
-        bb->Kill(this);
-      }
-    }
-  }
-  dfs_orders_up_to_date_ = true;
-}
-
-/*
- * Mark block bit on the per-Dalvik register vector to denote that Dalvik
- * register idx is defined in BasicBlock bb.
- */
-bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) {
-  if (bb->data_flow_info == nullptr) {
-    return false;
-  }
-
-  for (uint32_t idx : bb->data_flow_info->def_v->Indexes()) {
-    /* Block bb defines register idx */
-    temp_.ssa.def_block_matrix[idx]->SetBit(bb->id);
-  }
-  return true;
-}
-
-void MIRGraph::ComputeDefBlockMatrix() {
-  int num_registers = GetNumOfCodeAndTempVRs();
-  /* Allocate num_registers bit vector pointers */
-  DCHECK(temp_scoped_alloc_ != nullptr);
-  DCHECK(temp_.ssa.def_block_matrix == nullptr);
-  temp_.ssa.def_block_matrix =
-      temp_scoped_alloc_->AllocArray<ArenaBitVector*>(num_registers, kArenaAllocDFInfo);
-
-  /* Initialize num_registers vectors with num_blocks bits each */
-  for (int i = 0; i < num_registers; i++) {
-    temp_.ssa.def_block_matrix[i] = new (temp_scoped_alloc_.get()) ArenaBitVector(
-        arena_, GetNumBlocks(), false);
-    temp_.ssa.def_block_matrix[i]->ClearAllBits();
-  }
-
-  AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    FindLocalLiveIn(bb);
-  }
-  AllNodesIterator iter2(this);
-  for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
-    FillDefBlockMatrix(bb);
-  }
-
-  /*
-   * Also set the incoming parameters as defs in the entry block.
-   * Only need to handle the parameters for the outer method.
-   */
-  int num_regs = GetNumOfCodeVRs();
-  int in_reg = GetFirstInVR();
-  for (; in_reg < num_regs; in_reg++) {
-    temp_.ssa.def_block_matrix[in_reg]->SetBit(GetEntryBlock()->id);
-  }
-}
-
-void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb) {
-  // Clear the dominator post-order list.
-  dom_post_order_traversal_.clear();
-  dom_post_order_traversal_.reserve(num_reachable_blocks_);
-
-  ClearAllVisitedFlags();
-  ScopedArenaAllocator allocator(&cu_->arena_stack);
-  ScopedArenaVector<std::pair<BasicBlock*, ArenaBitVector::IndexIterator>> work_stack(
-      allocator.Adapter());
-  bb->visited = true;
-  work_stack.push_back(std::make_pair(bb, bb->i_dominated->Indexes().begin()));
-  while (!work_stack.empty()) {
-    std::pair<BasicBlock*, ArenaBitVector::IndexIterator>* curr = &work_stack.back();
-    BasicBlock* curr_bb = curr->first;
-    ArenaBitVector::IndexIterator* curr_idom_iter = &curr->second;
-    while (!curr_idom_iter->Done() && (NeedsVisit(GetBasicBlock(**curr_idom_iter)) == nullptr)) {
-      ++*curr_idom_iter;
-    }
-    // NOTE: work_stack.push_back()/pop_back() invalidate curr and curr_idom_iter.
-    if (!curr_idom_iter->Done()) {
-      BasicBlock* new_bb = GetBasicBlock(**curr_idom_iter);
-      ++*curr_idom_iter;
-      new_bb->visited = true;
-      work_stack.push_back(std::make_pair(new_bb, new_bb->i_dominated->Indexes().begin()));
-    } else {
-      // no successor/next
-      if (curr_bb->id != NullBasicBlockId) {
-        dom_post_order_traversal_.push_back(curr_bb->id);
-      }
-      work_stack.pop_back();
-    }
-  }
-}
-
-void MIRGraph::CheckForDominanceFrontier(BasicBlock* dom_bb,
-                                         const BasicBlock* succ_bb) {
-  /*
-   * TODO - evaluate whether phi will ever need to be inserted into exit
-   * blocks.
-   */
-  if (succ_bb->i_dom != dom_bb->id &&
-      succ_bb->block_type == kDalvikByteCode &&
-      !succ_bb->hidden) {
-    dom_bb->dom_frontier->SetBit(succ_bb->id);
-  }
-}
-
-/* Worker function to compute the dominance frontier */
-bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) {
-  /* Calculate DF_local */
-  if (bb->taken != NullBasicBlockId) {
-    CheckForDominanceFrontier(bb, GetBasicBlock(bb->taken));
-  }
-  if (bb->fall_through != NullBasicBlockId) {
-    CheckForDominanceFrontier(bb, GetBasicBlock(bb->fall_through));
-  }
-  if (bb->successor_block_list_type != kNotUsed) {
-    for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
-      BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
-      CheckForDominanceFrontier(bb, succ_bb);
-    }
-  }
-
-  /* Calculate DF_up */
-  for (uint32_t dominated_idx : bb->i_dominated->Indexes()) {
-    BasicBlock* dominated_bb = GetBasicBlock(dominated_idx);
-    for (uint32_t df_up_block_idx : dominated_bb->dom_frontier->Indexes()) {
-      BasicBlock* df_up_block = GetBasicBlock(df_up_block_idx);
-      CheckForDominanceFrontier(bb, df_up_block);
-    }
-  }
-
-  return true;
-}
-
-/* Worker function for initializing domination-related data structures */
-void MIRGraph::InitializeDominationInfo(BasicBlock* bb) {
-  int num_total_blocks = GetBasicBlockListCount();
-
-  if (bb->dominators == nullptr) {
-    bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks, true /* expandable */);
-    bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks, true /* expandable */);
-    bb->dom_frontier = new (arena_) ArenaBitVector(arena_, num_total_blocks, true /* expandable */);
-  } else {
-    bb->dominators->ClearAllBits();
-    bb->i_dominated->ClearAllBits();
-    bb->dom_frontier->ClearAllBits();
-  }
-  /* Set all bits in the dominator vector */
-  bb->dominators->SetInitialBits(num_total_blocks);
-}
-
-/*
- * Walk through the ordered i_dom list until we reach a common parent.
- * Given the ordering of i_dom_list, this common parent represents the
- * last element of the intersection of block1 and block2 dominators.
- */
-int MIRGraph::FindCommonParent(int block1, int block2) {
-  while (block1 != block2) {
-    while (block1 < block2) {
-      block1 = i_dom_list_[block1];
-      DCHECK_NE(block1, NOTVISITED);
-    }
-    while (block2 < block1) {
-      block2 = i_dom_list_[block2];
-      DCHECK_NE(block2, NOTVISITED);
-    }
-  }
-  return block1;
-}
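A worked run on hypothetical numbering (a sketch): assume post-order ids 0..4, i_dom_list_ = {2, 2, 4, 4, 4}, and entry block id 4. Then:

// FindCommonParent(0, 1): block1 walks 0 -> 2, block2 walks 1 -> 2, returns 2.
// FindCommonParent(0, 3): block1 walks 0 -> 2 -> 4, block2 walks 3 -> 4, returns 4.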
-
-/* Worker function to compute each block's immediate dominator */
-bool MIRGraph::ComputeblockIDom(BasicBlock* bb) {
-  /* Special-case entry block */
-  if ((bb->id == NullBasicBlockId) || (bb == GetEntryBlock())) {
-    return false;
-  }
-
-  /* Iterate through the predecessors */
-  auto it = bb->predecessors.begin(), end = bb->predecessors.end();
-
-  /* Find the first processed predecessor */
-  int idom = -1;
-  for ( ; ; ++it) {
-    CHECK(it != end);
-    BasicBlock* pred_bb = GetBasicBlock(*it);
-    DCHECK(pred_bb != nullptr);
-    if (i_dom_list_[pred_bb->dfs_id] != NOTVISITED) {
-      idom = pred_bb->dfs_id;
-      break;
-    }
-  }
-
-  /* Scan the rest of the predecessors */
-  for ( ; it != end; ++it) {
-    BasicBlock* pred_bb = GetBasicBlock(*it);
-    DCHECK(pred_bb != nullptr);
-    if (i_dom_list_[pred_bb->dfs_id] != NOTVISITED) {
-      idom = FindCommonParent(pred_bb->dfs_id, idom);
-    }
-  }
-
-  DCHECK_NE(idom, NOTVISITED);
-
-  /* Did something change? */
-  if (i_dom_list_[bb->dfs_id] != idom) {
-    i_dom_list_[bb->dfs_id] = idom;
-    return true;
-  }
-  return false;
-}
-
-/* Worker function to compute each block's dominators */
-bool MIRGraph::ComputeBlockDominators(BasicBlock* bb) {
-  if (bb == GetEntryBlock()) {
-    bb->dominators->ClearAllBits();
-  } else {
-    bb->dominators->Copy(GetBasicBlock(bb->i_dom)->dominators);
-  }
-  bb->dominators->SetBit(bb->id);
-  return false;
-}
-
-bool MIRGraph::SetDominators(BasicBlock* bb) {
-  if (bb != GetEntryBlock()) {
-    int idom_dfs_idx = i_dom_list_[bb->dfs_id];
-    DCHECK_NE(idom_dfs_idx, NOTVISITED);
-    int i_dom_idx = dfs_post_order_[idom_dfs_idx];
-    BasicBlock* i_dom = GetBasicBlock(i_dom_idx);
-    bb->i_dom = i_dom->id;
-    /* Add bb to the i_dominated set of the immediate dominator block */
-    i_dom->i_dominated->SetBit(bb->id);
-  }
-  return false;
-}
-
-/* Compute dominators, immediate dominators, and dominance frontiers */
-void MIRGraph::ComputeDominators() {
-  int num_reachable_blocks = num_reachable_blocks_;
-
-  /* Initialize domination-related data structures */
-  PreOrderDfsIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    InitializeDominationInfo(bb);
-  }
-
-  /* Initialize & Clear i_dom_list */
-  if (max_num_reachable_blocks_ < num_reachable_blocks_) {
-    i_dom_list_ = arena_->AllocArray<int>(num_reachable_blocks, kArenaAllocDFInfo);
-  }
-  for (int i = 0; i < num_reachable_blocks; i++) {
-    i_dom_list_[i] = NOTVISITED;
-  }
-
-  /* For post-order, the last block is the entry block.  Set its i_dom to itself. */
-  DCHECK_EQ(GetEntryBlock()->dfs_id, num_reachable_blocks-1);
-  i_dom_list_[GetEntryBlock()->dfs_id] = GetEntryBlock()->dfs_id;
-
-  /* Compute the immediate dominators */
-  RepeatingReversePostOrderDfsIterator iter2(this);
-  bool change = false;
-  for (BasicBlock* bb = iter2.Next(false); bb != nullptr; bb = iter2.Next(change)) {
-    change = ComputeblockIDom(bb);
-  }
-
-  /* Set the dominator for the root node */
-  GetEntryBlock()->dominators->ClearAllBits();
-  GetEntryBlock()->dominators->SetBit(GetEntryBlock()->id);
-
-  GetEntryBlock()->i_dom = 0;
-
-  PreOrderDfsIterator iter3(this);
-  for (BasicBlock* bb = iter3.Next(); bb != nullptr; bb = iter3.Next()) {
-    SetDominators(bb);
-  }
-
-  ReversePostOrderDfsIterator iter4(this);
-  for (BasicBlock* bb = iter4.Next(); bb != nullptr; bb = iter4.Next()) {
-    ComputeBlockDominators(bb);
-  }
-
-  // Compute the dominance frontier for each block.
-  ComputeDomPostOrderTraversal(GetEntryBlock());
-  PostOrderDOMIterator iter5(this);
-  for (BasicBlock* bb = iter5.Next(); bb != nullptr; bb = iter5.Next()) {
-    ComputeDominanceFrontier(bb);
-  }
-
-  domination_up_to_date_ = true;
-}
-
-/*
- * Perform dest |= src1 & ~src2, i.e. add to dest everything that is in src1
- * but not in src2.  This is probably not general enough to be placed in BitVector.[ch].
- */
-void MIRGraph::ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
-                                 const ArenaBitVector* src2) {
-  if (dest->GetStorageSize() != src1->GetStorageSize() ||
-      dest->GetStorageSize() != src2->GetStorageSize() ||
-      dest->IsExpandable() != src1->IsExpandable() ||
-      dest->IsExpandable() != src2->IsExpandable()) {
-    LOG(FATAL) << "Incompatible set properties";
-  }
-
-  unsigned int idx;
-  for (idx = 0; idx < dest->GetStorageSize(); idx++) {
-    dest->GetRawStorage()[idx] |= src1->GetRawStorageWord(idx) & ~(src2->GetRawStorageWord(idx));
-  }
-}
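A concrete illustration of the helper above (hypothetical sets):

// With src1 = live_in(succ) = {v0, v1} and src2 = def(bb) = {v1}, the call
// adds only v0 to dest: v1 is redefined in bb, so its liveness stops here.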
-
-/*
- * Iterate through all successor blocks and propagate up the live-in sets.
- * The calculated result is used for phi-node pruning: we only need to
- * insert a phi node if the variable is live-in to the block.
- */
-bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
-  DCHECK_EQ(temp_.ssa.num_vregs, cu_->mir_graph.get()->GetNumOfCodeAndTempVRs());
-  ArenaBitVector* temp_live_vregs = temp_.ssa.work_live_vregs;
-
-  if (bb->data_flow_info == nullptr) {
-    return false;
-  }
-  temp_live_vregs->Copy(bb->data_flow_info->live_in_v);
-  BasicBlock* bb_taken = GetBasicBlock(bb->taken);
-  BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
-  if (bb_taken != nullptr && bb_taken->data_flow_info != nullptr) {
-    ComputeSuccLineIn(temp_live_vregs, bb_taken->data_flow_info->live_in_v,
-                      bb->data_flow_info->def_v);
-  }
-  if (bb_fall_through != nullptr && bb_fall_through->data_flow_info != nullptr) {
-    ComputeSuccLineIn(temp_live_vregs, bb_fall_through->data_flow_info->live_in_v,
-                      bb->data_flow_info->def_v);
-  }
-  if (bb->successor_block_list_type != kNotUsed) {
-    for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
-      BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
-      if (succ_bb->data_flow_info) {
-        ComputeSuccLineIn(temp_live_vregs, succ_bb->data_flow_info->live_in_v,
-                          bb->data_flow_info->def_v);
-      }
-    }
-  }
-  if (!temp_live_vregs->Equal(bb->data_flow_info->live_in_v)) {
-    bb->data_flow_info->live_in_v->Copy(temp_live_vregs);
-    return true;
-  }
-  return false;
-}
-
-/* For each dalvik reg, find blocks that need phi nodes according to the dominance frontiers. */
-void MIRGraph::FindPhiNodeBlocks() {
-  RepeatingPostOrderDfsIterator iter(this);
-  bool change = false;
-  for (BasicBlock* bb = iter.Next(false); bb != nullptr; bb = iter.Next(change)) {
-    change = ComputeBlockLiveIns(bb);
-  }
-
-  ArenaBitVector* phi_blocks = new (temp_scoped_alloc_.get()) ArenaBitVector(
-      temp_scoped_alloc_.get(), GetNumBlocks(), false);
-
-  // Reuse the def_block_matrix storage for phi_node_blocks.
-  ArenaBitVector** def_block_matrix = temp_.ssa.def_block_matrix;
-  ArenaBitVector** phi_node_blocks = def_block_matrix;
-  DCHECK(temp_.ssa.phi_node_blocks == nullptr);
-  temp_.ssa.phi_node_blocks = phi_node_blocks;
-  temp_.ssa.def_block_matrix = nullptr;
-
-  /* Iterate through each Dalvik register */
-  for (int dalvik_reg = GetNumOfCodeAndTempVRs() - 1; dalvik_reg >= 0; dalvik_reg--) {
-    phi_blocks->ClearAllBits();
-    ArenaBitVector* input_blocks = def_block_matrix[dalvik_reg];
-    do {
-      // TUNING: When we repeat this, we could skip indexes from the previous pass.
-      for (uint32_t idx : input_blocks->Indexes()) {
-        BasicBlock* def_bb = GetBasicBlock(idx);
-        if (def_bb->dom_frontier != nullptr) {
-          phi_blocks->Union(def_bb->dom_frontier);
-        }
-      }
-    } while (input_blocks->Union(phi_blocks));
-
-    def_block_matrix[dalvik_reg] = phi_blocks;
-    phi_blocks = input_blocks;  // Reuse the bit vector in next iteration.
-  }
-}
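A worked illustration of the iterated dominance-frontier loop, on a hypothetical CFG: suppose a vreg is defined in blocks {B1, B2}, with DF(B1) = DF(B2) = {B3}, DF(B3) = {B5}, and DF(B5) empty. Then:

// Pass 1: phi_blocks = DF(B1) u DF(B2) = {B3}; input grows to {B1, B2, B3}.
// Pass 2: phi_blocks = {B3, B5}; input grows to {B1, B2, B3, B5}.
// Pass 3: nothing changes, so the loop settles on phi blocks {B3, B5}.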
-
-/*
- * Worker function to insert phi-operands with latest SSA names from
- * predecessor blocks
- */
-bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
-  /* Phi nodes are at the beginning of each block */
-  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-    if (mir->dalvikInsn.opcode != static_cast<Instruction::Code>(kMirOpPhi)) {
-      return true;
-    }
-    int ssa_reg = mir->ssa_rep->defs[0];
-    DCHECK_GE(ssa_reg, 0);   // Shouldn't see compiler temps here
-    int v_reg = SRegToVReg(ssa_reg);
-
-    /* Iterate through the predecessors */
-    size_t num_uses = bb->predecessors.size();
-    AllocateSSAUseData(mir, num_uses);
-    int* uses = mir->ssa_rep->uses;
-    BasicBlockId* incoming = arena_->AllocArray<BasicBlockId>(num_uses, kArenaAllocDFInfo);
-    mir->meta.phi_incoming = incoming;
-    int idx = 0;
-    for (BasicBlockId pred_id : bb->predecessors) {
-      BasicBlock* pred_bb = GetBasicBlock(pred_id);
-      DCHECK(pred_bb != nullptr);
-      uses[idx] = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
-      incoming[idx] = pred_id;
-      idx++;
-    }
-  }
-
-  return true;
-}
-
-void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) {
-  if (block->visited || block->hidden) {
-    return;
-  }
-
-  struct BasicBlockInfo {
-    BasicBlock* bb;
-    int32_t* ssa_map;
-  };
-  BasicBlockInfo temp;
-
-  ScopedArenaAllocator allocator(&cu_->arena_stack);
-  ScopedArenaVector<BasicBlockInfo> bi_stack(allocator.Adapter());
-  ScopedArenaVector<BasicBlock*> succ_stack(allocator.Adapter());
-
-  uint32_t num_vregs = GetNumOfCodeAndTempVRs();
-  size_t map_size = sizeof(int32_t) * num_vregs;
-  temp.bb = block;
-  temp.ssa_map = vreg_to_ssa_map_;
-  bi_stack.push_back(temp);
-
-  while (!bi_stack.empty()) {
-    temp = bi_stack.back();
-    bi_stack.pop_back();
-    BasicBlock* b = temp.bb;
-
-    if (b->visited || b->hidden) {
-      continue;
-    }
-    b->visited = true;
-
-    /* Restore SSA map snapshot, except for the first block */
-    if (b != block) {
-      memcpy(vreg_to_ssa_map_, temp.ssa_map, map_size);
-    }
-
-    /* Process this block */
-    DoSSAConversion(b);
-
-    /* If there is no successor list, taken block, or fall-through block, continue */
-    if (b->successor_block_list_type == kNotUsed &&
-        b->taken == NullBasicBlockId &&
-        b->fall_through == NullBasicBlockId) {
-      continue;
-    }
-
-    /* Save SSA map snapshot */
-    int32_t* saved_ssa_map =
-      allocator.AllocArray<int32_t>(num_vregs, kArenaAllocDalvikToSSAMap);
-    memcpy(saved_ssa_map, vreg_to_ssa_map_, map_size);
-
-    if (b->successor_block_list_type != kNotUsed) {
-      for (SuccessorBlockInfo* successor_block_info : b->successor_blocks) {
-        BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
-        succ_stack.push_back(succ_bb);
-      }
-      while (!succ_stack.empty()) {
-        temp.bb = succ_stack.back();
-        succ_stack.pop_back();
-        temp.ssa_map = saved_ssa_map;
-        bi_stack.push_back(temp);
-      }
-    }
-    if (b->taken != NullBasicBlockId) {
-      temp.bb = GetBasicBlock(b->taken);
-      temp.ssa_map = saved_ssa_map;
-      bi_stack.push_back(temp);
-    }
-    if (b->fall_through != NullBasicBlockId) {
-      temp.bb = GetBasicBlock(b->fall_through);
-      temp.ssa_map = saved_ssa_map;
-      bi_stack.push_back(temp);
-    }
-  }
-}
-
-}  // namespace art
diff --git a/compiler/dex/type_inference.cc b/compiler/dex/type_inference.cc
deleted file mode 100644
index c93fe20..0000000
--- a/compiler/dex/type_inference.cc
+++ /dev/null
@@ -1,1074 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "type_inference.h"
-
-#include "base/bit_vector-inl.h"
-#include "compiler_ir.h"
-#include "dataflow_iterator-inl.h"
-#include "dex_flags.h"
-#include "dex_file-inl.h"
-#include "driver/dex_compilation_unit.h"
-#include "mir_field_info.h"
-#include "mir_graph.h"
-#include "mir_method_info.h"
-#include "utils.h"
-
-namespace art {
-
-inline TypeInference::Type TypeInference::Type::ArrayType(uint32_t array_depth, Type nested_type) {
-  DCHECK_NE(array_depth, 0u);
-  return Type(kFlagNarrow | kFlagRef | kFlagLowWord | (array_depth << kBitArrayDepthStart) |
-              ((nested_type.raw_bits_ & kMaskWideAndType) << kArrayTypeShift));
-}
-
-inline TypeInference::Type TypeInference::Type::ArrayTypeFromComponent(Type component_type) {
-  if (component_type.ArrayDepth() == 0u) {
-    return ArrayType(1u, component_type);
-  }
-  if (UNLIKELY(component_type.ArrayDepth() == kMaxArrayDepth)) {
-    return component_type;
-  }
-  return Type(component_type.raw_bits_ + (1u << kBitArrayDepthStart));  // array_depth + 1u;
-}
-
-TypeInference::Type TypeInference::Type::ShortyType(char shorty) {
-  switch (shorty) {
-    case 'L':
-      return Type(kFlagLowWord | kFlagNarrow | kFlagRef);
-    case 'D':
-      return Type(kFlagLowWord | kFlagWide | kFlagFp);
-    case 'J':
-      return Type(kFlagLowWord | kFlagWide | kFlagCore);
-    case 'F':
-      return Type(kFlagLowWord | kFlagNarrow | kFlagFp);
-    default:
-      DCHECK(shorty == 'I' || shorty == 'S' || shorty == 'C' || shorty == 'B' || shorty == 'Z');
-      return Type(kFlagLowWord | kFlagNarrow | kFlagCore);
-  }
-}
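A few sample mappings, for reference:

// ShortyType('J') -> wide core, low word; ShortyType('L') -> narrow reference;
// ShortyType('Z') -> narrow core via the default case.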
-
-TypeInference::Type TypeInference::Type::DexType(const DexFile* dex_file, uint32_t type_idx) {
-  const char* desc = dex_file->GetTypeDescriptor(dex_file->GetTypeId(type_idx));
-  if (UNLIKELY(desc[0] == 'V')) {
-    return Unknown();
-  } else if (UNLIKELY(desc[0] == '[')) {
-    size_t array_depth = 0u;
-    while (*desc == '[') {
-      ++array_depth;
-      ++desc;
-    }
-    if (UNLIKELY(array_depth > kMaxArrayDepth)) {
-      LOG(WARNING) << "Array depth exceeds " << kMaxArrayDepth << ": " << array_depth
-          << " in dex file " << dex_file->GetLocation() << " type index " << type_idx;
-      array_depth = kMaxArrayDepth;
-    }
-    Type shorty_result = Type::ShortyType(desc[0]);
-    return ArrayType(array_depth, shorty_result);
-  } else {
-    return ShortyType(desc[0]);
-  }
-}
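And a few illustrative descriptor parses:

// "[[I" -> array_depth 2 with a narrow core component type;
// "Ljava/lang/String;" -> narrow reference (via ShortyType('L'));
// "V" -> Unknown().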
-
-bool TypeInference::Type::MergeArrayConflict(Type src_type) {
-  DCHECK(Ref());
-  DCHECK_NE(ArrayDepth(), src_type.ArrayDepth());
-  DCHECK_GE(std::min(ArrayDepth(), src_type.ArrayDepth()), 1u);
-  bool size_conflict =
-      (ArrayDepth() == 1u && (raw_bits_ & kFlagArrayWide) != 0u) ||
-      (src_type.ArrayDepth() == 1u && (src_type.raw_bits_ & kFlagArrayWide) != 0u);
-  // Mark all three array type bits so that merging any other type bits will not change this type.
-  return Copy(Type((raw_bits_ & kMaskNonArray) |
-                   (1u << kBitArrayDepthStart) | kFlagArrayCore | kFlagArrayRef | kFlagArrayFp |
-                   kFlagArrayNarrow | (size_conflict ? kFlagArrayWide : 0u)));
-}
-
-bool TypeInference::Type::MergeStrong(Type src_type) {
-  bool changed = MergeNonArrayFlags(src_type);
-  if (src_type.ArrayDepth() != 0u) {
-    if (ArrayDepth() == 0u) {
-      DCHECK_EQ(raw_bits_ & ~kMaskNonArray, 0u);
-      DCHECK_NE(src_type.raw_bits_ & kFlagRef, 0u);
-      raw_bits_ |= src_type.raw_bits_ & (~kMaskNonArray | kFlagRef);
-      changed = true;
-    } else if (ArrayDepth() == src_type.ArrayDepth()) {
-      changed |= MergeBits(src_type, kMaskArrayWideAndType);
-    } else if (src_type.ArrayDepth() == 1u &&
-        (((src_type.raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
-         ((src_type.raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
-      // Source type is [L or [? but current type is at least [[, preserve it.
-    } else if (ArrayDepth() == 1u &&
-        (((raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
-         ((raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
-      // Overwrite [? or [L with the source array type which is at least [[.
-      raw_bits_ = (raw_bits_ & kMaskNonArray) | (src_type.raw_bits_ & ~kMaskNonArray);
-      changed = true;
-    } else {
-      // Mark the array value type with conflict - both ref and fp.
-      changed |= MergeArrayConflict(src_type);
-    }
-  }
-  return changed;
-}
-
-bool TypeInference::Type::MergeWeak(Type src_type) {
-  bool changed = MergeNonArrayFlags(src_type);
-  if (src_type.ArrayDepth() != 0u && src_type.NonNull()) {
-    DCHECK_NE(src_type.ArrayDepth(), 0u);
-    if (ArrayDepth() == 0u) {
-      DCHECK_EQ(raw_bits_ & ~kMaskNonArray, 0u);
-      // Preserve current type.
-    } else if (ArrayDepth() == src_type.ArrayDepth()) {
-      changed |= MergeBits(src_type, kMaskArrayWideAndType);
-    } else if (src_type.ArrayDepth() == 1u &&
-        (((src_type.raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
-         ((src_type.raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
-      // Source type is [L or [? but current type is at least [[, preserve it.
-    } else if (ArrayDepth() == 1u &&
-        (((raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
-         ((raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
-      // We have [? or [L. If it's [?, upgrade to [L as the source array type is at least [[.
-      changed |= MergeBits(ObjectArrayType(), kMaskArrayWideAndType);
-    } else {
-      // Mark the array value type with conflict - both ref and fp.
-      changed |= MergeArrayConflict(src_type);
-    }
-  }
-  return changed;
-}
-
-TypeInference::CheckCastData::CheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc)
-    : mir_graph_(mir_graph),
-      alloc_(alloc),
-      num_blocks_(mir_graph->GetNumBlocks()),
-      num_sregs_(mir_graph->GetNumSSARegs()),
-      check_cast_map_(std::less<MIR*>(), alloc->Adapter()),
-      split_sreg_data_(std::less<int32_t>(), alloc->Adapter()) {
-}
-
-void TypeInference::CheckCastData::AddCheckCast(MIR* check_cast, Type type) {
-  DCHECK_EQ(check_cast->dalvikInsn.opcode, Instruction::CHECK_CAST);
-  type.CheckPureRef();
-  int32_t extra_s_reg = static_cast<int32_t>(num_sregs_);
-  num_sregs_ += 1;
-  check_cast_map_.Put(check_cast, CheckCastMapValue{extra_s_reg, type});  // NOLINT
-  int32_t s_reg = check_cast->ssa_rep->uses[0];
-  auto lb = split_sreg_data_.lower_bound(s_reg);
-  if (lb == split_sreg_data_.end() || split_sreg_data_.key_comp()(s_reg, lb->first)) {
-    SplitSRegData split_s_reg_data = {
-        0,
-        alloc_->AllocArray<int32_t>(num_blocks_, kArenaAllocMisc),
-        alloc_->AllocArray<int32_t>(num_blocks_, kArenaAllocMisc),
-        new (alloc_) ArenaBitVector(alloc_, num_blocks_, false)
-    };
-    std::fill_n(split_s_reg_data.starting_mod_s_reg, num_blocks_, INVALID_SREG);
-    std::fill_n(split_s_reg_data.ending_mod_s_reg, num_blocks_, INVALID_SREG);
-    split_s_reg_data.def_phi_blocks_->ClearAllBits();
-    BasicBlock* def_bb = FindDefBlock(check_cast);
-    split_s_reg_data.ending_mod_s_reg[def_bb->id] = s_reg;
-    split_s_reg_data.def_phi_blocks_->SetBit(def_bb->id);
-    lb = split_sreg_data_.PutBefore(lb, s_reg, split_s_reg_data);
-  }
-  lb->second.ending_mod_s_reg[check_cast->bb] = extra_s_reg;
-  lb->second.def_phi_blocks_->SetBit(check_cast->bb);
-}
-
-void TypeInference::CheckCastData::AddPseudoPhis() {
-  // Look for pseudo-phis where a split SSA reg merges with a differently typed version
-  // and initialize all starting_mod_s_reg.
-  DCHECK(!split_sreg_data_.empty());
-  ArenaBitVector* phi_blocks = new (alloc_) ArenaBitVector(alloc_, num_blocks_, false);
-
-  for (auto& entry : split_sreg_data_) {
-    SplitSRegData& data = entry.second;
-
-    // Find pseudo-phi nodes.
-    phi_blocks->ClearAllBits();
-    ArenaBitVector* input_blocks = data.def_phi_blocks_;
-    do {
-      for (uint32_t idx : input_blocks->Indexes()) {
-        BasicBlock* def_bb = mir_graph_->GetBasicBlock(idx);
-        if (def_bb->dom_frontier != nullptr) {
-          phi_blocks->Union(def_bb->dom_frontier);
-        }
-      }
-    } while (input_blocks->Union(phi_blocks));
-
-    // Find live pseudo-phis. Make sure they're merging the same SSA reg.
-    data.def_phi_blocks_->ClearAllBits();
-    int32_t s_reg = entry.first;
-    int v_reg = mir_graph_->SRegToVReg(s_reg);
-    for (uint32_t phi_bb_id : phi_blocks->Indexes()) {
-      BasicBlock* phi_bb = mir_graph_->GetBasicBlock(phi_bb_id);
-      DCHECK(phi_bb != nullptr);
-      DCHECK(phi_bb->data_flow_info != nullptr);
-      DCHECK(phi_bb->data_flow_info->live_in_v != nullptr);
-      if (IsSRegLiveAtStart(phi_bb, v_reg, s_reg)) {
-        int32_t extra_s_reg = static_cast<int32_t>(num_sregs_);
-        num_sregs_ += 1;
-        data.starting_mod_s_reg[phi_bb_id] = extra_s_reg;
-        data.def_phi_blocks_->SetBit(phi_bb_id);
-      }
-    }
-
-    // SSA rename for s_reg.
-    TopologicalSortIterator iter(mir_graph_);
-    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-      if (bb->data_flow_info == nullptr || bb->block_type == kEntryBlock) {
-        continue;
-      }
-      BasicBlockId bb_id = bb->id;
-      if (data.def_phi_blocks_->IsBitSet(bb_id)) {
-        DCHECK_NE(data.starting_mod_s_reg[bb_id], INVALID_SREG);
-      } else {
-        DCHECK_EQ(data.starting_mod_s_reg[bb_id], INVALID_SREG);
-        if (IsSRegLiveAtStart(bb, v_reg, s_reg)) {
-          // The earliest predecessor must have been processed already.
-          BasicBlock* pred_bb = FindTopologicallyEarliestPredecessor(bb);
-          int32_t mod_s_reg = data.ending_mod_s_reg[pred_bb->id];
-          data.starting_mod_s_reg[bb_id] = (mod_s_reg != INVALID_SREG) ? mod_s_reg : s_reg;
-        } else if (data.ending_mod_s_reg[bb_id] != INVALID_SREG) {
-          // Start the original defining block with s_reg.
-          data.starting_mod_s_reg[bb_id] = s_reg;
-        }
-      }
-      if (data.ending_mod_s_reg[bb_id] == INVALID_SREG) {
-        // If the block doesn't define the modified SSA reg, it propagates the starting type.
-        data.ending_mod_s_reg[bb_id] = data.starting_mod_s_reg[bb_id];
-      }
-    }
-  }
-}
-
-void TypeInference::CheckCastData::InitializeCheckCastSRegs(Type* sregs) const {
-  for (const auto& entry : check_cast_map_) {
-    DCHECK_LT(static_cast<size_t>(entry.second.modified_s_reg), num_sregs_);
-    sregs[entry.second.modified_s_reg] = entry.second.type.AsNonNull();
-  }
-}
-
-void TypeInference::CheckCastData::MergeCheckCastConflicts(Type* sregs) const {
-  for (const auto& entry : check_cast_map_) {
-    DCHECK_LT(static_cast<size_t>(entry.second.modified_s_reg), num_sregs_);
-    sregs[entry.first->ssa_rep->uses[0]].MergeNonArrayFlags(
-        sregs[entry.second.modified_s_reg].AsNull());
-  }
-}
-
-void TypeInference::CheckCastData::MarkPseudoPhiBlocks(uint64_t* bb_df_attrs) const {
-  for (auto& entry : split_sreg_data_) {
-    for (uint32_t bb_id : entry.second.def_phi_blocks_->Indexes()) {
-      bb_df_attrs[bb_id] |= DF_NULL_TRANSFER_N;
-    }
-  }
-}
-
-void TypeInference::CheckCastData::Start(BasicBlock* bb) {
-  for (auto& entry : split_sreg_data_) {
-    entry.second.current_mod_s_reg = entry.second.starting_mod_s_reg[bb->id];
-  }
-}
-
-bool TypeInference::CheckCastData::ProcessPseudoPhis(BasicBlock* bb, Type* sregs) {
-  bool changed = false;
-  for (auto& entry : split_sreg_data_) {
-    DCHECK_EQ(entry.second.current_mod_s_reg, entry.second.starting_mod_s_reg[bb->id]);
-    if (entry.second.def_phi_blocks_->IsBitSet(bb->id)) {
-      int32_t* ending_mod_s_reg = entry.second.ending_mod_s_reg;
-      Type merged_type = sregs[entry.second.current_mod_s_reg];
-      for (BasicBlockId pred_id : bb->predecessors) {
-        DCHECK_LT(static_cast<size_t>(ending_mod_s_reg[pred_id]), num_sregs_);
-        merged_type.MergeWeak(sregs[ending_mod_s_reg[pred_id]]);
-      }
-      if (UNLIKELY(!merged_type.IsDefined())) {
-        // This can happen during an initial merge of a loop head if the original def is
-        // actually an untyped null. (All other definitions are typed using the check-cast.)
-      } else if (merged_type.Wide()) {
-        // Ignore the pseudo-phi, just remember that there's a size mismatch.
-        sregs[entry.second.current_mod_s_reg].MarkSizeConflict();
-      } else {
-        DCHECK(merged_type.Narrow() && merged_type.LowWord() && !merged_type.HighWord());
-        // Propagate both down (fully) and up (without the "non-null" flag).
-        changed |= sregs[entry.second.current_mod_s_reg].Copy(merged_type);
-        merged_type = merged_type.AsNull();
-        for (BasicBlockId pred_id : bb->predecessors) {
-          DCHECK_LT(static_cast<size_t>(ending_mod_s_reg[pred_id]), num_sregs_);
-          sregs[ending_mod_s_reg[pred_id]].MergeStrong(merged_type);
-        }
-      }
-    }
-  }
-  return changed;
-}
-
-void TypeInference::CheckCastData::ProcessCheckCast(MIR* mir) {
-  auto mir_it = check_cast_map_.find(mir);
-  DCHECK(mir_it != check_cast_map_.end());
-  auto sreg_it = split_sreg_data_.find(mir->ssa_rep->uses[0]);
-  DCHECK(sreg_it != split_sreg_data_.end());
-  sreg_it->second.current_mod_s_reg = mir_it->second.modified_s_reg;
-}
-
-TypeInference::SplitSRegData* TypeInference::CheckCastData::GetSplitSRegData(int32_t s_reg) {
-  auto it = split_sreg_data_.find(s_reg);
-  return (it == split_sreg_data_.end()) ? nullptr : &it->second;
-}
-
-BasicBlock* TypeInference::CheckCastData::FindDefBlock(MIR* check_cast) {
-  // Find the initial definition of the SSA reg used by the check-cast.
-  DCHECK_EQ(check_cast->dalvikInsn.opcode, Instruction::CHECK_CAST);
-  int32_t s_reg = check_cast->ssa_rep->uses[0];
-  if (mir_graph_->IsInVReg(s_reg)) {
-    return mir_graph_->GetEntryBlock();
-  }
-  int v_reg = mir_graph_->SRegToVReg(s_reg);
-  BasicBlock* bb = mir_graph_->GetBasicBlock(check_cast->bb);
-  DCHECK(bb != nullptr);
-  while (true) {
-    // Find the earliest predecessor in the topological sort order to ensure we don't
-    // go in a loop.
-    BasicBlock* pred_bb = FindTopologicallyEarliestPredecessor(bb);
-    DCHECK(pred_bb != nullptr);
-    DCHECK(pred_bb->data_flow_info != nullptr);
-    DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
-    if (pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] != s_reg) {
-      // The s_reg was not valid at the end of pred_bb, so it must have been defined in bb.
-      return bb;
-    }
-    bb = pred_bb;
-  }
-}
-
-BasicBlock* TypeInference::CheckCastData::FindTopologicallyEarliestPredecessor(BasicBlock* bb) {
-  DCHECK(!bb->predecessors.empty());
-  const auto& indexes = mir_graph_->GetTopologicalSortOrderIndexes();
-  DCHECK_LT(bb->id, indexes.size());
-  size_t best_idx = indexes[bb->id];
-  BasicBlockId best_id = NullBasicBlockId;
-  for (BasicBlockId pred_id : bb->predecessors) {
-    DCHECK_LT(pred_id, indexes.size());
-    if (best_idx > indexes[pred_id]) {
-      best_idx = indexes[pred_id];
-      best_id = pred_id;
-    }
-  }
-  // There must be at least one predecessor earlier than the bb.
-  DCHECK_LT(best_idx, indexes[bb->id]);
-  return mir_graph_->GetBasicBlock(best_id);
-}
-
-bool TypeInference::CheckCastData::IsSRegLiveAtStart(BasicBlock* bb, int v_reg, int32_t s_reg) {
-  DCHECK_EQ(v_reg, mir_graph_->SRegToVReg(s_reg));
-  DCHECK(bb != nullptr);
-  DCHECK(bb->data_flow_info != nullptr);
-  DCHECK(bb->data_flow_info->live_in_v != nullptr);
-  if (!bb->data_flow_info->live_in_v->IsBitSet(v_reg)) {
-    return false;
-  }
-  for (BasicBlockId pred_id : bb->predecessors) {
-    BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
-    DCHECK(pred_bb != nullptr);
-    DCHECK(pred_bb->data_flow_info != nullptr);
-    DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
-    if (pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] != s_reg) {
-      return false;
-    }
-  }
-  return true;
-}
-
-TypeInference::TypeInference(MIRGraph* mir_graph, ScopedArenaAllocator* alloc)
-    : mir_graph_(mir_graph),
-      cu_(mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit()),
-      check_cast_data_(!mir_graph->HasCheckCast() ? nullptr :
-          InitializeCheckCastData(mir_graph, alloc)),
-      num_sregs_(
-          check_cast_data_ != nullptr ? check_cast_data_->NumSRegs() : mir_graph->GetNumSSARegs()),
-      ifields_(mir_graph->GetIFieldLoweringInfoCount() == 0u ? nullptr :
-          PrepareIFieldTypes(cu_->dex_file, mir_graph, alloc)),
-      sfields_(mir_graph->GetSFieldLoweringInfoCount() == 0u ? nullptr :
-          PrepareSFieldTypes(cu_->dex_file, mir_graph, alloc)),
-      signatures_(mir_graph->GetMethodLoweringInfoCount() == 0u ? nullptr :
-          PrepareSignatures(cu_->dex_file, mir_graph, alloc)),
-      current_method_signature_(
-          Signature(cu_->dex_file, cu_->method_idx, (cu_->access_flags & kAccStatic) != 0, alloc)),
-      sregs_(alloc->AllocArray<Type>(num_sregs_, kArenaAllocMisc)),
-      bb_df_attrs_(alloc->AllocArray<uint64_t>(mir_graph->GetNumBlocks(), kArenaAllocDFInfo)) {
-  InitializeSRegs();
-}
-
-bool TypeInference::Apply(BasicBlock* bb) {
-  bool changed = false;
-  uint64_t bb_df_attrs = bb_df_attrs_[bb->id];
-  if (bb_df_attrs != 0u) {
-    if (UNLIKELY(check_cast_data_ != nullptr)) {
-      check_cast_data_->Start(bb);
-      if (bb_df_attrs & DF_NULL_TRANSFER_N) {
-        changed |= check_cast_data_->ProcessPseudoPhis(bb, sregs_);
-      }
-    }
-    MIR* mir = bb->first_mir_insn;
-    MIR* main_mirs_end = ((bb_df_attrs & DF_SAME_TYPE_AB) != 0u) ? bb->last_mir_insn : nullptr;
-    for (; mir != main_mirs_end && static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi;
-        mir = mir->next) {
-      // Special-case handling for Phi comes first because we have 2 Phis instead of a wide one.
-      // At least one input must have been previously processed. Look for the first
-      // occurrence of a high_word or low_word flag to determine the type.
-      size_t num_uses = mir->ssa_rep->num_uses;
-      const int32_t* uses = mir->ssa_rep->uses;
-      const int32_t* defs = mir->ssa_rep->defs;
-      DCHECK_EQ(bb->predecessors.size(), num_uses);
-      Type merged_type = sregs_[defs[0]];
-      for (size_t pred_idx = 0; pred_idx != num_uses; ++pred_idx) {
-        int32_t input_mod_s_reg = PhiInputModifiedSReg(uses[pred_idx], bb, pred_idx);
-        merged_type.MergeWeak(sregs_[input_mod_s_reg]);
-      }
-      if (UNLIKELY(!merged_type.IsDefined())) {
-        // No change
-      } else if (merged_type.HighWord()) {
-        // Ignore the high word phi, just remember if there's a size mismatch.
-        if (UNLIKELY(merged_type.LowWord())) {
-          sregs_[defs[0]].MarkSizeConflict();
-        }
-      } else {
-        // Propagate both down (fully) and up (without the "non-null" flag).
-        changed |= sregs_[defs[0]].Copy(merged_type);
-        merged_type = merged_type.AsNull();
-        for (size_t pred_idx = 0; pred_idx != num_uses; ++pred_idx) {
-          int32_t input_mod_s_reg = PhiInputModifiedSReg(uses[pred_idx], bb, pred_idx);
-          changed |= UpdateSRegFromLowWordType(input_mod_s_reg, merged_type);
-        }
-      }
-    }
-
-    // Propagate types with MOVEs and AGETs, process CHECK_CASTs for modified SSA reg tracking.
-    for (; mir != main_mirs_end; mir = mir->next) {
-      uint64_t attrs = MIRGraph::GetDataFlowAttributes(mir);
-      size_t num_uses = mir->ssa_rep->num_uses;
-      const int32_t* uses = mir->ssa_rep->uses;
-      const int32_t* defs = mir->ssa_rep->defs;
-
-      // Special handling for moves. Propagate type both ways.
-      if ((attrs & DF_IS_MOVE) != 0) {
-        int32_t used_mod_s_reg = ModifiedSReg(uses[0]);
-        int32_t defd_mod_s_reg = defs[0];
-
-        // The "non-null" flag is propagated only downwards from actual definitions and it's
-        // not initially marked for moves, so used sreg must be marked before defined sreg.
-        // The only exception is an inlined move where we know the type from the original invoke.
-        DCHECK(sregs_[used_mod_s_reg].NonNull() || !sregs_[defd_mod_s_reg].NonNull() ||
-               (mir->optimization_flags & MIR_CALLEE) != 0);
-        changed |= UpdateSRegFromLowWordType(used_mod_s_reg, sregs_[defd_mod_s_reg].AsNull());
-
-        // The value is the same, so either both registers are null or no register is.
-        // In any case we can safely propagate the array type down.
-        changed |= UpdateSRegFromLowWordType(defd_mod_s_reg, sregs_[used_mod_s_reg]);
-        if (UNLIKELY((attrs & DF_REF_A) == 0 && sregs_[used_mod_s_reg].Ref())) {
-          // Mark type conflict: move instead of move-object.
-          sregs_[used_mod_s_reg].MarkTypeConflict();
-        }
-        continue;
-      }
-
-      // Handle AGET/APUT.
-      if ((attrs & DF_HAS_RANGE_CHKS) != 0) {
-        int32_t base_mod_s_reg = ModifiedSReg(uses[num_uses - 2u]);
-        int32_t mod_s_reg = (attrs & DF_DA) != 0 ? defs[0] : ModifiedSReg(uses[0]);
-        DCHECK_NE(sregs_[base_mod_s_reg].ArrayDepth(), 0u);
-        if (!sregs_[base_mod_s_reg].NonNull()) {
-          // If the base is null, don't propagate anything. All that we could determine
-          // has already been merged in the previous stage.
-        } else {
-          changed |= UpdateSRegFromLowWordType(mod_s_reg, sregs_[base_mod_s_reg].ComponentType());
-          Type array_type = Type::ArrayTypeFromComponent(sregs_[mod_s_reg]);
-          if ((attrs & DF_DA) != 0) {
-            changed |= sregs_[base_mod_s_reg].MergeStrong(array_type);
-          } else {
-            changed |= sregs_[base_mod_s_reg].MergeWeak(array_type);
-          }
-        }
-        if (UNLIKELY((attrs & DF_REF_A) == 0 && sregs_[mod_s_reg].Ref())) {
-          // Mark type conflict: aget/aput instead of aget/aput-object.
-          sregs_[mod_s_reg].MarkTypeConflict();
-        }
-        continue;
-      }
-
-      // Special-case handling for check-cast to advance modified SSA reg.
-      if (UNLIKELY((attrs & DF_CHK_CAST) != 0)) {
-        DCHECK(check_cast_data_ != nullptr);
-        check_cast_data_->ProcessCheckCast(mir);
-      }
-    }
-
-    // Propagate types for IF_cc if present.
-    if (mir != nullptr) {
-      DCHECK(mir == bb->last_mir_insn);
-      DCHECK(mir->next == nullptr);
-      DCHECK_NE(MIRGraph::GetDataFlowAttributes(mir) & DF_SAME_TYPE_AB, 0u);
-      DCHECK_EQ(mir->ssa_rep->num_uses, 2u);
-      const int32_t* uses = mir->ssa_rep->uses;
-      int32_t mod_s_reg0 = ModifiedSReg(uses[0]);
-      int32_t mod_s_reg1 = ModifiedSReg(uses[1]);
-      changed |= sregs_[mod_s_reg0].MergeWeak(sregs_[mod_s_reg1].AsNull());
-      changed |= sregs_[mod_s_reg1].MergeWeak(sregs_[mod_s_reg0].AsNull());
-    }
-  }
-  return changed;
-}
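// Editorial sketch, not from the deleted sources: Apply() reports whether any
// s-reg type changed in the block, so a caller (here a hypothetical driver;
// mir_graph and type_inference are assumed to be in scope) iterates to a
// fixed point before Finish() resolves the remaining conflicts. Termination
// is guaranteed because every merge only ever adds bits.
bool changed = true;
while (changed) {
  changed = false;
  PreOrderDfsIterator iter(mir_graph);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    changed |= type_inference->Apply(bb);
  }
}
type_inference->Finish();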
-
-void TypeInference::Finish() {
-  if (UNLIKELY(check_cast_data_ != nullptr)) {
-    check_cast_data_->MergeCheckCastConflicts(sregs_);
-  }
-
-  size_t num_sregs = mir_graph_->GetNumSSARegs();  // Without the extra SSA regs.
-  for (size_t s_reg = 0; s_reg != num_sregs; ++s_reg) {
-    if (sregs_[s_reg].SizeConflict()) {
-      /*
-       * The dex bytecode definition does not explicitly forbid using the same virtual
-       * register both as a 32-bit value and as half of a 64-bit pair.  However, dx
-       * does not generate this pattern (at least recently) and the next revision of
-       * dex will forbid it.  To support the few cases in the wild, detect this pattern
-       * and punt to the interpreter.
-       */
-      LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
-                   << " has size conflict block for sreg " << s_reg
-                   << ", punting to interpreter.";
-      mir_graph_->SetPuntToInterpreter(true);
-      return;
-    }
-  }
-
-  size_t conflict_s_reg = 0;
-  bool type_conflict = false;
-  for (size_t s_reg = 0; s_reg != num_sregs; ++s_reg) {
-    Type type = sregs_[s_reg];
-    RegLocation* loc = &mir_graph_->reg_location_[s_reg];
-    loc->wide = type.Wide();
-    loc->defined = type.IsDefined();
-    loc->fp = type.Fp();
-    loc->core = type.Core();
-    loc->ref = type.Ref();
-    loc->high_word = type.HighWord();
-    if (UNLIKELY(type.TypeConflict())) {
-      type_conflict = true;
-      conflict_s_reg = s_reg;
-    }
-  }
-
-  if (type_conflict) {
-    /*
-     * Each Dalvik register definition should be used as a reference, an integer
-     * or a floating point value. We don't normally expect to see a Dalvik
-     * register definition used in two or three of these roles, though technically it
-     * could happen with constants (0 for all three roles, non-zero for integer and
-     * FP). Detect this situation and disable optimizations that rely on correct
-     * typing, i.e. register promotion, GVN/LVN and GVN-based DCE.
-     */
-    LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
-                 << " has type conflict block for sreg " << conflict_s_reg
-                 << ", disabling register promotion.";
-    cu_->disable_opt |=
-        (1u << kPromoteRegs) |
-        (1u << kGlobalValueNumbering) |
-        (1u << kGvnDeadCodeElimination) |
-        (1u << kLocalValueNumbering);
-  }
-}
-
-TypeInference::Type TypeInference::FieldType(const DexFile* dex_file, uint32_t field_idx) {
-  uint32_t type_idx = dex_file->GetFieldId(field_idx).type_idx_;
-  Type result = Type::DexType(dex_file, type_idx);
-  return result;
-}
-
-TypeInference::Type* TypeInference::PrepareIFieldTypes(const DexFile* dex_file,
-                                                       MIRGraph* mir_graph,
-                                                       ScopedArenaAllocator* alloc) {
-  size_t count = mir_graph->GetIFieldLoweringInfoCount();
-  Type* ifields = alloc->AllocArray<Type>(count, kArenaAllocDFInfo);
-  for (uint32_t i = 0u; i != count; ++i) {
-    // NOTE: Quickened field accesses have invalid FieldIndex() but they are always resolved.
-    const MirFieldInfo& info = mir_graph->GetIFieldLoweringInfo(i);
-    const DexFile* current_dex_file = info.IsResolved() ? info.DeclaringDexFile() : dex_file;
-    uint32_t field_idx = info.IsResolved() ? info.DeclaringFieldIndex() : info.FieldIndex();
-    ifields[i] = FieldType(current_dex_file, field_idx);
-    DCHECK_EQ(info.MemAccessType() == kDexMemAccessWide, ifields[i].Wide());
-    DCHECK_EQ(info.MemAccessType() == kDexMemAccessObject, ifields[i].Ref());
-  }
-  return ifields;
-}
-
-TypeInference::Type* TypeInference::PrepareSFieldTypes(const DexFile* dex_file,
-                                                       MIRGraph* mir_graph,
-                                                       ScopedArenaAllocator* alloc) {
-  size_t count = mir_graph->GetSFieldLoweringInfoCount();
-  Type* sfields = alloc->AllocArray<Type>(count, kArenaAllocDFInfo);
-  for (uint32_t i = 0u; i != count; ++i) {
-    // FieldIndex() is always valid for static fields (no quickened instructions).
-    sfields[i] = FieldType(dex_file, mir_graph->GetSFieldLoweringInfo(i).FieldIndex());
-  }
-  return sfields;
-}
-
-TypeInference::MethodSignature TypeInference::Signature(const DexFile* dex_file,
-                                                        uint32_t method_idx,
-                                                        bool is_static,
-                                                        ScopedArenaAllocator* alloc) {
-  const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
-  const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
-  Type return_type = Type::DexType(dex_file, proto_id.return_type_idx_);
-  const DexFile::TypeList* type_list = dex_file->GetProtoParameters(proto_id);
-  size_t this_size = (is_static ? 0u : 1u);
-  size_t param_size = ((type_list != nullptr) ? type_list->Size() : 0u);
-  size_t size = this_size + param_size;
-  Type* param_types = (size != 0u) ? alloc->AllocArray<Type>(size, kArenaAllocDFInfo) : nullptr;
-  if (!is_static) {
-    param_types[0] = Type::DexType(dex_file, method_id.class_idx_);
-  }
-  for (size_t i = 0; i != param_size; ++i) {
-    uint32_t type_idx = type_list->GetTypeItem(i).type_idx_;
-    param_types[this_size + i] = Type::DexType(dex_file, type_idx);
-  }
-  return MethodSignature{ return_type, size, param_types };  // NOLINT
-}
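// Illustrative decomposition (editorial note, not from the sources): for a
// non-static method of class LFoo; with proto "(IJ)V", Signature() yields
//   return_type: undefined, since a void return has no defined Type,
//   num_params:  3, the implicit 'this' plus the two declared parameters,
//   param_types: { LFoo; (narrow ref), int (narrow core), long (wide core) }.
// A wide parameter takes a single MethodSignature slot but two SSA registers,
// which is why consumers advance by param_type.Wide() ? 2 : 1.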
-
-TypeInference::MethodSignature* TypeInference::PrepareSignatures(const DexFile* dex_file,
-                                                                 MIRGraph* mir_graph,
-                                                                 ScopedArenaAllocator* alloc) {
-  size_t count = mir_graph->GetMethodLoweringInfoCount();
-  MethodSignature* signatures = alloc->AllocArray<MethodSignature>(count, kArenaAllocDFInfo);
-  for (uint32_t i = 0u; i != count; ++i) {
-    // NOTE: Quickened invokes have invalid MethodIndex() but they are always resolved.
-    const MirMethodInfo& info = mir_graph->GetMethodLoweringInfo(i);
-    uint32_t method_idx = info.IsResolved() ? info.DeclaringMethodIndex() : info.MethodIndex();
-    const DexFile* current_dex_file = info.IsResolved() ? info.DeclaringDexFile() : dex_file;
-    signatures[i] = Signature(current_dex_file, method_idx, info.IsStatic(), alloc);
-  }
-  return signatures;
-}
-
-TypeInference::CheckCastData* TypeInference::InitializeCheckCastData(MIRGraph* mir_graph,
-                                                                     ScopedArenaAllocator* alloc) {
-  if (!mir_graph->HasCheckCast()) {
-    return nullptr;
-  }
-
-  CheckCastData* data = nullptr;
-  const DexFile* dex_file = nullptr;
-  PreOrderDfsIterator iter(mir_graph);
-  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      if (mir->dalvikInsn.opcode == Instruction::CHECK_CAST) {
-        if (data == nullptr) {
-          data = new (alloc) CheckCastData(mir_graph, alloc);
-          dex_file = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit()->dex_file;
-        }
-        Type type = Type::DexType(dex_file, mir->dalvikInsn.vB);
-        data->AddCheckCast(mir, type);
-      }
-    }
-  }
-  if (data != nullptr) {
-    data->AddPseudoPhis();
-  }
-  return data;
-}
-
-void TypeInference::InitializeSRegs() {
-  std::fill_n(sregs_, num_sregs_, Type::Unknown());
-
-  /* Treat the ArtMethod* specially since it is pointer-sized. */
-  sregs_[mir_graph_->GetMethodSReg()] = Type::ArtMethodType(cu_->target64);
-
-  // Initialize parameter SSA regs at method entry.
-  int32_t entry_param_s_reg = mir_graph_->GetFirstInVR();
-  for (size_t i = 0, size = current_method_signature_.num_params; i != size; ++i) {
-    Type param_type = current_method_signature_.param_types[i].AsNonNull();
-    sregs_[entry_param_s_reg] = param_type;
-    entry_param_s_reg += param_type.Wide() ? 2 : 1;
-  }
-  DCHECK_EQ(static_cast<uint32_t>(entry_param_s_reg),
-            mir_graph_->GetFirstInVR() + mir_graph_->GetNumOfInVRs());
-
-  // Initialize check-cast types.
-  if (UNLIKELY(check_cast_data_ != nullptr)) {
-    check_cast_data_->InitializeCheckCastSRegs(sregs_);
-  }
-
-  // Initialize well-known SSA register definition types. Merge inferred types
-  // upwards where a single merge is enough (INVOKE arguments and return type,
-  // RETURN type, IPUT/SPUT source type).
-  // NOTE: Using topological sort order to make sure the definition comes before
-  // any upward merging. This allows simple assignment of the defined types
-  // instead of MergeStrong().
-  TopologicalSortIterator iter(mir_graph_);
-  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    uint64_t bb_df_attrs = 0u;
-    if (UNLIKELY(check_cast_data_ != nullptr)) {
-      check_cast_data_->Start(bb);
-    }
-    // Ignore pseudo-phis; we're not setting types for SSA regs that depend on them in this pass.
-    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      uint64_t attrs = MIRGraph::GetDataFlowAttributes(mir);
-      bb_df_attrs |= attrs;
-
-      const uint32_t num_uses = mir->ssa_rep->num_uses;
-      const int32_t* uses = mir->ssa_rep->uses;
-      const int32_t* defs = mir->ssa_rep->defs;
-
-      uint16_t opcode = mir->dalvikInsn.opcode;
-      switch (opcode) {
-        case Instruction::CONST_4:
-        case Instruction::CONST_16:
-        case Instruction::CONST:
-        case Instruction::CONST_HIGH16:
-        case Instruction::CONST_WIDE_16:
-        case Instruction::CONST_WIDE_32:
-        case Instruction::CONST_WIDE:
-        case Instruction::CONST_WIDE_HIGH16:
-        case Instruction::MOVE:
-        case Instruction::MOVE_FROM16:
-        case Instruction::MOVE_16:
-        case Instruction::MOVE_WIDE:
-        case Instruction::MOVE_WIDE_FROM16:
-        case Instruction::MOVE_WIDE_16:
-        case Instruction::MOVE_OBJECT:
-        case Instruction::MOVE_OBJECT_FROM16:
-        case Instruction::MOVE_OBJECT_16:
-          if ((mir->optimization_flags & MIR_CALLEE) != 0) {
-            // Inlined const/move keeps method_lowering_info for type inference.
-            DCHECK_LT(mir->meta.method_lowering_info, mir_graph_->GetMethodLoweringInfoCount());
-            Type return_type = signatures_[mir->meta.method_lowering_info].return_type;
-            DCHECK(return_type.IsDefined());  // Method return type can't be void.
-            sregs_[defs[0]] = return_type.AsNonNull();
-            if (return_type.Wide()) {
-              DCHECK_EQ(defs[0] + 1, defs[1]);
-              sregs_[defs[1]] = return_type.ToHighWord();
-            }
-            break;
-          }
-          FALLTHROUGH_INTENDED;
-        case kMirOpPhi:
-          // These cannot be determined in this simple pass and will be processed later.
-          break;
-
-        case Instruction::MOVE_RESULT:
-        case Instruction::MOVE_RESULT_WIDE:
-        case Instruction::MOVE_RESULT_OBJECT:
-          // Nothing to do, handled with invoke-* or filled-new-array/-range.
-          break;
-        case Instruction::MOVE_EXCEPTION:
-          // NOTE: We can never catch an array.
-          sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
-          break;
-        case Instruction::CONST_STRING:
-        case Instruction::CONST_STRING_JUMBO:
-        case Instruction::CONST_CLASS:
-          sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
-          break;
-        case Instruction::CHECK_CAST:
-          DCHECK(check_cast_data_ != nullptr);
-          check_cast_data_->ProcessCheckCast(mir);
-          break;
-        case Instruction::ARRAY_LENGTH:
-          sregs_[ModifiedSReg(uses[0])].MergeStrong(Type::UnknownArrayType());
-          break;
-        case Instruction::NEW_INSTANCE:
-          sregs_[defs[0]] = Type::DexType(cu_->dex_file, mir->dalvikInsn.vB).AsNonNull();
-          DCHECK(sregs_[defs[0]].Ref());
-          DCHECK_EQ(sregs_[defs[0]].ArrayDepth(), 0u);
-          break;
-        case Instruction::NEW_ARRAY:
-          sregs_[defs[0]] = Type::DexType(cu_->dex_file, mir->dalvikInsn.vC).AsNonNull();
-          DCHECK(sregs_[defs[0]].Ref());
-          DCHECK_NE(sregs_[defs[0]].ArrayDepth(), 0u);
-          break;
-        case Instruction::FILLED_NEW_ARRAY:
-        case Instruction::FILLED_NEW_ARRAY_RANGE: {
-          Type array_type = Type::DexType(cu_->dex_file, mir->dalvikInsn.vB);
-          array_type.CheckPureRef();  // Previously checked by the method verifier.
-          DCHECK_NE(array_type.ArrayDepth(), 0u);
-          Type component_type = array_type.ComponentType();
-          DCHECK(!component_type.Wide());
-          MIR* move_result_mir = mir_graph_->FindMoveResult(bb, mir);
-          if (move_result_mir != nullptr) {
-            DCHECK_EQ(move_result_mir->dalvikInsn.opcode, Instruction::MOVE_RESULT_OBJECT);
-            sregs_[move_result_mir->ssa_rep->defs[0]] = array_type.AsNonNull();
-          }
-          DCHECK_EQ(num_uses, mir->dalvikInsn.vA);
-          for (size_t next = 0u; next != num_uses; ++next) {
-            int32_t input_mod_s_reg = ModifiedSReg(uses[next]);
-            sregs_[input_mod_s_reg].MergeStrong(component_type);
-          }
-          break;
-        }
-        case Instruction::INVOKE_VIRTUAL:
-        case Instruction::INVOKE_SUPER:
-        case Instruction::INVOKE_DIRECT:
-        case Instruction::INVOKE_STATIC:
-        case Instruction::INVOKE_INTERFACE:
-        case Instruction::INVOKE_VIRTUAL_RANGE:
-        case Instruction::INVOKE_SUPER_RANGE:
-        case Instruction::INVOKE_DIRECT_RANGE:
-        case Instruction::INVOKE_STATIC_RANGE:
-        case Instruction::INVOKE_INTERFACE_RANGE:
-        case Instruction::INVOKE_VIRTUAL_QUICK:
-        case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
-          const MethodSignature* signature = &signatures_[mir->meta.method_lowering_info];
-          MIR* move_result_mir = mir_graph_->FindMoveResult(bb, mir);
-          if (move_result_mir != nullptr) {
-            Type return_type = signature->return_type;
-            sregs_[move_result_mir->ssa_rep->defs[0]] = return_type.AsNonNull();
-            if (return_type.Wide()) {
-              DCHECK_EQ(move_result_mir->ssa_rep->defs[0] + 1, move_result_mir->ssa_rep->defs[1]);
-              sregs_[move_result_mir->ssa_rep->defs[1]] = return_type.ToHighWord();
-            }
-          }
-          size_t next = 0u;
-          for (size_t i = 0, size = signature->num_params; i != size; ++i) {
-            Type param_type = signature->param_types[i];
-            int32_t param_s_reg = ModifiedSReg(uses[next]);
-            DCHECK(!param_type.Wide() || uses[next] + 1 == uses[next + 1]);
-            UpdateSRegFromLowWordType(param_s_reg, param_type);
-            next += param_type.Wide() ? 2 : 1;
-          }
-          DCHECK_EQ(next, num_uses);
-          DCHECK_EQ(next, mir->dalvikInsn.vA);
-          break;
-        }
-
-        case Instruction::RETURN_WIDE:
-          DCHECK(current_method_signature_.return_type.Wide());
-          DCHECK_EQ(uses[0] + 1, uses[1]);
-          DCHECK_EQ(ModifiedSReg(uses[0]), uses[0]);
-          FALLTHROUGH_INTENDED;
-        case Instruction::RETURN:
-        case Instruction::RETURN_OBJECT: {
-          int32_t mod_s_reg = ModifiedSReg(uses[0]);
-          UpdateSRegFromLowWordType(mod_s_reg, current_method_signature_.return_type);
-          break;
-        }
-
-        // NOTE: For AGET/APUT we set only the array type. The operand type is set
-        // below based on the data flow attributes.
-        case Instruction::AGET:
-        case Instruction::APUT:
-          sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::NarrowArrayType());
-          break;
-        case Instruction::AGET_WIDE:
-        case Instruction::APUT_WIDE:
-          sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::WideArrayType());
-          break;
-        case Instruction::AGET_OBJECT:
-          sregs_[defs[0]] = sregs_[defs[0]].AsNonNull();
-          FALLTHROUGH_INTENDED;
-        case Instruction::APUT_OBJECT:
-          sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::ObjectArrayType());
-          break;
-        case Instruction::AGET_BOOLEAN:
-        case Instruction::APUT_BOOLEAN:
-        case Instruction::AGET_BYTE:
-        case Instruction::APUT_BYTE:
-        case Instruction::AGET_CHAR:
-        case Instruction::APUT_CHAR:
-        case Instruction::AGET_SHORT:
-        case Instruction::APUT_SHORT:
-          sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::NarrowCoreArrayType());
-          break;
-
-        case Instruction::IGET_WIDE:
-        case Instruction::IGET_WIDE_QUICK:
-          DCHECK_EQ(defs[0] + 1, defs[1]);
-          DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
-          sregs_[defs[1]] = ifields_[mir->meta.ifield_lowering_info].ToHighWord();
-          FALLTHROUGH_INTENDED;
-        case Instruction::IGET:
-        case Instruction::IGET_OBJECT:
-        case Instruction::IGET_BOOLEAN:
-        case Instruction::IGET_BYTE:
-        case Instruction::IGET_CHAR:
-        case Instruction::IGET_SHORT:
-        case Instruction::IGET_QUICK:
-        case Instruction::IGET_OBJECT_QUICK:
-        case Instruction::IGET_BOOLEAN_QUICK:
-        case Instruction::IGET_BYTE_QUICK:
-        case Instruction::IGET_CHAR_QUICK:
-        case Instruction::IGET_SHORT_QUICK:
-          DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
-          sregs_[defs[0]] = ifields_[mir->meta.ifield_lowering_info].AsNonNull();
-          break;
-        case Instruction::IPUT_WIDE:
-        case Instruction::IPUT_WIDE_QUICK:
-          DCHECK_EQ(uses[0] + 1, uses[1]);
-          FALLTHROUGH_INTENDED;
-        case Instruction::IPUT:
-        case Instruction::IPUT_OBJECT:
-        case Instruction::IPUT_BOOLEAN:
-        case Instruction::IPUT_BYTE:
-        case Instruction::IPUT_CHAR:
-        case Instruction::IPUT_SHORT:
-        case Instruction::IPUT_QUICK:
-        case Instruction::IPUT_OBJECT_QUICK:
-        case Instruction::IPUT_BOOLEAN_QUICK:
-        case Instruction::IPUT_BYTE_QUICK:
-        case Instruction::IPUT_CHAR_QUICK:
-        case Instruction::IPUT_SHORT_QUICK:
-          DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
-          UpdateSRegFromLowWordType(ModifiedSReg(uses[0]),
-                                    ifields_[mir->meta.ifield_lowering_info]);
-          break;
-        case Instruction::SGET_WIDE:
-          DCHECK_EQ(defs[0] + 1, defs[1]);
-          DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
-          sregs_[defs[1]] = sfields_[mir->meta.sfield_lowering_info].ToHighWord();
-          FALLTHROUGH_INTENDED;
-        case Instruction::SGET:
-        case Instruction::SGET_OBJECT:
-        case Instruction::SGET_BOOLEAN:
-        case Instruction::SGET_BYTE:
-        case Instruction::SGET_CHAR:
-        case Instruction::SGET_SHORT:
-          DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
-          sregs_[defs[0]] = sfields_[mir->meta.sfield_lowering_info].AsNonNull();
-          break;
-        case Instruction::SPUT_WIDE:
-          DCHECK_EQ(uses[0] + 1, uses[1]);
-          FALLTHROUGH_INTENDED;
-        case Instruction::SPUT:
-        case Instruction::SPUT_OBJECT:
-        case Instruction::SPUT_BOOLEAN:
-        case Instruction::SPUT_BYTE:
-        case Instruction::SPUT_CHAR:
-        case Instruction::SPUT_SHORT:
-          DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
-          UpdateSRegFromLowWordType(ModifiedSReg(uses[0]),
-                                    sfields_[mir->meta.sfield_lowering_info]);
-          break;
-
-        default:
-          // No invokes or reference definitions here.
-          DCHECK_EQ(attrs & (DF_FORMAT_35C | DF_FORMAT_3RC), 0u);
-          DCHECK_NE(attrs & (DF_DA | DF_REF_A), (DF_DA | DF_REF_A));
-          break;
-      }
-
-      if ((attrs & DF_NULL_TRANSFER_N) != 0) {
-        // Don't process Phis at this stage.
-        continue;
-      }
-
-      // Handle defs
-      if (attrs & DF_DA) {
-        int32_t s_reg = defs[0];
-        sregs_[s_reg].SetLowWord();
-        if (attrs & DF_FP_A) {
-          sregs_[s_reg].SetFp();
-        }
-        if (attrs & DF_CORE_A) {
-          sregs_[s_reg].SetCore();
-        }
-        if (attrs & DF_REF_A) {
-          sregs_[s_reg].SetRef();
-        }
-        if (attrs & DF_A_WIDE) {
-          sregs_[s_reg].SetWide();
-          DCHECK_EQ(s_reg + 1, ModifiedSReg(defs[1]));
-          sregs_[s_reg + 1].MergeHighWord(sregs_[s_reg]);
-        } else {
-          sregs_[s_reg].SetNarrow();
-        }
-      }
-
-      // Handle uses
-      size_t next = 0;
-  #define PROCESS(REG)                                                        \
-      if (attrs & DF_U##REG) {                                                \
-        int32_t mod_s_reg = ModifiedSReg(uses[next]);                         \
-        sregs_[mod_s_reg].SetLowWord();                                       \
-        if (attrs & DF_FP_##REG) {                                            \
-          sregs_[mod_s_reg].SetFp();                                          \
-        }                                                                     \
-        if (attrs & DF_CORE_##REG) {                                          \
-          sregs_[mod_s_reg].SetCore();                                        \
-        }                                                                     \
-        if (attrs & DF_REF_##REG) {                                           \
-          sregs_[mod_s_reg].SetRef();                                         \
-        }                                                                     \
-        if (attrs & DF_##REG##_WIDE) {                                        \
-          sregs_[mod_s_reg].SetWide();                                        \
-          DCHECK_EQ(mod_s_reg + 1, ModifiedSReg(uses[next + 1]));             \
-          sregs_[mod_s_reg + 1].SetWide();                                    \
-          sregs_[mod_s_reg + 1].MergeHighWord(sregs_[mod_s_reg]);             \
-          next += 2;                                                          \
-        } else {                                                              \
-          sregs_[mod_s_reg].SetNarrow();                                      \
-          next++;                                                             \
-        }                                                                     \
-      }
-      PROCESS(A)
-      PROCESS(B)
-      PROCESS(C)
-  #undef PROCESS
-      DCHECK(next == mir->ssa_rep->num_uses || (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC)) != 0);
-    }
-    // Record relevant attributes.
-    bb_df_attrs_[bb->id] = bb_df_attrs &
-        (DF_NULL_TRANSFER_N | DF_CHK_CAST | DF_IS_MOVE | DF_HAS_RANGE_CHKS | DF_SAME_TYPE_AB);
-  }
-
-  if (UNLIKELY(check_cast_data_ != nullptr)) {
-    check_cast_data_->MarkPseudoPhiBlocks(bb_df_attrs_);
-  }
-}
-
-int32_t TypeInference::ModifiedSReg(int32_t s_reg) {
-  if (UNLIKELY(check_cast_data_ != nullptr)) {
-    SplitSRegData* split_data = check_cast_data_->GetSplitSRegData(s_reg);
-    if (UNLIKELY(split_data != nullptr)) {
-      DCHECK_NE(split_data->current_mod_s_reg, INVALID_SREG);
-      return split_data->current_mod_s_reg;
-    }
-  }
-  return s_reg;
-}
-
-int32_t TypeInference::PhiInputModifiedSReg(int32_t s_reg, BasicBlock* bb, size_t pred_idx) {
-  DCHECK_LT(pred_idx, bb->predecessors.size());
-  if (UNLIKELY(check_cast_data_ != nullptr)) {
-    SplitSRegData* split_data = check_cast_data_->GetSplitSRegData(s_reg);
-    if (UNLIKELY(split_data != nullptr)) {
-      return split_data->ending_mod_s_reg[bb->predecessors[pred_idx]];
-    }
-  }
-  return s_reg;
-}
-
-bool TypeInference::UpdateSRegFromLowWordType(int32_t mod_s_reg, Type low_word_type) {
-  DCHECK(low_word_type.LowWord());
-  bool changed = sregs_[mod_s_reg].MergeStrong(low_word_type);
-  if (!sregs_[mod_s_reg].Narrow()) {  // Wide without conflict with narrow.
-    DCHECK(!low_word_type.Narrow());
-    DCHECK_LT(mod_s_reg, mir_graph_->GetNumSSARegs());  // Original SSA reg.
-    changed |= sregs_[mod_s_reg + 1].MergeHighWord(sregs_[mod_s_reg]);
-  }
-  return changed;
-}
-
-}  // namespace art
diff --git a/compiler/dex/type_inference.h b/compiler/dex/type_inference.h
deleted file mode 100644
index adc3b54..0000000
--- a/compiler/dex/type_inference.h
+++ /dev/null
@@ -1,448 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_TYPE_INFERENCE_H_
-#define ART_COMPILER_DEX_TYPE_INFERENCE_H_
-
-#include "base/bit_utils.h"
-#include "base/logging.h"
-#include "base/arena_object.h"
-#include "base/scoped_arena_containers.h"
-
-namespace art {
-
-class ArenaBitVector;
-class BasicBlock;
-struct CompilationUnit;
-class DexFile;
-class MirFieldInfo;
-class MirMethodInfo;
-class MIR;
-class MIRGraph;
-
-/**
- * @brief Determine the type of SSA registers.
- *
- * @details
- * Because Dalvik's bytecode is not fully typed, we have to do some work to figure
- * out the sreg type.  For some operations it is clear based on the opcode (i.e.
- * ADD_FLOAT v0, v1, v2), but for others (MOVE), we may never know the "real" type.
- *
- * We perform the type inference operation in two phases:
- *   1. First, we make one pass over all insns in the topological sort order and
- *      extract known type information from all insns for their defs and uses.
- *   2. Then we repeatedly go through the graph to process insns that can propagate
- *      types from inputs to outputs and vice versa. These insns are just the MOVEs,
- *      AGET/APUTs, IF_ccs and Phis (including pseudo-Phis, see below).
- *
- * Since the main purpose is to determine the basic FP/core/reference type, we don't
- * need to record the precise reference type; we only record the array type to determine
- * the result types of agets and the source types of aputs.
- *
- * One complication is the check-cast instruction, which effectively defines a new
- * virtual register with a different type than the original sreg. We need to
- * track these virtual sregs and insert pseudo-phis where they merge.
- *
- * Another problem is null references. The same zero constant can be used as a
- * differently typed null and moved around with move-object, which would normally
- * be an ill-formed assignment. So we need to keep track of values that can be null
- * and values that cannot.
- *
- * Note that it's possible to have the same sreg show multiple defined types because dx
- * treats constants as untyped bit patterns. We disable register promotion in that case.
- */
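// Editorial example of the two phases described above (not from the sources):
//   const v0, #0x3f800000     // phase 1: just an untyped bit pattern
//   move v1, v0               // phase 1: type still unknown
//   add-float v2, v1, v1      // phase 1: marks v1 as narrow FP
// Phase 2 then propagates FP backwards through the MOVE to v0. If v0 also fed
// an add-int, the same s-reg would collect both core and FP bits and Finish()
// would report a type conflict and disable register promotion.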
-class TypeInference : public DeletableArenaObject<kArenaAllocMisc> {
- public:
-  TypeInference(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
-
-  bool Apply(BasicBlock* bb);
-  void Finish();
-
- private:
-  struct Type {
-    static Type Unknown() {
-      return Type(0u);
-    }
-
-    static Type NonArrayRefType() {
-      return Type(kFlagLowWord | kFlagNarrow | kFlagRef);
-    }
-
-    static Type ArtMethodType(bool wide) {
-      return Type(kFlagLowWord | kFlagRef | (wide ? kFlagWide : kFlagNarrow));
-    }
-
-    static Type ObjectArrayType() {
-      return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
-                  (1u << kBitArrayDepthStart) | kFlagArrayNarrow | kFlagArrayRef);
-    }
-
-    static Type WideArrayType() {
-      // Core or FP unknown.
-      return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
-                  (1u << kBitArrayDepthStart) | kFlagArrayWide);
-    }
-
-    static Type NarrowArrayType() {
-      // Core or FP unknown.
-      return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
-                  (1u << kBitArrayDepthStart) | kFlagArrayNarrow);
-    }
-
-    static Type NarrowCoreArrayType() {
-      return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
-                  (1u << kBitArrayDepthStart) | kFlagArrayNarrow | kFlagArrayCore);
-    }
-
-    static Type UnknownArrayType() {
-      return Type(kFlagNarrow | kFlagRef | kFlagLowWord | (1u << kBitArrayDepthStart));
-    }
-
-    static Type ArrayType(uint32_t array_depth, Type nested_type);
-    static Type ArrayTypeFromComponent(Type component_type);
-    static Type ShortyType(char shorty);
-    static Type DexType(const DexFile* dex_file, uint32_t type_idx);
-
-    bool IsDefined() const {
-      return raw_bits_ != 0u;
-    }
-
-    bool SizeConflict() const {
-      // NOTE: Ignore array element conflicts that don't propagate to direct conflicts.
-      return (Wide() && Narrow()) || (HighWord() && LowWord());
-    }
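    // Illustrative (editorial note): a const-wide defines a (low, high) pair;
    // if the high half is later also read as a 32-bit int, its s-reg collects
    // both HighWord() and LowWord(), and SizeConflict() becomes true.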
-
-    bool TypeConflict() const {
-      // NOTE: Ignore array element conflicts that don't propagate to direct conflicts.
-      return (raw_bits_ & kMaskType) != 0u && !IsPowerOfTwo(raw_bits_ & kMaskType);  // 2+ bits.
-    }
-
-    void MarkSizeConflict() {
-      SetBits(kFlagLowWord | kFlagHighWord);
-    }
-
-    void MarkTypeConflict() {
-      // Mark all three type bits so that merging any other type bits will not change this type.
-      SetBits(kFlagFp | kFlagCore | kFlagRef);
-    }
-
-    void CheckPureRef() const {
-      DCHECK_EQ(raw_bits_ & (kMaskWideAndType | kMaskWord), kFlagNarrow | kFlagRef | kFlagLowWord);
-    }
-
-    // If reference, don't treat as possible null and require precise type.
-    //
-    // References without this flag are allowed to have a type conflict and their
-    // type will not be propagated down. However, for simplicity we allow propagation
-    // of other flags up as it will affect only other null references; should those
-    // references be marked non-null later, we would have to do it anyway.
-    // NOTE: This is a negative "non-null" flag rather than a positive "is-null"
-    // to simplify merging together with other non-array flags.
-    bool NonNull() const {
-      return IsBitSet(kFlagNonNull);
-    }
-
-    bool Wide() const {
-      return IsBitSet(kFlagWide);
-    }
-
-    bool Narrow() const {
-      return IsBitSet(kFlagNarrow);
-    }
-
-    bool Fp() const {
-      return IsBitSet(kFlagFp);
-    }
-
-    bool Core() const {
-      return IsBitSet(kFlagCore);
-    }
-
-    bool Ref() const {
-      return IsBitSet(kFlagRef);
-    }
-
-    bool LowWord() const {
-      return IsBitSet(kFlagLowWord);
-    }
-
-    bool HighWord() const {
-      return IsBitSet(kFlagHighWord);
-    }
-
-    uint32_t ArrayDepth() const {
-      return raw_bits_ >> kBitArrayDepthStart;
-    }
-
-    Type NestedType() const {
-      DCHECK_NE(ArrayDepth(), 0u);
-      return Type(kFlagLowWord | ((raw_bits_ & kMaskArrayWideAndType) >> kArrayTypeShift));
-    }
-
-    Type ComponentType() const {
-      DCHECK_NE(ArrayDepth(), 0u);
-      Type temp(raw_bits_ - (1u << kBitArrayDepthStart));  // array_depth - 1u;
-      return (temp.ArrayDepth() != 0u) ? temp.AsNull() : NestedType();
-    }
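    // Illustrative (editorial note): for int[][] ("[[I"), ArrayDepth() == 2
    // and NestedType() is the narrow core int. ComponentType() peels a single
    // level, yielding "[I" as a nullable narrow ref of depth 1; peeling that
    // in turn yields the int itself.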
-
-    void SetWide() {
-      SetBits(kFlagWide);
-    }
-
-    void SetNarrow() {
-      SetBits(kFlagNarrow);
-    }
-
-    void SetFp() {
-      SetBits(kFlagFp);
-    }
-
-    void SetCore() {
-      SetBits(kFlagCore);
-    }
-
-    void SetRef() {
-      SetBits(kFlagRef);
-    }
-
-    void SetLowWord() {
-      SetBits(kFlagLowWord);
-    }
-
-    void SetHighWord() {
-      SetBits(kFlagHighWord);
-    }
-
-    Type ToHighWord() const {
-      DCHECK_EQ(raw_bits_ & (kMaskWide | kMaskWord), kFlagWide | kFlagLowWord);
-      return Type(raw_bits_ ^ (kFlagLowWord | kFlagHighWord));
-    }
-
-    bool MergeHighWord(Type low_word_type) {
-      // NOTE: low_word_type may also be Narrow() or HighWord().
-      DCHECK(low_word_type.Wide() && low_word_type.LowWord());
-      return MergeBits(Type(low_word_type.raw_bits_ | kFlagHighWord),
-                       kMaskWideAndType | kFlagHighWord);
-    }
-
-    bool Copy(Type type) {
-      if (raw_bits_ != type.raw_bits_) {
-        raw_bits_ = type.raw_bits_;
-        return true;
-      }
-      return false;
-    }
-
-    // Merge non-array flags.
-    bool MergeNonArrayFlags(Type src_type) {
-      return MergeBits(src_type, kMaskNonArray);
-    }
-
-    // Merge array flags for conflict.
-    bool MergeArrayConflict(Type src_type);
-
-    // Merge all flags.
-    bool MergeStrong(Type src_type);
-
-    // Merge all flags.
-    bool MergeWeak(Type src_type);
-
-    // Get the same type but mark that it should not be treated as null.
-    Type AsNonNull() const {
-      return Type(raw_bits_ | kFlagNonNull);
-    }
-
-    // Get the same type but mark that it can be treated as null.
-    Type AsNull() const {
-      return Type(raw_bits_ & ~kFlagNonNull);
-    }
-
-   private:
-    enum FlagBits {
-      kBitNonNull = 0,
-      kBitWide,
-      kBitNarrow,
-      kBitFp,
-      kBitCore,
-      kBitRef,
-      kBitLowWord,
-      kBitHighWord,
-      kBitArrayWide,
-      kBitArrayNarrow,
-      kBitArrayFp,
-      kBitArrayCore,
-      kBitArrayRef,
-      kBitArrayDepthStart,
-    };
-    static constexpr size_t kArrayDepthBits = sizeof(uint32_t) * 8u - kBitArrayDepthStart;
-
-    static constexpr uint32_t kFlagNonNull = 1u << kBitNonNull;
-    static constexpr uint32_t kFlagWide = 1u << kBitWide;
-    static constexpr uint32_t kFlagNarrow = 1u << kBitNarrow;
-    static constexpr uint32_t kFlagFp = 1u << kBitFp;
-    static constexpr uint32_t kFlagCore = 1u << kBitCore;
-    static constexpr uint32_t kFlagRef = 1u << kBitRef;
-    static constexpr uint32_t kFlagLowWord = 1u << kBitLowWord;
-    static constexpr uint32_t kFlagHighWord = 1u << kBitHighWord;
-    static constexpr uint32_t kFlagArrayWide = 1u << kBitArrayWide;
-    static constexpr uint32_t kFlagArrayNarrow = 1u << kBitArrayNarrow;
-    static constexpr uint32_t kFlagArrayFp = 1u << kBitArrayFp;
-    static constexpr uint32_t kFlagArrayCore = 1u << kBitArrayCore;
-    static constexpr uint32_t kFlagArrayRef = 1u << kBitArrayRef;
-
-    static constexpr uint32_t kMaskWide = kFlagWide | kFlagNarrow;
-    static constexpr uint32_t kMaskType = kFlagFp | kFlagCore | kFlagRef;
-    static constexpr uint32_t kMaskWord = kFlagLowWord | kFlagHighWord;
-    static constexpr uint32_t kMaskArrayWide = kFlagArrayWide | kFlagArrayNarrow;
-    static constexpr uint32_t kMaskArrayType = kFlagArrayFp | kFlagArrayCore | kFlagArrayRef;
-    static constexpr uint32_t kMaskWideAndType = kMaskWide | kMaskType;
-    static constexpr uint32_t kMaskArrayWideAndType = kMaskArrayWide | kMaskArrayType;
-
-    static constexpr size_t kArrayTypeShift = kBitArrayWide - kBitWide;
-    static_assert(kArrayTypeShift == kBitArrayNarrow - kBitNarrow, "shift mismatch");
-    static_assert(kArrayTypeShift == kBitArrayFp - kBitFp, "shift mismatch");
-    static_assert(kArrayTypeShift == kBitArrayCore - kBitCore, "shift mismatch");
-    static_assert(kArrayTypeShift == kBitArrayRef - kBitRef, "shift mismatch");
-    static_assert((kMaskWide << kArrayTypeShift) == kMaskArrayWide, "shift mismatch");
-    static_assert((kMaskType << kArrayTypeShift) == kMaskArrayType, "shift mismatch");
-    static_assert((kMaskWideAndType << kArrayTypeShift) == kMaskArrayWideAndType, "shift mismatch");
-
-    static constexpr uint32_t kMaskArrayDepth = static_cast<uint32_t>(-1) << kBitArrayDepthStart;
-    static constexpr uint32_t kMaskNonArray = ~(kMaskArrayWideAndType | kMaskArrayDepth);
-
-    // The maximum representable array depth. If we exceed the maximum (which can happen
-    // only with an absurd nested array type in a dex file which would presumably cause
-    // OOM while being resolved), we can report false conflicts.
-    static constexpr uint32_t kMaxArrayDepth = static_cast<uint32_t>(-1) >> kBitArrayDepthStart;
-
-    explicit Type(uint32_t raw_bits) : raw_bits_(raw_bits) { }
-
-    bool IsBitSet(uint32_t flag) const {
-      return (raw_bits_ & flag) != 0u;
-    }
-
-    void SetBits(uint32_t flags) {
-      raw_bits_ |= flags;
-    }
-
-    bool MergeBits(Type src_type, uint32_t mask) {
-      uint32_t new_bits = raw_bits_ | (src_type.raw_bits_ & mask);
-      if (new_bits != raw_bits_) {
-        raw_bits_ = new_bits;
-        return true;
-      }
-      return false;
-    }
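    // Illustrative (editorial note): merging a core flag into a type that is
    // already narrow FP sets both kFlagFp and kFlagCore, so TypeConflict()
    // becomes true. MergeBits() can only add bits, never clear them, which is
    // what makes the fixed-point iteration over the blocks terminate.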
-
-    uint32_t raw_bits_;
-  };
-
-  struct MethodSignature {
-    Type return_type;
-    size_t num_params;
-    Type* param_types;
-  };
-
-  struct SplitSRegData {
-    int32_t current_mod_s_reg;
-    int32_t* starting_mod_s_reg;        // Indexed by BasicBlock::id.
-    int32_t* ending_mod_s_reg;          // Indexed by BasicBlock::id.
-
-    // NOTE: Before AddPseudoPhis(), def_phi_blocks_ marks the blocks
-    // with check-casts and the block with the original SSA reg.
-    // After AddPseudoPhis(), it marks blocks with pseudo-phis.
-    ArenaBitVector* def_phi_blocks_;    // Indexed by BasicBlock::id.
-  };
-
-  class CheckCastData : public DeletableArenaObject<kArenaAllocMisc> {
-   public:
-    CheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
-
-    size_t NumSRegs() const {
-      return num_sregs_;
-    }
-
-    void AddCheckCast(MIR* check_cast, Type type);
-    void AddPseudoPhis();
-    void InitializeCheckCastSRegs(Type* sregs) const;
-    void MergeCheckCastConflicts(Type* sregs) const;
-    void MarkPseudoPhiBlocks(uint64_t* bb_df_attrs) const;
-
-    void Start(BasicBlock* bb);
-    bool ProcessPseudoPhis(BasicBlock* bb, Type* sregs);
-    void ProcessCheckCast(MIR* mir);
-
-    SplitSRegData* GetSplitSRegData(int32_t s_reg);
-
-   private:
-    BasicBlock* FindDefBlock(MIR* check_cast);
-    BasicBlock* FindTopologicallyEarliestPredecessor(BasicBlock* bb);
-    bool IsSRegLiveAtStart(BasicBlock* bb, int v_reg, int32_t s_reg);
-
-    MIRGraph* const mir_graph_;
-    ScopedArenaAllocator* const alloc_;
-    const size_t num_blocks_;
-    size_t num_sregs_;
-
-    // Map check-cast mir to special sreg and type.
-    struct CheckCastMapValue {
-      int32_t modified_s_reg;
-      Type type;
-    };
-    ScopedArenaSafeMap<MIR*, CheckCastMapValue> check_cast_map_;
-    ScopedArenaSafeMap<int32_t, SplitSRegData> split_sreg_data_;
-  };
-
-  static Type FieldType(const DexFile* dex_file, uint32_t field_idx);
-  static Type* PrepareIFieldTypes(const DexFile* dex_file, MIRGraph* mir_graph,
-                                  ScopedArenaAllocator* alloc);
-  static Type* PrepareSFieldTypes(const DexFile* dex_file, MIRGraph* mir_graph,
-                                  ScopedArenaAllocator* alloc);
-  static MethodSignature Signature(const DexFile* dex_file, uint32_t method_idx, bool is_static,
-                                   ScopedArenaAllocator* alloc);
-  static MethodSignature* PrepareSignatures(const DexFile* dex_file, MIRGraph* mir_graph,
-                                            ScopedArenaAllocator* alloc);
-  static CheckCastData* InitializeCheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
-
-  void InitializeSRegs();
-
-  int32_t ModifiedSReg(int32_t s_reg);
-  int32_t PhiInputModifiedSReg(int32_t s_reg, BasicBlock* bb, size_t pred_idx);
-
-  bool UpdateSRegFromLowWordType(int32_t mod_s_reg, Type low_word_type);
-
-  MIRGraph* const mir_graph_;
-  CompilationUnit* const cu_;
-
-  // Type inference also propagates types backwards, but this must not happen across a
-  // check-cast. So we effectively split an SSA reg in two at each check-cast and keep
-  // track of the types separately.
-  std::unique_ptr<CheckCastData> check_cast_data_;
-
-  size_t num_sregs_;      // Number of SSA regs or modified SSA regs, see check-cast.
-  const Type* const ifields_;                 // Indexed by MIR::meta::ifield_lowering_info.
-  const Type* const sfields_;                 // Indexed by MIR::meta::sfield_lowering_info.
-  const MethodSignature* const signatures_;   // Indexed by MIR::meta::method_lowering_info.
-  const MethodSignature current_method_signature_;
-  Type* const sregs_;     // Indexed by SSA reg or modified SSA reg, see check-cast.
-  uint64_t* const bb_df_attrs_;               // Indexed by BasicBlock::id.
-
-  friend class TypeInferenceTest;
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_DEX_TYPE_INFERENCE_H_
diff --git a/compiler/dex/type_inference_test.cc b/compiler/dex/type_inference_test.cc
deleted file mode 100644
index ef53651..0000000
--- a/compiler/dex/type_inference_test.cc
+++ /dev/null
@@ -1,2045 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/logging.h"
-#include "compiler_ir.h"
-#include "dataflow_iterator-inl.h"
-#include "dex_flags.h"
-#include "dex/mir_field_info.h"
-#include "dex/mir_graph.h"
-#include "driver/dex_compilation_unit.h"
-#include "gtest/gtest.h"
-#include "type_inference.h"
-#include "utils/test_dex_file_builder.h"
-
-namespace art {
-
-class TypeInferenceTest : public testing::Test {
- protected:
-  struct TypeDef {
-    const char* descriptor;
-  };
-
-  struct FieldDef {
-    const char* class_descriptor;
-    const char* type;
-    const char* name;
-  };
-
-  struct MethodDef {
-    const char* class_descriptor;
-    const char* signature;
-    const char* name;
-    InvokeType type;
-  };
-
-  struct BBDef {
-    static constexpr size_t kMaxSuccessors = 4;
-    static constexpr size_t kMaxPredecessors = 4;
-
-    BBType type;
-    size_t num_successors;
-    BasicBlockId successors[kMaxSuccessors];
-    size_t num_predecessors;
-    BasicBlockId predecessors[kMaxPredecessors];
-  };
-
-  struct MIRDef {
-    static constexpr size_t kMaxSsaDefs = 2;
-    static constexpr size_t kMaxSsaUses = 4;
-
-    BasicBlockId bbid;
-    Instruction::Code opcode;
-    int64_t value;
-    uint32_t metadata;
-    size_t num_uses;
-    int32_t uses[kMaxSsaUses];
-    size_t num_defs;
-    int32_t defs[kMaxSsaDefs];
-  };
-
-#define DEF_SUCC0() \
-    0u, { }
-#define DEF_SUCC1(s1) \
-    1u, { s1 }
-#define DEF_SUCC2(s1, s2) \
-    2u, { s1, s2 }
-#define DEF_SUCC3(s1, s2, s3) \
-    3u, { s1, s2, s3 }
-#define DEF_SUCC4(s1, s2, s3, s4) \
-    4u, { s1, s2, s3, s4 }
-#define DEF_PRED0() \
-    0u, { }
-#define DEF_PRED1(p1) \
-    1u, { p1 }
-#define DEF_PRED2(p1, p2) \
-    2u, { p1, p2 }
-#define DEF_PRED3(p1, p2, p3) \
-    3u, { p1, p2, p3 }
-#define DEF_PRED4(p1, p2, p3, p4) \
-    4u, { p1, p2, p3, p4 }
-#define DEF_BB(type, succ, pred) \
-    { type, succ, pred }
-
-#define DEF_CONST(bb, opcode, reg, value) \
-    { bb, opcode, value, 0u, 0, { }, 1, { reg } }
-#define DEF_CONST_WIDE(bb, opcode, reg, value) \
-    { bb, opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_CONST_STRING(bb, opcode, reg, index) \
-    { bb, opcode, index, 0u, 0, { }, 1, { reg } }
-#define DEF_IGET(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 1, { obj }, 1, { reg } }
-#define DEF_IGET_WIDE(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
-#define DEF_IPUT(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
-#define DEF_IPUT_WIDE(bb, opcode, reg, obj, field_info) \
-    { bb, opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
-#define DEF_SGET(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 0, { }, 1, { reg } }
-#define DEF_SGET_WIDE(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_SPUT(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 1, { reg }, 0, { } }
-#define DEF_SPUT_WIDE(bb, opcode, reg, field_info) \
-    { bb, opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
-#define DEF_AGET(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 2, { obj, idx }, 1, { reg } }
-#define DEF_AGET_WIDE(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 2, { obj, idx }, 2, { reg, reg + 1 } }
-#define DEF_APUT(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 3, { reg, obj, idx }, 0, { } }
-#define DEF_APUT_WIDE(bb, opcode, reg, obj, idx) \
-    { bb, opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }
-#define DEF_INVOKE0(bb, opcode, method_idx) \
-    { bb, opcode, 0u, method_idx, 0, { }, 0, { } }
-#define DEF_INVOKE1(bb, opcode, reg, method_idx) \
-    { bb, opcode, 0u, method_idx, 1, { reg }, 0, { } }
-#define DEF_INVOKE2(bb, opcode, reg1, reg2, method_idx) \
-    { bb, opcode, 0u, method_idx, 2, { reg1, reg2 }, 0, { } }
-#define DEF_IFZ(bb, opcode, reg) \
-    { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
-#define DEF_MOVE(bb, opcode, reg, src) \
-    { bb, opcode, 0u, 0u, 1, { src }, 1, { reg } }
-#define DEF_MOVE_WIDE(bb, opcode, reg, src) \
-    { bb, opcode, 0u, 0u, 2, { src, src + 1 }, 2, { reg, reg + 1 } }
-#define DEF_PHI2(bb, reg, src1, src2) \
-    { bb, static_cast<Instruction::Code>(kMirOpPhi), 0, 0u, 2u, { src1, src2 }, 1, { reg } }
-#define DEF_BINOP(bb, opcode, result, src1, src2) \
-    { bb, opcode, 0u, 0u, 2, { src1, src2 }, 1, { result } }
-#define DEF_UNOP(bb, opcode, result, src) DEF_MOVE(bb, opcode, result, src)
-#define DEF_NULOP(bb, opcode, result) DEF_CONST(bb, opcode, result, 0)
-#define DEF_NULOP_WIDE(bb, opcode, result) DEF_CONST_WIDE(bb, opcode, result, 0)
-#define DEF_CHECK_CAST(bb, opcode, reg, type) \
-    { bb, opcode, 0, type, 1, { reg }, 0, { } }
-#define DEF_NEW_ARRAY(bb, opcode, reg, length, type) \
-    { bb, opcode, 0, type, 1, { length }, 1, { reg } }
-
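  // Illustrative use of the macros above (hypothetical MIRs, not one of the
  // deleted tests): a single-block method storing an untyped constant into an
  // array argument, so the array's component type flows into s-reg 2.
  //   static const MIRDef mirs[] = {
  //       DEF_CONST(3u, Instruction::CONST, 1u, 0),     // s-reg 1: index 0
  //       DEF_CONST(3u, Instruction::CONST, 2u, 0),     // s-reg 2: untyped 0
  //       DEF_APUT(3u, Instruction::APUT, 2u, 0u, 1u),  // s-reg 0: array arg
  //   };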
-  void AddTypes(const TypeDef* defs, size_t count) {
-    for (size_t i = 0; i != count; ++i) {
-      const TypeDef* def = &defs[i];
-      dex_file_builder_.AddType(def->descriptor);
-    }
-  }
-
-  template <size_t count>
-  void PrepareTypes(const TypeDef (&defs)[count]) {
-    type_defs_ = defs;
-    type_count_ = count;
-    AddTypes(defs, count);
-  }
-
-  void AddFields(const FieldDef* defs, size_t count) {
-    for (size_t i = 0; i != count; ++i) {
-      const FieldDef* def = &defs[i];
-      dex_file_builder_.AddField(def->class_descriptor, def->type, def->name);
-    }
-  }
-
-  template <size_t count>
-  void PrepareIFields(const FieldDef (&defs)[count]) {
-    ifield_defs_ = defs;
-    ifield_count_ = count;
-    AddFields(defs, count);
-  }
-
-  template <size_t count>
-  void PrepareSFields(const FieldDef (&defs)[count]) {
-    sfield_defs_ = defs;
-    sfield_count_ = count;
-    AddFields(defs, count);
-  }
-
-  void AddMethods(const MethodDef* defs, size_t count) {
-    for (size_t i = 0; i != count; ++i) {
-      const MethodDef* def = &defs[i];
-      dex_file_builder_.AddMethod(def->class_descriptor, def->signature, def->name);
-    }
-  }
-
-  template <size_t count>
-  void PrepareMethods(const MethodDef (&defs)[count]) {
-    method_defs_ = defs;
-    method_count_ = count;
-    AddMethods(defs, count);
-  }
-
-  DexMemAccessType AccessTypeForDescriptor(const char* descriptor) {
-    switch (descriptor[0]) {
-      case 'I':
-      case 'F':
-        return kDexMemAccessWord;
-      case 'J':
-      case 'D':
-        return kDexMemAccessWide;
-      case '[':
-      case 'L':
-        return kDexMemAccessObject;
-      case 'Z':
-        return kDexMemAccessBoolean;
-      case 'B':
-        return kDexMemAccessByte;
-      case 'C':
-        return kDexMemAccessChar;
-      case 'S':
-        return kDexMemAccessShort;
-      default:
-        LOG(FATAL) << "Bad descriptor: " << descriptor;
-        UNREACHABLE();
-    }
-  }
-
-  size_t CountIns(const std::string& test_method_signature, bool is_static) {
-    const char* sig = test_method_signature.c_str();
-    CHECK_EQ(sig[0], '(');
-    ++sig;
-    size_t result = is_static ? 0u : 1u;
-    while (*sig != ')') {
-      result += (AccessTypeForDescriptor(sig) == kDexMemAccessWide) ? 2u : 1u;
-      while (*sig == '[') {
-        ++sig;
-      }
-      if (*sig == 'L') {
-        do {
-          ++sig;
-          CHECK(*sig != '\0' && *sig != ')');
-        } while (*sig != ';');
-      }
-      ++sig;
-    }
-    return result;
-  }
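  // Worked example (editorial note): for "(J[Ljava/lang/String;I)V" with
  // is_static == true, CountIns() returns 2 + 1 + 1 == 4: the long takes two
  // vregs, the array reference and the int take one each; a non-static method
  // would add one more for 'this'.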
-
-  void BuildDexFile(const std::string& test_method_signature, bool is_static) {
-    dex_file_builder_.AddMethod(kClassName, test_method_signature, kMethodName);
-    dex_file_ = dex_file_builder_.Build(kDexLocation);
-    cu_.dex_file = dex_file_.get();
-    cu_.method_idx = dex_file_builder_.GetMethodIdx(kClassName, test_method_signature, kMethodName);
-    cu_.access_flags = is_static ? kAccStatic : 0u;
-    cu_.mir_graph->m_units_.push_back(new (cu_.mir_graph->arena_) DexCompilationUnit(
-        &cu_, cu_.class_loader, cu_.class_linker, *cu_.dex_file, nullptr /* code_item not used */,
-        0u /* class_def_idx not used */, 0u /* method_index not used */,
-        cu_.access_flags, nullptr /* verified_method not used */,
-        ScopedNullHandle<mirror::DexCache>()));
-    cu_.mir_graph->current_method_ = 0u;
-    code_item_ = static_cast<DexFile::CodeItem*>(
-        cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
-
-    code_item_->ins_size_ = CountIns(test_method_signature, is_static);
-    code_item_->registers_size_ = kLocalVRs + code_item_->ins_size_;
-    cu_.mir_graph->current_code_item_ = code_item_;
-    cu_.mir_graph->num_ssa_regs_ = kMaxSsaRegs;
-
-    cu_.mir_graph->ifield_lowering_infos_.clear();
-    cu_.mir_graph->ifield_lowering_infos_.reserve(ifield_count_);
-    for (size_t i = 0u; i != ifield_count_; ++i) {
-      const FieldDef* def = &ifield_defs_[i];
-      uint32_t field_idx =
-          dex_file_builder_.GetFieldIdx(def->class_descriptor, def->type, def->name);
-      MirIFieldLoweringInfo field_info(field_idx, AccessTypeForDescriptor(def->type), false);
-      field_info.declaring_dex_file_ = cu_.dex_file;
-      field_info.declaring_field_idx_ = field_idx;
-      cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
-    }
-
-    cu_.mir_graph->sfield_lowering_infos_.clear();
-    cu_.mir_graph->sfield_lowering_infos_.reserve(sfield_count_);
-    for (size_t i = 0u; i != sfield_count_; ++i) {
-      const FieldDef* def = &sfield_defs_[i];
-      uint32_t field_idx =
-          dex_file_builder_.GetFieldIdx(def->class_descriptor, def->type, def->name);
-      MirSFieldLoweringInfo field_info(field_idx, AccessTypeForDescriptor(def->type));
-      field_info.declaring_dex_file_ = cu_.dex_file;
-      field_info.declaring_field_idx_ = field_idx;
-      cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
-    }
-
-    cu_.mir_graph->method_lowering_infos_.clear();
-    cu_.mir_graph->method_lowering_infos_.reserve(method_count_);
-    for (size_t i = 0u; i != method_count_; ++i) {
-      const MethodDef* def = &method_defs_[i];
-      uint32_t method_idx =
-          dex_file_builder_.GetMethodIdx(def->class_descriptor, def->signature, def->name);
-      MirMethodLoweringInfo method_info(method_idx, def->type, false);
-      method_info.declaring_dex_file_ = cu_.dex_file;
-      method_info.declaring_method_idx_ = method_idx;
-      cu_.mir_graph->method_lowering_infos_.push_back(method_info);
-    }
-  }
-
-  void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
-    cu_.mir_graph->block_id_map_.clear();
-    cu_.mir_graph->block_list_.clear();
-    ASSERT_LT(3u, count);  // null, entry, exit and at least one bytecode block.
-    ASSERT_EQ(kNullBlock, defs[0].type);
-    ASSERT_EQ(kEntryBlock, defs[1].type);
-    ASSERT_EQ(kExitBlock, defs[2].type);
-    for (size_t i = 0u; i != count; ++i) {
-      const BBDef* def = &defs[i];
-      BasicBlock* bb = cu_.mir_graph->CreateNewBB(def->type);
-      if (def->num_successors <= 2) {
-        bb->successor_block_list_type = kNotUsed;
-        bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
-        bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
-      } else {
-        bb->successor_block_list_type = kPackedSwitch;
-        bb->fall_through = 0u;
-        bb->taken = 0u;
-        bb->successor_blocks.reserve(def->num_successors);
-        for (size_t j = 0u; j != def->num_successors; ++j) {
-          SuccessorBlockInfo* successor_block_info =
-              static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
-                                                               kArenaAllocSuccessors));
-          successor_block_info->block = def->successors[j];
-          successor_block_info->key = 0u;  // Not used by type inference.
-          bb->successor_blocks.push_back(successor_block_info);
-        }
-      }
-      bb->predecessors.assign(def->predecessors, def->predecessors + def->num_predecessors);
-      if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
-        bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
-            cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
-        bb->data_flow_info->live_in_v = live_in_v_;
-      }
-    }
-    ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
-    cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
-    ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
-    cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_[2];
-    ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
-  }
-
-  template <size_t count>
-  void PrepareBasicBlocks(const BBDef (&defs)[count]) {
-    DoPrepareBasicBlocks(defs, count);
-  }
-
-  void PrepareSingleBlock() {
-    static const BBDef bbs[] = {
-        DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-        DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-        DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(3)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(1)),
-    };
-    PrepareBasicBlocks(bbs);
-  }
-
-  void PrepareDiamond() {
-    static const BBDef bbs[] = {
-        DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-        DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-        DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),
-    };
-    PrepareBasicBlocks(bbs);
-  }
-
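-  // CFG: entry (1) -> 3 -> 4 -> 5 -> exit (2), with block 4 also looping back to itself.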
-  void PrepareLoop() {
-    static const BBDef bbs[] = {
-        DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
-        DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
-        DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
-        DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)),  // "taken" loops to self.
-        DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
-    };
-    PrepareBasicBlocks(bbs);
-  }
-
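-  // Create the MIRs from the MIRDef array, append each to its block and fill
-  // in opcode-specific metadata (field/method lowering info, phi incoming
-  // blocks, type indexes) together with the SSA uses and defs.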
-  void DoPrepareMIRs(const MIRDef* defs, size_t count) {
-    mir_count_ = count;
-    mirs_ = cu_.arena.AllocArray<MIR>(count, kArenaAllocMIR);
-    ssa_reps_.resize(count);
-    for (size_t i = 0u; i != count; ++i) {
-      const MIRDef* def = &defs[i];
-      MIR* mir = &mirs_[i];
-      ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.size());
-      BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
-      bb->AppendMIR(mir);
-      mir->dalvikInsn.opcode = def->opcode;
-      mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
-      mir->dalvikInsn.vB_wide = def->value;
-      if (IsInstructionIGetOrIPut(def->opcode)) {
-        ASSERT_LT(def->metadata, cu_.mir_graph->ifield_lowering_infos_.size());
-        mir->meta.ifield_lowering_info = def->metadata;
-        ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->metadata].MemAccessType(),
-                  IGetOrIPutMemAccessType(def->opcode));
-        cu_.mir_graph->merged_df_flags_ |= DF_IFIELD;
-      } else if (IsInstructionSGetOrSPut(def->opcode)) {
-        ASSERT_LT(def->metadata, cu_.mir_graph->sfield_lowering_infos_.size());
-        mir->meta.sfield_lowering_info = def->metadata;
-        ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->metadata].MemAccessType(),
-                  SGetOrSPutMemAccessType(def->opcode));
-        cu_.mir_graph->merged_df_flags_ |= DF_SFIELD;
-      } else if (IsInstructionInvoke(def->opcode)) {
-        ASSERT_LT(def->metadata, cu_.mir_graph->method_lowering_infos_.size());
-        mir->meta.method_lowering_info = def->metadata;
-        mir->dalvikInsn.vA = def->num_uses;
-        cu_.mir_graph->merged_df_flags_ |= DF_FORMAT_35C;
-      } else if (def->opcode == static_cast<Instruction::Code>(kMirOpPhi)) {
-        mir->meta.phi_incoming =
-            allocator_->AllocArray<BasicBlockId>(def->num_uses, kArenaAllocDFInfo);
-        ASSERT_EQ(def->num_uses, bb->predecessors.size());
-        std::copy(bb->predecessors.begin(), bb->predecessors.end(), mir->meta.phi_incoming);
-      } else if (def->opcode == Instruction::CHECK_CAST) {
-        ASSERT_LT(def->metadata, type_count_);
-        mir->dalvikInsn.vB = dex_file_builder_.GetTypeIdx(type_defs_[def->metadata].descriptor);
-        cu_.mir_graph->merged_df_flags_ |= DF_CHK_CAST;
-      } else if (def->opcode == Instruction::NEW_ARRAY) {
-        ASSERT_LT(def->metadata, type_count_);
-        mir->dalvikInsn.vC = dex_file_builder_.GetTypeIdx(type_defs_[def->metadata].descriptor);
-      }
-      mir->ssa_rep = &ssa_reps_[i];
-      mir->ssa_rep->num_uses = def->num_uses;
-      mir->ssa_rep->uses = const_cast<int32_t*>(def->uses);  // Not modified by type inference.
-      mir->ssa_rep->num_defs = def->num_defs;
-      mir->ssa_rep->defs = const_cast<int32_t*>(def->defs);  // Not modified by type inference.
-      mir->offset = i;  // The offset is used only for debug output.
-      mir->optimization_flags = 0u;
-    }
-    code_item_->insns_size_in_code_units_ = 2u * count;
-  }
-
-  template <size_t count>
-  void PrepareMIRs(const MIRDef (&defs)[count]) {
-    DoPrepareMIRs(defs, count);
-  }
-
-  // BasicBlockDataFlow::vreg_to_ssa_map_exit is used only for check-casts.
-  void AllocEndingVRegToSRegMaps() {
-    AllNodesIterator iterator(cu_.mir_graph.get());
-    for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
-      if (bb->data_flow_info != nullptr) {
-        if (bb->data_flow_info->vreg_to_ssa_map_exit == nullptr) {
-          size_t num_vregs = code_item_->registers_size_;
-          bb->data_flow_info->vreg_to_ssa_map_exit =
-              cu_.arena.AllocArray<int32_t>(num_vregs, kArenaAllocDFInfo);
-          std::fill_n(bb->data_flow_info->vreg_to_ssa_map_exit, num_vregs, INVALID_SREG);
-        }
-      }
-    }
-  }
-
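-  // Set vreg's sreg at the exit of each listed block; type inference reads
-  // these maps when processing check-cast instructions.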
-  template <size_t count>
-  void MapVRegToSReg(int vreg, int32_t sreg, const BasicBlockId (&bb_ids)[count]) {
-    AllocEndingVRegToSRegMaps();
-    for (BasicBlockId bb_id : bb_ids) {
-      BasicBlock* bb = cu_.mir_graph->GetBasicBlock(bb_id);
-      CHECK(bb != nullptr);
-      CHECK(bb->data_flow_info != nullptr);
-      CHECK(bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
-      bb->data_flow_info->vreg_to_ssa_map_exit[vreg] = sreg;
-    }
-  }
-
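-  // Compute the SSA-related orderings, then run TypeInference over the blocks
-  // in repeating pre-order DFS until a fixed point is reached, and finalize.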
-  void PerformTypeInference() {
-    cu_.mir_graph->SSATransformationStart();
-    cu_.mir_graph->ComputeDFSOrders();
-    cu_.mir_graph->ComputeDominators();
-    cu_.mir_graph->ComputeTopologicalSortOrder();
-    cu_.mir_graph->SSATransformationEnd();
-    ASSERT_TRUE(type_inference_ == nullptr);
-    type_inference_.reset(new (allocator_.get()) TypeInference(cu_.mir_graph.get(),
-                                                               allocator_.get()));
-    RepeatingPreOrderDfsIterator iter(cu_.mir_graph.get());
-    bool changed = false;
-    for (BasicBlock* bb = iter.Next(changed); bb != nullptr; bb = iter.Next(changed)) {
-      changed = type_inference_->Apply(bb);
-    }
-    type_inference_->Finish();
-  }
-
-  TypeInferenceTest()
-      : pool_(),
-        cu_(&pool_, kRuntimeISA, nullptr, nullptr),
-        mir_count_(0u),
-        mirs_(nullptr),
-        code_item_(nullptr),
-        ssa_reps_(),
-        allocator_(),
-        live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false)),
-        type_defs_(nullptr),
-        type_count_(0u),
-        ifield_defs_(nullptr),
-        ifield_count_(0u),
-        sfield_defs_(nullptr),
-        sfield_count_(0u),
-        method_defs_(nullptr),
-        method_count_(0u),
-        dex_file_builder_(),
-        dex_file_(nullptr) {
-    cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
-    allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
-    // Bind all possible sregs to live vregs for test purposes.
-    live_in_v_->SetInitialBits(kMaxSsaRegs);
-    cu_.mir_graph->reg_location_ = static_cast<RegLocation*>(cu_.arena.Alloc(
-        kMaxSsaRegs * sizeof(cu_.mir_graph->reg_location_[0]), kArenaAllocRegAlloc));
-    cu_.mir_graph->method_sreg_ = kMaxSsaRegs - 1u;
-    cu_.mir_graph->reg_location_[cu_.mir_graph->GetMethodSReg()].location = kLocCompilerTemp;
-    cu_.mir_graph->ssa_base_vregs_.reserve(kMaxSsaRegs);
-    cu_.mir_graph->ssa_subscripts_.reserve(kMaxSsaRegs);
-    for (unsigned int i = 0; i < kMaxSsaRegs; i++) {
-      cu_.mir_graph->ssa_base_vregs_.push_back(i);
-      cu_.mir_graph->ssa_subscripts_.push_back(0);
-    }
-  }
-
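-  // Attribute flags for the expected type of an SSA register.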
-  enum ExpectFlags : uint32_t {
-    kExpectWide         = 0x0001u,
-    kExpectNarrow       = 0x0002u,
-    kExpectFp           = 0x0004u,
-    kExpectCore         = 0x0008u,
-    kExpectRef          = 0x0010u,
-    kExpectArrayWide    = 0x0020u,
-    kExpectArrayNarrow  = 0x0040u,
-    kExpectArrayFp      = 0x0080u,
-    kExpectArrayCore    = 0x0100u,
-    kExpectArrayRef     = 0x0200u,
-    kExpectNull         = 0x0400u,
-    kExpectHigh         = 0x0800u,  // Reserved for ExpectSRegType().
-  };
-
-  struct SRegExpectation {
-    uint32_t array_depth;
-    uint32_t flags;
-  };
-
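-  // Check the inferred type (and, unless check_loc is false, the RegLocation)
-  // of the given sreg. For a wide low word, the matching high word is checked
-  // implicitly via recursion.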
-  void ExpectSRegType(int s_reg, const SRegExpectation& expectation, bool check_loc = true) {
-    uint32_t flags = expectation.flags;
-    uint32_t array_depth = expectation.array_depth;
-    TypeInference::Type type = type_inference_->sregs_[s_reg];
-
-    if (check_loc) {
-      RegLocation loc = cu_.mir_graph->reg_location_[s_reg];
-      EXPECT_EQ((flags & kExpectWide) != 0u, loc.wide) << s_reg;
-      EXPECT_EQ((flags & kExpectFp) != 0u, loc.fp) << s_reg;
-      EXPECT_EQ((flags & kExpectCore) != 0u, loc.core) << s_reg;
-      EXPECT_EQ((flags & kExpectRef) != 0u, loc.ref) << s_reg;
-      EXPECT_EQ((flags & kExpectHigh) != 0u, loc.high_word) << s_reg;
-    }
-
-    EXPECT_EQ((flags & kExpectWide) != 0u, type.Wide()) << s_reg;
-    EXPECT_EQ((flags & kExpectNarrow) != 0u, type.Narrow()) << s_reg;
-    EXPECT_EQ((flags & kExpectFp) != 0u, type.Fp()) << s_reg;
-    EXPECT_EQ((flags & kExpectCore) != 0u, type.Core()) << s_reg;
-    EXPECT_EQ((flags & kExpectRef) != 0u, type.Ref()) << s_reg;
-    EXPECT_EQ((flags & kExpectHigh) == 0u, type.LowWord()) << s_reg;
-    EXPECT_EQ((flags & kExpectHigh) != 0u, type.HighWord()) << s_reg;
-
-    if ((flags & kExpectRef) != 0u) {
-      EXPECT_EQ((flags & kExpectNull) != 0u, !type.NonNull()) << s_reg;
-    } else {
-      // Null should be checked only for references.
-      ASSERT_EQ((flags & kExpectNull), 0u);
-    }
-
-    ASSERT_EQ(array_depth, type.ArrayDepth()) << s_reg;
-    if (array_depth != 0u) {
-      ASSERT_NE((flags & kExpectRef), 0u);
-      TypeInference::Type nested_type = type.NestedType();
-      EXPECT_EQ((flags & kExpectArrayWide) != 0u, nested_type.Wide()) << s_reg;
-      EXPECT_EQ((flags & kExpectArrayNarrow) != 0u, nested_type.Narrow()) << s_reg;
-      EXPECT_EQ((flags & kExpectArrayFp) != 0u, nested_type.Fp()) << s_reg;
-      EXPECT_EQ((flags & kExpectArrayCore) != 0u, nested_type.Core()) << s_reg;
-      EXPECT_EQ((flags & kExpectArrayRef) != 0u, nested_type.Ref()) << s_reg;
-    }
-    if (!type.Narrow() && type.LowWord() &&
-        (expectation.flags & (kExpectWide | kExpectNarrow | kExpectHigh)) == kExpectWide) {
-      SRegExpectation high_expectation = { array_depth, flags | kExpectHigh };
-      ExpectSRegType(s_reg + 1, high_expectation);
-    }
-  }
-
-  void ExpectCore(int s_reg, bool core) {
-    EXPECT_EQ(core, type_inference_->sregs_[s_reg].Core());
-  }
-
-  void ExpectRef(int s_reg, bool ref) {
-    EXPECT_EQ(ref, type_inference_->sregs_[s_reg].Ref());
-  }
-
-  void ExpectArrayDepth(int s_reg, uint32_t array_depth) {
-    EXPECT_EQ(array_depth, type_inference_->sregs_[s_reg].ArrayDepth());
-  }
-
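-  // kMaxSsaRegs is a generous bound on sregs; kLocalVRs is the number of
-  // local (non-argument) vregs, so argument vregs start at kLocalVRs.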
-  static constexpr size_t kMaxSsaRegs = 16384u;
-  static constexpr uint16_t kLocalVRs = 1000u;
-
-  static constexpr const char* kDexLocation = "TypeInferenceDexFile;";
-  static constexpr const char* kClassName = "LTypeInferenceTest;";
-  static constexpr const char* kMethodName = "test";
-
-  ArenaPool pool_;
-  CompilationUnit cu_;
-  size_t mir_count_;
-  MIR* mirs_;
-  DexFile::CodeItem* code_item_;
-  std::vector<SSARepresentation> ssa_reps_;
-  std::unique_ptr<ScopedArenaAllocator> allocator_;
-  std::unique_ptr<TypeInference> type_inference_;
-  ArenaBitVector* live_in_v_;
-
-  const TypeDef* type_defs_;
-  size_t type_count_;
-  const FieldDef* ifield_defs_;
-  size_t ifield_count_;
-  const FieldDef* sfield_defs_;
-  size_t sfield_count_;
-  const MethodDef* method_defs_;
-  size_t method_count_;
-
-  TestDexFileBuilder dex_file_builder_;
-  std::unique_ptr<const DexFile> dex_file_;
-};
-
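-// Each test below builds a small CFG, attaches the listed MIRs, runs type
-// inference and checks the inferred type flags of selected SSA registers.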
-TEST_F(TypeInferenceTest, IGet) {
-  static const FieldDef ifields[] = {
-      { kClassName, "B", "byteField" },
-      { kClassName, "C", "charField" },
-      { kClassName, "D", "doubleField" },
-      { kClassName, "F", "floatField" },
-      { kClassName, "I", "intField" },
-      { kClassName, "J", "longField" },
-      { kClassName, "S", "shortField" },
-      { kClassName, "Z", "booleanField" },
-      { kClassName, "Ljava/lang/Object;", "objectField" },
-      { kClassName, "[Ljava/lang/Object;", "objectArrayField" },
-  };
-  constexpr uint32_t thiz = kLocalVRs;
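-  // Wide defs occupy two consecutive sregs, hence the gaps in the def numbers.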
-  static const MIRDef mirs[] = {
-      DEF_IGET(3u, Instruction::IGET_BYTE, 0u, thiz, 0u),
-      DEF_IGET(3u, Instruction::IGET_CHAR, 1u, thiz, 1u),
-      DEF_IGET_WIDE(3u, Instruction::IGET_WIDE, 2u, thiz, 2u),
-      DEF_IGET(3u, Instruction::IGET, 4u, thiz, 3u),
-      DEF_IGET(3u, Instruction::IGET, 5u, thiz, 4u),
-      DEF_IGET_WIDE(3u, Instruction::IGET_WIDE, 6u, thiz, 5u),
-      DEF_IGET(3u, Instruction::IGET_SHORT, 8u, thiz, 6u),
-      DEF_IGET(3u, Instruction::IGET_BOOLEAN, 9u, thiz, 7u),
-      DEF_IGET(3u, Instruction::IGET_OBJECT, 10u, thiz, 8u),
-      DEF_IGET(3u, Instruction::IGET_OBJECT, 11u, thiz, 9u),
-  };
-
-  PrepareIFields(ifields);
-  BuildDexFile("()V", false);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectFp | kExpectWide },
-      { 0u, kExpectFp | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectWide },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectRef | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
-  };
-  static_assert(arraysize(expectations) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(expectations); ++i) {
-    EXPECT_EQ(mirs[i].opcode, mirs_[i].dalvikInsn.opcode);
-    ASSERT_LE(1u, mirs_[i].ssa_rep->num_defs);
-    ExpectSRegType(mirs_[i].ssa_rep->defs[0], expectations[i]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, SGet) {
-  static const FieldDef sfields[] = {
-      { kClassName, "B", "staticByteField" },
-      { kClassName, "C", "staticCharField" },
-      { kClassName, "D", "staticDoubleField" },
-      { kClassName, "F", "staticFloatField" },
-      { kClassName, "I", "staticIntField" },
-      { kClassName, "J", "staticLongField" },
-      { kClassName, "S", "staticShortField" },
-      { kClassName, "Z", "staticBooleanField" },
-      { kClassName, "Ljava/lang/Object;", "staticObjectField" },
-      { kClassName, "[Ljava/lang/Object;", "staticObjectArrayField" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_SGET(3u, Instruction::SGET_BYTE, 0u, 0u),
-      DEF_SGET(3u, Instruction::SGET_CHAR, 1u, 1u),
-      DEF_SGET_WIDE(3u, Instruction::SGET_WIDE, 2u, 2u),
-      DEF_SGET(3u, Instruction::SGET, 4u, 3u),
-      DEF_SGET(3u, Instruction::SGET, 5u, 4u),
-      DEF_SGET_WIDE(3u, Instruction::SGET_WIDE, 6u, 5u),
-      DEF_SGET(3u, Instruction::SGET_SHORT, 8u, 6u),
-      DEF_SGET(3u, Instruction::SGET_BOOLEAN, 9u, 7u),
-      DEF_SGET(3u, Instruction::SGET_OBJECT, 10u, 8u),
-      DEF_SGET(3u, Instruction::SGET_OBJECT, 11u, 9u),
-  };
-
-  PrepareSFields(sfields);
-  BuildDexFile("()V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectFp | kExpectWide },
-      { 0u, kExpectFp | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectWide },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectRef | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
-  };
-  static_assert(arraysize(expectations) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(expectations); ++i) {
-    EXPECT_EQ(mirs[i].opcode, mirs_[i].dalvikInsn.opcode);
-    ASSERT_LE(1u, mirs_[i].ssa_rep->num_defs);
-    ExpectSRegType(mirs_[i].ssa_rep->defs[0], expectations[i]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, IPut) {
-  static const FieldDef ifields[] = {
-      { kClassName, "B", "byteField" },
-      { kClassName, "C", "charField" },
-      { kClassName, "D", "doubleField" },
-      { kClassName, "F", "floatField" },
-      { kClassName, "I", "intField" },
-      { kClassName, "J", "longField" },
-      { kClassName, "S", "shortField" },
-      { kClassName, "Z", "booleanField" },
-      { kClassName, "Ljava/lang/Object;", "objectField" },
-      { kClassName, "[Ljava/lang/Object;", "objectArrayField" },
-  };
-  constexpr uint32_t thiz = kLocalVRs;
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),
-      DEF_IPUT(3u, Instruction::IPUT_BYTE, 0u, thiz, 0u),
-      DEF_CONST(3u, Instruction::CONST, 1u, 0),
-      DEF_IPUT(3u, Instruction::IPUT_CHAR, 1u, thiz, 1u),
-      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
-      DEF_IPUT_WIDE(3u, Instruction::IPUT_WIDE, 2u, thiz, 2u),
-      DEF_CONST(3u, Instruction::CONST, 4u, 0),
-      DEF_IPUT(3u, Instruction::IPUT, 4u, thiz, 3u),
-      DEF_CONST(3u, Instruction::CONST, 5u, 0),
-      DEF_IPUT(3u, Instruction::IPUT, 5u, thiz, 4u),
-      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
-      DEF_IPUT_WIDE(3u, Instruction::IPUT_WIDE, 6u, thiz, 5u),
-      DEF_CONST(3u, Instruction::CONST, 8u, 0),
-      DEF_IPUT(3u, Instruction::IPUT_SHORT, 8u, thiz, 6u),
-      DEF_CONST(3u, Instruction::CONST, 9u, 0),
-      DEF_IPUT(3u, Instruction::IPUT_BOOLEAN, 9u, thiz, 7u),
-      DEF_CONST(3u, Instruction::CONST, 10u, 0),
-      DEF_IPUT(3u, Instruction::IPUT_OBJECT, 10u, thiz, 8u),
-      DEF_CONST(3u, Instruction::CONST, 11u, 0),
-      DEF_IPUT(3u, Instruction::IPUT_OBJECT, 11u, thiz, 9u),
-  };
-
-  PrepareIFields(ifields);
-  BuildDexFile("()V", false);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      // One expectation for every 2 MIRs.
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectFp | kExpectWide },
-      { 0u, kExpectFp | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectWide },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-  };
-  static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(expectations); ++i) {
-    EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
-    EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
-    ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
-    ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, SPut) {
-  static const FieldDef sfields[] = {
-      { kClassName, "B", "staticByteField" },
-      { kClassName, "C", "staticCharField" },
-      { kClassName, "D", "staticDoubleField" },
-      { kClassName, "F", "staticFloatField" },
-      { kClassName, "I", "staticIntField" },
-      { kClassName, "J", "staticLongField" },
-      { kClassName, "S", "staticShortField" },
-      { kClassName, "Z", "staticBooleanField" },
-      { kClassName, "Ljava/lang/Object;", "staticObjectField" },
-      { kClassName, "[Ljava/lang/Object;", "staticObjectArrayField" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),
-      DEF_SPUT(3u, Instruction::SPUT_BYTE, 0u, 0u),
-      DEF_CONST(3u, Instruction::CONST, 1u, 0),
-      DEF_SPUT(3u, Instruction::SPUT_CHAR, 1u, 1u),
-      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
-      DEF_SPUT_WIDE(3u, Instruction::SPUT_WIDE, 2u, 2u),
-      DEF_CONST(3u, Instruction::CONST, 4u, 0),
-      DEF_SPUT(3u, Instruction::SPUT, 4u, 3u),
-      DEF_CONST(3u, Instruction::CONST, 5u, 0),
-      DEF_SPUT(3u, Instruction::SPUT, 5u, 4u),
-      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
-      DEF_SPUT_WIDE(3u, Instruction::SPUT_WIDE, 6u, 5u),
-      DEF_CONST(3u, Instruction::CONST, 8u, 0),
-      DEF_SPUT(3u, Instruction::SPUT_SHORT, 8u, 6u),
-      DEF_CONST(3u, Instruction::CONST, 9u, 0),
-      DEF_SPUT(3u, Instruction::SPUT_BOOLEAN, 9u, 7u),
-      DEF_CONST(3u, Instruction::CONST, 10u, 0),
-      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 10u, 8u),
-      DEF_CONST(3u, Instruction::CONST, 11u, 0),
-      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 11u, 9u),
-  };
-
-  PrepareSFields(sfields);
-  BuildDexFile("()V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      // One expectation for every 2 MIRs.
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectFp | kExpectWide },
-      { 0u, kExpectFp | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectWide },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-  };
-  static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(expectations); ++i) {
-    EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
-    EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
-    ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
-    ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, MethodReturnType) {
-  static const MethodDef methods[] = {
-      { kClassName, "()B", "byteFoo", kStatic },
-      { kClassName, "()C", "charFoo", kStatic },
-      { kClassName, "()D", "doubleFoo", kStatic },
-      { kClassName, "()F", "floatFoo", kStatic },
-      { kClassName, "()I", "intFoo", kStatic },
-      { kClassName, "()J", "longFoo", kStatic },
-      { kClassName, "()S", "shortFoo", kStatic },
-      { kClassName, "()Z", "booleanFoo", kStatic },
-      { kClassName, "()Ljava/lang/Object;", "objectFoo", kStatic },
-      { kClassName, "()[Ljava/lang/Object;", "objectArrayFoo", kStatic },
-  };
-  static const MIRDef mirs[] = {
-      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 0u),
-      DEF_NULOP(3u, Instruction::MOVE_RESULT, 0u),
-      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 1u),
-      DEF_NULOP(3u, Instruction::MOVE_RESULT, 1u),
-      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 2u),
-      DEF_NULOP_WIDE(3u, Instruction::MOVE_RESULT_WIDE, 2u),
-      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 3u),
-      DEF_NULOP(3u, Instruction::MOVE_RESULT, 4u),
-      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 4u),
-      DEF_NULOP(3u, Instruction::MOVE_RESULT, 5u),
-      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 5u),
-      DEF_NULOP_WIDE(3u, Instruction::MOVE_RESULT_WIDE, 6u),
-      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 6u),
-      DEF_NULOP(3u, Instruction::MOVE_RESULT, 8u),
-      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 7u),
-      DEF_NULOP(3u, Instruction::MOVE_RESULT, 9u),
-      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 8u),
-      DEF_NULOP(3u, Instruction::MOVE_RESULT_OBJECT, 10u),
-      DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 9u),
-      DEF_NULOP(3u, Instruction::MOVE_RESULT_OBJECT, 11u),
-  };
-
-  PrepareMethods(methods);
-  BuildDexFile("()V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      // One expectation for every 2 MIRs.
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectFp | kExpectWide },
-      { 0u, kExpectFp | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectWide },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectRef | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
-  };
-  static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(expectations); ++i) {
-    EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
-    EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
-    ASSERT_LE(1u, mirs_[2 * i + 1].ssa_rep->num_defs);
-    ExpectSRegType(mirs_[2 * i + 1].ssa_rep->defs[0], expectations[i]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, MethodArgType) {
-  static const MethodDef methods[] = {
-      { kClassName, "(B)V", "fooByte", kStatic },
-      { kClassName, "(C)V", "fooChar", kStatic },
-      { kClassName, "(D)V", "fooDouble", kStatic },
-      { kClassName, "(F)V", "fooFloat", kStatic },
-      { kClassName, "(I)V", "fooInt", kStatic },
-      { kClassName, "(J)V", "fooLong", kStatic },
-      { kClassName, "(S)V", "fooShort", kStatic },
-      { kClassName, "(Z)V", "fooBoolean", kStatic },
-      { kClassName, "(Ljava/lang/Object;)V", "fooObject", kStatic },
-      { kClassName, "([Ljava/lang/Object;)V", "fooObjectArray", kStatic },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),
-      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 0u, 0u),
-      DEF_CONST(3u, Instruction::CONST, 1u, 0),
-      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 1u, 1u),
-      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
-      DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 2u, 3u, 2u),
-      DEF_CONST(3u, Instruction::CONST, 4u, 0),
-      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 4u, 3u),
-      DEF_CONST(3u, Instruction::CONST, 5u, 0),
-      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 5u, 4u),
-      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
-      DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 6u, 7u, 5u),
-      DEF_CONST(3u, Instruction::CONST, 8u, 0),
-      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 8u, 6u),
-      DEF_CONST(3u, Instruction::CONST, 9u, 0),
-      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 9u, 7u),
-      DEF_CONST(3u, Instruction::CONST, 10u, 0),
-      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 10u, 8u),
-      DEF_CONST(3u, Instruction::CONST, 11u, 0),
-      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 11u, 9u),
-  };
-
-  PrepareMethods(methods);
-  BuildDexFile("()V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      // One expectation for every 2 MIRs.
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectFp | kExpectWide },
-      { 0u, kExpectFp | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectWide },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-  };
-  static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
-  for (size_t i = 0; i != arraysize(expectations); ++i) {
-    EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
-    EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
-    ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
-    ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, APut1) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),  // Object[] array
-      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // value; can't even determine whether core or fp.
-      DEF_CONST(3u, Instruction::CONST, 2u, 0),  // index
-      DEF_APUT(3u, Instruction::APUT, 1u, 0u, 2u),
-  };
-
-  BuildDexFile("()V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayNarrow },
-      { 0u, kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, APut2) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),  // Object[] array
-      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // Object[] value
-      DEF_CONST(3u, Instruction::CONST, 2u, 0),  // index
-      DEF_APUT(3u, Instruction::APUT_OBJECT, 1u, 0u, 2u),
-  };
-
-  BuildDexFile("()V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
-      { 0u, kExpectCore | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, APut3) {
-  static const MIRDef mirs[] = {
-      // Either array1 or array2 could be Object[][] but there is no way to tell from the bytecode.
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),  // Object[] array1
-      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // Object[] array2
-      DEF_CONST(3u, Instruction::CONST, 2u, 0),  // index
-      DEF_APUT(3u, Instruction::APUT_OBJECT, 0u, 1u, 2u),
-      DEF_APUT(3u, Instruction::APUT_OBJECT, 1u, 0u, 2u),
-  };
-
-  BuildDexFile("()V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, APut4) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),
-      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // index
-      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),  // Object[] array
-      DEF_CONST(3u, Instruction::CONST, 3u, 0),  // value; can't even determine whether core or fp.
-      DEF_APUT(3u, Instruction::APUT, 3u, 2u, 1u),
-  };
-
-  BuildDexFile("()V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayNarrow },
-      { 0u, kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, APut5) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),
-      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // index
-      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),  // Object[] array
-      DEF_CONST(3u, Instruction::CONST, 3u, 0),  // Object[] value
-      DEF_APUT(3u, Instruction::APUT_OBJECT, 3u, 2u, 1u),
-  };
-
-  BuildDexFile("()V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, APut6) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),
-      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // index
-      // Either array1 or array2 could be Object[][] but there is no way to tell from the bytecode.
-      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),  // Object[] array1
-      DEF_AGET(3u, Instruction::AGET_OBJECT, 3u, 0u, 1u),  // Object[] array2
-      DEF_APUT(3u, Instruction::APUT_OBJECT, 2u, 3u, 1u),
-      DEF_APUT(3u, Instruction::APUT_OBJECT, 3u, 2u, 1u),
-  };
-
-  BuildDexFile("()V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, TwoNullObjectArraysInLoop) {
-  static const MIRDef mirs[] = {
-      // void foo() {
-      //   Object[] array1 = ((Object[])null)[0];
-      //   Object[] array2 = ((Object[])null)[0];
-      //   for (int i = 0; i != 3; ++i) {
-      //     Object[] a1 = null;  // One of these could be Object[][] but not both.
-      //     Object[] a2 = null;  // But they will be deduced as Object[].
-      //     try { a1[0] = a2; } catch (Throwable ignored) { }
-      //     try { a2[0] = a1; } catch (Throwable ignored) { }
-      //     array1 = a1;
-      //     array2 = a2;
-      //   }
-      // }
-      //
-      // Omitting the try-catch:
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),            // null
-      DEF_CONST(3u, Instruction::CONST, 1u, 0),            // index
-      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),  // array1
-      DEF_AGET(3u, Instruction::AGET_OBJECT, 3u, 0u, 1u),  // array2
-      DEF_PHI2(4u, 4u, 2u, 8u),  // ? + [L -> [? gives [L (see array-length below)
-      DEF_PHI2(4u, 5u, 3u, 9u),  // ? + [L -> ? gives ?
-      DEF_AGET(4u, Instruction::AGET_OBJECT, 6u, 0u, 1u),  // a1
-      DEF_AGET(4u, Instruction::AGET_OBJECT, 7u, 0u, 1u),  // a2
-      DEF_APUT(4u, Instruction::APUT_OBJECT, 6u, 7u, 1u),
-      DEF_APUT(4u, Instruction::APUT_OBJECT, 7u, 6u, 1u),
-      DEF_MOVE(4u, Instruction::MOVE_OBJECT, 8u, 6u),
-      DEF_MOVE(4u, Instruction::MOVE_OBJECT, 9u, 7u),
-      DEF_UNOP(5u, Instruction::ARRAY_LENGTH, 10u, 4u),
-  };
-
-  BuildDexFile("()V", true);
-  PrepareLoop();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectRef | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectRef | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, ArrayArrayFloat) {
-  static const MethodDef methods[] = {
-      { kClassName, "(F)V", "fooFloat", kStatic },
-  };
-  static const MIRDef mirs[] = {
-      // void foo() {
-      //   try {
-      //     float[][][] aaaf = null;
-      //     float[][] array = aaaf[0];  // Make sure array is treated as properly typed.
-      //     array[0][0] = 0.0f;      // const + aget-object[1] + aput
-      //     fooFloat(array[0][0]);   // aget-object[2] + aget + invoke
-      //     // invoke: signature => input is F.
-      //     // aget: output is F => base is [F (precise)
-      //     // aget-object[2]: output is [F => base is [[F (precise)
-      //     // aput: unknown input type => base is [?
-      //     // aget-object[1]: base is [[F => result is L or [F, merge with [? => result is [F
-      //     // aput (again): base is [F => result is F
-      //     // const: F determined by the aput reprocessing.
-      //   } catch (Throwable ignored) {
-      //   }
-      // }
-      //
-      // Omitting the try-catch:
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),             // 0
-      DEF_CONST(3u, Instruction::CONST, 1u, 0),             // aaaf
-      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 1u, 0u),   // array = aaaf[0]
-      DEF_CONST(3u, Instruction::CONST, 3u, 0),             // 0.0f
-      DEF_AGET(3u, Instruction::AGET_OBJECT, 4u, 2u, 0u),   // array[0]
-      DEF_APUT(3u, Instruction::APUT, 3u, 4u, 0u),          // array[0][0] = 0.0f
-      DEF_AGET(3u, Instruction::AGET_OBJECT, 5u, 2u, 0u),   // array[0]
-      DEF_AGET(3u, Instruction::AGET, 6u, 5u, 0u),          // array[0][0]
-      DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 6u, 0u),  // fooFloat(array[0][0])
-  };
-
-  PrepareMethods(methods);
-  BuildDexFile("()V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 0u, kExpectCore | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-      { 2u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
-      { 0u, kExpectFp | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
-      { 0u, kExpectFp | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, CheckCast1) {
-  static const TypeDef types[] = {
-      { "[I" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),
-      DEF_CONST(3u, Instruction::CONST, 1u, 0),
-      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
-      DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
-      DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 0u),
-      // Pseudo-phi from [I and [I into L infers only L but not [.
-      DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
-  };
-  PrepareTypes(types);
-  BuildDexFile("()V", true);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
-  MapVRegToSReg(2, 2, v0_def_blocks);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectRef | kExpectNarrow },
-      { 0u, kExpectRef | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, CheckCast2) {
-  static const TypeDef types[] = {
-      { "[I" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),
-      DEF_CONST(3u, Instruction::CONST, 1u, 0),
-      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
-      DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
-      DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 0u),
-      // Pseudo-phi from [I and [I into [? infers [I.
-      DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
-      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 2u),
-  };
-  PrepareTypes(types);
-  BuildDexFile("()V", true);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
-  MapVRegToSReg(2, 2, v0_def_blocks);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectRef | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, CheckCast3) {
-  static const TypeDef types[] = {
-      { "[I" },
-      { "[F" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),
-      DEF_CONST(3u, Instruction::CONST, 1u, 0),
-      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
-      DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
-      DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
-      // Pseudo-phi from [I and [F into L correctly leaves it as L.
-      DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
-  };
-  PrepareTypes(types);
-  BuildDexFile("()V", true);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
-  MapVRegToSReg(2, 2, v0_def_blocks);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectRef | kExpectNarrow },
-      { 0u, kExpectRef | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, CheckCastConflict1) {
-  static const TypeDef types[] = {
-      { "[I" },
-      { "[F" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),
-      DEF_CONST(3u, Instruction::CONST, 1u, 0),
-      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
-      DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
-      DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
-      // Pseudo-phi from [I and [F into [? infers conflict [I/[F.
-      DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
-      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 2u),
-  };
-  PrepareTypes(types);
-  BuildDexFile("()V", true);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
-  MapVRegToSReg(2, 2, v0_def_blocks);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectRef | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg], false);
-  }
-  // The type conflict in array element wasn't propagated to an SSA reg.
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, CheckCastConflict2) {
-  static const TypeDef types[] = {
-      { "[I" },
-      { "[F" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),
-      DEF_CONST(3u, Instruction::CONST, 1u, 0),
-      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
-      DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
-      DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
-      // Pseudo-phi from [I and [F into [? infers conflict [I/[F.
-      DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
-      DEF_AGET(6u, Instruction::AGET, 4u, 2u, 1u),
-  };
-  PrepareTypes(types);
-  BuildDexFile("()V", true);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
-  MapVRegToSReg(2, 2, v0_def_blocks);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectRef | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectFp | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg], false);
-  }
-  // Type conflict in an SSA reg, register promotion disabled.
-  EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, Phi1) {
-  static const TypeDef types[] = {
-      { "[I" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 100),
-      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
-      DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 0u),
-      // Phi from [I and [I infers only L but not [.
-      DEF_PHI2(6u, 3u, 1u, 2u),
-  };
-  PrepareTypes(types);
-  BuildDexFile("()V", true);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 0u, kExpectCore | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
-      { 0u, kExpectRef | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, Phi2) {
-  static const TypeDef types[] = {
-      { "[F" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 100),
-      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
-      DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 0u),
-      // Phi from [F and [F into [? infers [F.
-      DEF_PHI2(6u, 3u, 1u, 2u),
-      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 3u),
-  };
-  PrepareTypes(types);
-  BuildDexFile("()V", true);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 0u, kExpectCore | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, Phi3) {
-  static const TypeDef types[] = {
-      { "[I" },
-      { "[F" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 100),
-      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
-      DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
-      // Phi from [I and [F infers L.
-      DEF_PHI2(6u, 3u, 1u, 2u),
-  };
-  PrepareTypes(types);
-  BuildDexFile("()V", true);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 0u, kExpectCore | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
-      { 0u, kExpectRef | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, Phi4) {
-  static const TypeDef types[] = {
-      { "[I" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 100),
-      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
-      DEF_CONST(5u, Instruction::CONST, 2u, 0),
-      // Pseudo-phi from [I and null infers L.
-      DEF_PHI2(6u, 3u, 1u, 2u),
-  };
-  PrepareTypes(types);
-  BuildDexFile("()V", true);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 0u, kExpectCore | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
-      { 0u, kExpectRef | kExpectNarrow | kExpectNull },
-      { 0u, kExpectRef | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, PhiConflict1) {
-  static const TypeDef types[] = {
-      { "[I" },
-      { "[F" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 100),
-      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
-      DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
-      // Pseudo-phi from [I and [F into [? infers conflict [I/[F (then propagated upwards).
-      DEF_PHI2(6u, 3u, 1u, 2u),
-      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 3u),
-  };
-  PrepareTypes(types);
-  BuildDexFile("()V", true);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 0u, kExpectCore | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg], false);
-  }
-  // The type conflict in array element wasn't propagated to an SSA reg.
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, PhiConflict2) {
-  static const TypeDef types[] = {
-      { "[I" },
-      { "[F" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 100),
-      DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
-      DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
-      // Pseudo-phi from [I and [F into [? infers conflict [I/[F (then propagated upwards).
-      DEF_PHI2(6u, 3u, 1u, 2u),
-      DEF_AGET(6u, Instruction::AGET, 4u, 3u, 0u),
-  };
-  PrepareTypes(types);
-  BuildDexFile("()V", true);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 0u, kExpectCore | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectFp | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg], false);
-  }
-  // Type conflict in an SSA reg, register promotion disabled.
-  EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, Wide1) {
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),
-      DEF_CONST(3u, Instruction::CONST, 1u, 0),  // index
-      DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),  // long[]
-      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 3u, 0),  // long
-      DEF_APUT_WIDE(3u, Instruction::APUT_WIDE, 3u, 2u, 1u),
-      { 3u, Instruction::RETURN_OBJECT, 0, 0u, 1u, { 2u }, 0u, { } },
-  };
-
-  BuildDexFile("()[J", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
-      { 0u, kExpectCore | kExpectWide },
-      // NOTE: High word checked implicitly for sreg = 3.
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg], false);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, WideSizeConflict1) {
-  static const MIRDef mirs[] = {
-      DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 0u, 0),
-      DEF_MOVE(3u, Instruction::MOVE, 2u, 0u),
-  };
-
-  BuildDexFile("()V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 0u, kExpectNarrow | kExpectWide },
-      { 0u, kExpectNarrow | kExpectWide },
-  };
-  ExpectSRegType(0u, expectations[0], false);
-  ExpectSRegType(2u, expectations[1], false);
-  EXPECT_TRUE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, ArrayLongLength) {
-  static const FieldDef sfields[] = {
-      { kClassName, "[J", "arrayLongField" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(4u, Instruction::CONST, 0u, 0),
-      DEF_SGET(5u, Instruction::SGET_OBJECT, 1u, 0u),
-      DEF_PHI2(6u, 2u, 0u, 1u),
-      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 3u, 2u),
-      DEF_SGET(6u, Instruction::SGET_OBJECT, 4u, 0u),
-      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 5u, 4u),
-  };
-
-  PrepareSFields(sfields);
-  BuildDexFile("()V", true);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayCore | kExpectArrayWide },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
-      { 0u, kExpectCore | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, ArrayArrayObjectLength) {
-  static const FieldDef sfields[] = {
-      { kClassName, "[[Ljava/lang/Object;", "arrayLongField" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(4u, Instruction::CONST, 0u, 0),
-      DEF_SGET(5u, Instruction::SGET_OBJECT, 1u, 0u),
-      DEF_PHI2(6u, 2u, 0u, 1u),
-      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 3u, 2u),
-      DEF_SGET(6u, Instruction::SGET_OBJECT, 4u, 0u),
-      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 5u, 4u),
-  };
-
-  PrepareSFields(sfields);
-  BuildDexFile("()V", true);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
-      { 2u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 2u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, SGetAdd0SPut) {
-  static const FieldDef sfields[] = {
-      { kClassName, "I", "staticIntField" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_SGET(3u, Instruction::SGET, 0u, 0u),
-      DEF_UNOP(3u, Instruction::ADD_INT_LIT8, 1u, 0u),  // +0
-      DEF_SPUT(3u, Instruction::SPUT, 1u, 0u),
-  };
-
-  PrepareSFields(sfields);
-  BuildDexFile("()V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, MoveObjectNull) {
-  static const MethodDef methods[] = {
-      { kClassName, "([I[D)V", "foo", kStatic },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),
-      DEF_MOVE(3u, Instruction::MOVE_OBJECT, 1u, 0u),
-      DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 0u, 1u, 0u),
-  };
-
-  PrepareMethods(methods);
-  BuildDexFile("()V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectation = {
-      1u,
-      kExpectRef | kExpectNarrow | kExpectNull |
-      kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
-  };
-  ExpectSRegType(0u, expectation);
-  ExpectSRegType(1u, expectation);
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, MoveNull1) {
-  static const MethodDef methods[] = {
-      { kClassName, "([I[D)V", "foo", kStatic },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),
-      DEF_MOVE(3u, Instruction::MOVE, 1u, 0u),
-      DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 0u, 1u, 0u),
-  };
-
-  PrepareMethods(methods);
-  BuildDexFile("()V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectation = {
-      1u,
-      kExpectCore | kExpectRef | kExpectFp | kExpectNarrow | kExpectNull |
-      kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
-  };
-  ExpectSRegType(0u, expectation);
-  ExpectSRegType(1u, expectation);
-  // Type conflict using move instead of move-object for null, register promotion disabled.
-  EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
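MoveObjectNull and MoveNull1 form a matched pair: the same null constant feeds both an [I and a [D argument, but MOVE_OBJECT pins the register to a reference, while a plain MOVE leaves core, fp, and ref all possible, producing a conflict in the register itself. The assertions imply the recovery policy sketched below (the type names are stand-ins, and the kPromoteRegs bit index is assumed):

```cpp
#include <cstdint>

// Stand-ins for the CompilationUnit state the assertions above inspect;
// the bit index for kPromoteRegs is assumed, not the real dex_flags value.
constexpr uint32_t kPromoteRegs = 0;

struct CompilationUnitSketch {
  uint32_t disable_opt = 0u;
  bool punt_to_interpreter = false;
};

// Policy implied by EXPECT_NE(disable_opt & ..., 0u) together with
// EXPECT_FALSE(PuntToInterpreter()): a conflict confined to an SSA register
// disables promotion but never abandons compilation.
void HandleSRegConflict(CompilationUnitSketch* cu) {
  cu->disable_opt |= (1u << kPromoteRegs);  // keep values in the frame
  // cu->punt_to_interpreter stays false for pure type conflicts.
}
```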
-TEST_F(TypeInferenceTest, MoveNull2) {
-  static const FieldDef sfields[] = {
-      { kClassName, "[F", "staticArrayArrayFloatField" },
-      { kClassName, "[I", "staticArrayIntField" },
-      { kClassName, "[[I", "staticArrayArrayIntField" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(4u, Instruction::CONST, 0u, 0),
-      DEF_MOVE(4u, Instruction::MOVE_OBJECT, 1u, 0u),
-      DEF_MOVE(4u, Instruction::MOVE_OBJECT, 2u, 1u),
-      DEF_SGET(5u, Instruction::SGET_OBJECT, 3u, 0u),
-      DEF_SGET(5u, Instruction::SGET_OBJECT, 4u, 1u),
-      DEF_SGET(5u, Instruction::SGET_OBJECT, 5u, 2u),
-      DEF_PHI2(6u, 6u, 0u, 3u),
-      DEF_PHI2(6u, 7u, 1u, 4u),
-      DEF_PHI2(6u, 8u, 2u, 5u),
-      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 9u, 6u),
-      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 10u, 7u),
-      DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 11u, 8u),
-      { 6u, Instruction::RETURN_OBJECT, 0, 0u, 1u, { 8u }, 0u, { } },
-  };
-
-  PrepareSFields(sfields);
-  BuildDexFile("()[[I", true);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull |
-          kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull |
-          kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectNull |
-          kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
-      { 2u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
-      { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
-      { 2u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  // Type conflict in array type not propagated to actual register.
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
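MoveNull2 and the ReuseNull tests below make the complementary point: conflicts confined to the component types of the arrays a null may alias ([F vs. [I vs. [[I) accumulate in the kExpectArray* bits without disabling promotion, because the register itself stays a consistent narrow reference. A sketch of that separation:

```cpp
#include <cstdint>

enum : uint32_t { kCore = 1u << 0, kFp = 1u << 1, kRef = 1u << 2 };

// Sketch: a register's own type bits and the component type of the arrays
// it may reference are tracked separately (modelled on the kExpect* flags).
struct SRegTypeSketch {
  uint32_t reg_bits;    // e.g. kRef for a null aliasing several arrays
  uint32_t array_bits;  // union of all aliased component types
};

inline bool HasConflict(uint32_t bits) {
  int kinds = !!(bits & kCore) + !!(bits & kFp) + !!(bits & kRef);
  return kinds > 1;
}

// Only reg_bits matter for promotion; array_bits may mix kCore/kFp/kRef, as
// in MoveNull2 where the phi'd null aliases [F, [I, and [[I simultaneously.
inline bool DisablesPromotion(const SRegTypeSketch& t) {
  return HasConflict(t.reg_bits);
}
```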
-TEST_F(TypeInferenceTest, ReuseNull1) {
-  static const FieldDef sfields[] = {
-      { kClassName, "[I", "staticArrayLongField" },
-      { kClassName, "[[F", "staticArrayArrayFloatField" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),
-      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 0u),
-      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 1u),
-  };
-
-  PrepareSFields(sfields);
-  BuildDexFile("()V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectation = {
-      1u,
-      kExpectRef | kExpectNarrow | kExpectNull |
-      kExpectArrayCore | kExpectArrayRef | kExpectArrayFp | kExpectArrayNarrow
-  };
-  ExpectSRegType(0u, expectation);
-  // Type conflict in array type not propagated to actual register.
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, ReuseNull2) {
-  static const FieldDef sfields[] = {
-      { kClassName, "[J", "staticArrayLongField" },
-      { kClassName, "[[F", "staticArrayArrayFloatField" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_CONST(3u, Instruction::CONST, 0u, 0),
-      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 0u),
-      DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 1u),
-  };
-
-  PrepareSFields(sfields);
-  BuildDexFile("()V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectation = {
-      1u,
-      kExpectRef | kExpectNarrow | kExpectNull |
-      kExpectArrayCore | kExpectArrayRef | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
-  };
-  ExpectSRegType(0u, expectation);
-  // Type conflict in array type not propagated to actual register.
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, ArgIsNonNull) {
-  constexpr uint32_t thiz = kLocalVRs;
-  static const MIRDef mirs[] = {
-      DEF_MOVE(3u, Instruction::MOVE_OBJECT, 0u, thiz),
-  };
-
-  BuildDexFile("(Ljava/lang/Object;)V", true);
-  PrepareSingleBlock();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectation = {
-      0u,
-      kExpectRef | kExpectNarrow
-  };
-  ExpectSRegType(0u, expectation);
-  // No type conflict; register promotion stays enabled.
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-TEST_F(TypeInferenceTest, IfCc) {
-  static const FieldDef sfields[] = {
-      { kClassName, "I", "intField" },
-  };
-  static const MIRDef mirs[] = {
-      DEF_SGET(3u, Instruction::SGET, 0u, 0u),
-      DEF_CONST(3u, Instruction::CONST, 1u, 0u),
-      { 3u, Instruction::IF_EQ, 0, 0u, 2, { 0u, 1u }, 0, { } },
-  };
-
-  PrepareSFields(sfields);
-  BuildDexFile("()V", false);
-  PrepareDiamond();
-  PrepareMIRs(mirs);
-  PerformTypeInference();
-
-  ASSERT_EQ(arraysize(mirs), mir_count_);
-  static const SRegExpectation expectations[] = {
-      { 0u, kExpectCore | kExpectNarrow },
-      { 0u, kExpectCore | kExpectNarrow },
-  };
-  for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
-    ExpectSRegType(sreg, expectations[sreg]);
-  }
-  EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
-  EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
-}
-
-}  // namespace art
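Throughout the deleted tables, the first SRegExpectation member reads as array nesting depth (0 for scalars, 1 for [J, 2 for [[Ljava/lang/Object;) and the second as a union of kExpect* bits. A reconstruction for readers without the deleted test header; the member names are guesses from usage:

```cpp
#include <cstdint>

// Inferred shape of the SRegExpectation rows used throughout the deleted
// tests; the real definition lived in type_inference_test.cc, and the
// member names here are guesses from how the tables are written.
struct SRegExpectationSketch {
  uint32_t array_depth;  // 0 = scalar, 1 = T[], 2 = T[][], ...
  uint32_t flags;        // union of kExpect* bits for the inferred type
};
```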
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
deleted file mode 100644
index 948ba7b..0000000
--- a/compiler/dex/vreg_analysis.cc
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/logging.h"
-#include "base/stringprintf.h"
-#include "compiler_ir.h"
-#include "dex/dataflow_iterator-inl.h"
-#include "dex_flags.h"
-#include "driver/dex_compilation_unit.h"
-
-namespace art {
-
-static const char* storage_name[] = {" Frame ", "PhysReg", " CompilerTemp "};
-
-void MIRGraph::DumpRegLocTable(RegLocation* table, int count) {
-  for (int i = 0; i < count; i++) {
-    LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c 0x%04x S%d",
-                              table[i].orig_sreg, storage_name[table[i].location],
-                              table[i].wide ? 'W' : 'N', table[i].defined ? 'D' : 'U',
-                              table[i].fp ? 'F' : table[i].ref ? 'R' : 'C',
-                              table[i].is_const ? 'c' : 'n',
-                              table[i].high_word ? 'H' : 'L', table[i].home ? 'h' : 't',
-                              table[i].reg.GetRawBits(),
-                              table[i].s_reg_low);
-  }
-}
-
-// FIXME - will likely need to revisit all uses of this.
-static const RegLocation fresh_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
-                                      RegStorage(), INVALID_SREG, INVALID_SREG};
-
-void MIRGraph::InitRegLocations() {
-  // Allocate the location map. We also include the maximum possible temps because
-  // the temp allocation initializes reg location as well (in order to deal with
-  // case when it will be called after this pass).
-  int max_regs = GetNumSSARegs() + GetMaxPossibleCompilerTemps();
-  RegLocation* loc = arena_->AllocArray<RegLocation>(max_regs, kArenaAllocRegAlloc);
-  for (int i = 0; i < GetNumSSARegs(); i++) {
-    loc[i] = fresh_loc;
-    loc[i].s_reg_low = i;
-    loc[i].is_const = false;  // Constants will be marked by constant propagation pass later.
-  }
-
-  /* Mark the location of ArtMethod* as temporary */
-  loc[GetMethodSReg()].location = kLocCompilerTemp;
-
-  reg_location_ = loc;
-}
-
-/*
- * Set the s_reg_low field to refer to the pre-SSA name of the
- * base Dalvik virtual register.  Once we add a better register
- * allocator, remove this remapping.
- */
-void MIRGraph::RemapRegLocations() {
-  for (int i = 0; i < GetNumSSARegs(); i++) {
-    int orig_sreg = reg_location_[i].s_reg_low;
-    reg_location_[i].orig_sreg = orig_sreg;
-    reg_location_[i].s_reg_low = SRegToVReg(orig_sreg);
-  }
-}
-
-}  // namespace art
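vreg_analysis.cc was the last remnant of Quick's register-location bookkeeping: InitRegLocations seeds one RegLocation per SSA register (plus room for compiler temps), and RemapRegLocations folds SSA names back onto their base Dalvik vregs, a stopgap its own comment expected a better register allocator to replace. A self-contained sketch of that fold, where ssa_base_vregs stands in for the MIRGraph table behind SRegToVReg:

```cpp
#include <vector>

struct RegLocSketch {
  int orig_sreg;  // the SSA name, preserved for dumps
  int s_reg_low;  // rewritten to the base Dalvik vreg
};

// Mirrors RemapRegLocations above: every SSA register is pointed back at
// the pre-SSA virtual register it was split from (e.g. v0_0, v0_1 -> v0).
void Remap(std::vector<RegLocSketch>* locs,
           const std::vector<int>& ssa_base_vregs) {
  for (RegLocSketch& loc : *locs) {
    int orig = loc.s_reg_low;              // currently holds the SSA name
    loc.orig_sreg = orig;
    loc.s_reg_low = ssa_base_vregs[orig];  // back to the Dalvik vreg
  }
}
```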
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index b4389d3..f5969aa 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -18,8 +18,6 @@
 
 #include <fstream>
 
-#include "dex/pass_manager.h"
-
 namespace art {
 
 CompilerOptions::CompilerOptions()
@@ -42,7 +40,6 @@
       implicit_suspend_checks_(false),
       compile_pic_(false),
       verbose_methods_(nullptr),
-      pass_manager_options_(),
       abort_on_hard_verifier_failure_(false),
       init_failure_output_(nullptr),
       dump_cfg_file_name_(""),
@@ -98,7 +95,6 @@
     implicit_suspend_checks_(implicit_suspend_checks),
     compile_pic_(compile_pic),
     verbose_methods_(verbose_methods),
-    pass_manager_options_(),
     abort_on_hard_verifier_failure_(abort_on_hard_verifier_failure),
     init_failure_output_(init_failure_output),
     dump_cfg_file_name_(dump_cfg_file_name),
@@ -134,34 +130,6 @@
   ParseUintOption(option, "--inline-max-code-units", &inline_max_code_units_, Usage);
 }
 
-void CompilerOptions::ParseDisablePasses(const StringPiece& option,
-                                         UsageFn Usage ATTRIBUTE_UNUSED) {
-  DCHECK(option.starts_with("--disable-passes="));
-  const std::string disable_passes = option.substr(strlen("--disable-passes=")).data();
-  pass_manager_options_.SetDisablePassList(disable_passes);
-}
-
-void CompilerOptions::ParsePrintPasses(const StringPiece& option,
-                                       UsageFn Usage ATTRIBUTE_UNUSED) {
-  DCHECK(option.starts_with("--print-passes="));
-  const std::string print_passes = option.substr(strlen("--print-passes=")).data();
-  pass_manager_options_.SetPrintPassList(print_passes);
-}
-
-void CompilerOptions::ParseDumpCfgPasses(const StringPiece& option,
-                                         UsageFn Usage ATTRIBUTE_UNUSED) {
-  DCHECK(option.starts_with("--dump-cfg-passes="));
-  const std::string dump_passes_string = option.substr(strlen("--dump-cfg-passes=")).data();
-  pass_manager_options_.SetDumpPassList(dump_passes_string);
-}
-
-void CompilerOptions::ParsePassOptions(const StringPiece& option,
-                                       UsageFn Usage ATTRIBUTE_UNUSED) {
-  DCHECK(option.starts_with("--pass-options="));
-  const std::string pass_options = option.substr(strlen("--pass-options=")).data();
-  pass_manager_options_.SetOverriddenPassOptions(pass_options);
-}
-
 void CompilerOptions::ParseDumpInitFailures(const StringPiece& option,
                                             UsageFn Usage ATTRIBUTE_UNUSED) {
   DCHECK(option.starts_with("--dump-init-failures="));
@@ -234,20 +202,6 @@
     include_patch_information_ = false;
   } else if (option == "--abort-on-hard-verifier-error") {
     abort_on_hard_verifier_failure_ = true;
-  } else if (option == "--print-pass-names") {
-    pass_manager_options_.SetPrintPassNames(true);
-  } else if (option.starts_with("--disable-passes=")) {
-    ParseDisablePasses(option, Usage);
-  } else if (option.starts_with("--print-passes=")) {
-    ParsePrintPasses(option, Usage);
-  } else if (option == "--print-all-passes") {
-    pass_manager_options_.SetPrintAllPasses();
-  } else if (option.starts_with("--dump-cfg-passes=")) {
-    ParseDumpCfgPasses(option, Usage);
-  } else if (option == "--print-pass-options") {
-    pass_manager_options_.SetPrintPassOptions(true);
-  } else if (option.starts_with("--pass-options=")) {
-    ParsePassOptions(option, Usage);
   } else if (option.starts_with("--dump-init-failures=")) {
     ParseDumpInitFailures(option, Usage);
   } else if (option.starts_with("--dump-cfg=")) {
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 59698af..11a4e06 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -22,7 +22,6 @@
 #include <vector>
 
 #include "base/macros.h"
-#include "dex/pass_manager.h"
 #include "globals.h"
 #include "utils.h"
 
@@ -239,10 +238,6 @@
     return init_failure_output_.get();
   }
 
-  const PassManagerOptions* GetPassManagerOptions() const {
-    return &pass_manager_options_;
-  }
-
   bool AbortOnHardVerifierFailure() const {
     return abort_on_hard_verifier_failure_;
   }
@@ -267,10 +262,7 @@
 
  private:
   void ParseDumpInitFailures(const StringPiece& option, UsageFn Usage);
-  void ParsePassOptions(const StringPiece& option, UsageFn Usage);
   void ParseDumpCfgPasses(const StringPiece& option, UsageFn Usage);
-  void ParsePrintPasses(const StringPiece& option, UsageFn Usage);
-  void ParseDisablePasses(const StringPiece& option, UsageFn Usage);
   void ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage);
   void ParseInlineDepthLimit(const StringPiece& option, UsageFn Usage);
   void ParseNumDexMethods(const StringPiece& option, UsageFn Usage);
@@ -307,8 +299,6 @@
   // Vector of methods to have verbose output enabled for.
   const std::vector<std::string>* verbose_methods_;
 
-  PassManagerOptions pass_manager_options_;
-
   // Abort compilation with an error if we find a class that fails verification with a hard
   // failure.
   bool abort_on_hard_verifier_failure_;
diff --git a/compiler/driver/dex_compilation_unit.cc b/compiler/driver/dex_compilation_unit.cc
index cfaa01b..e458b98 100644
--- a/compiler/driver/dex_compilation_unit.cc
+++ b/compiler/driver/dex_compilation_unit.cc
@@ -17,7 +17,6 @@
 #include "dex_compilation_unit.h"
 
 #include "base/stringprintf.h"
-#include "dex/compiler_ir.h"
 #include "mirror/dex_cache.h"
 #include "utils.h"
 
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 4b48107..eaf0e17 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -22,7 +22,6 @@
 #include "compiled_method.h"
 #include "compiler.h"
 #include "debug/method_debug_info.h"
-#include "dex/pass_manager.h"
 #include "dex/quick/dex_file_to_method_inliner_map.h"
 #include "dex/quick_compiler_callbacks.h"
 #include "dex/verification_results.h"
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 0c7648e..0ca7305 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -20,7 +20,6 @@
 #include "nodes.h"
 #include "builder.h"
 #include "common_compiler_test.h"
-#include "compiler/dex/pass_manager.h"
 #include "dex_file.h"
 #include "dex_instruction.h"
 #include "handle_scope-inl.h"
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 0debd42..ede0bda 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -48,7 +48,6 @@
 #include "compiler_callbacks.h"
 #include "debug/elf_debug_writer.h"
 #include "debug/method_debug_info.h"
-#include "dex/pass_manager.h"
 #include "dex/quick/dex_file_to_method_inliner_map.h"
 #include "dex/quick_compiler_callbacks.h"
 #include "dex/verification_results.h"
@@ -342,20 +341,6 @@
   UsageError("  --profile-file-fd=<number>: same as --profile-file but accepts a file descriptor.");
   UsageError("      Cannot be used together with --profile-file.");
   UsageError("");
-  UsageError("  --print-pass-names: print a list of pass names");
-  UsageError("");
-  UsageError("  --disable-passes=<pass-names>:  disable one or more passes separated by comma.");
-  UsageError("      Example: --disable-passes=UseCount,BBOptimizations");
-  UsageError("");
-  UsageError("  --print-pass-options: print a list of passes that have configurable options along "
-             "with the setting.");
-  UsageError("      Will print default if no overridden setting exists.");
-  UsageError("");
-  UsageError("  --pass-options=Pass1Name:Pass1OptionName:Pass1Option#,"
-             "Pass2Name:Pass2OptionName:Pass2Option#");
-  UsageError("      Used to specify a pass specific option. The setting itself must be integer.");
-  UsageError("      Separator used between options is a comma.");
-  UsageError("");
   UsageError("  --swap-file=<file-name>:  specifies a file to use for swap.");
   UsageError("      Example: --swap-file=/data/tmp/swap.001");
   UsageError("");