Merge "Some clean-up for the handling of HSelect in LSE"
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index e3f0c24..704d69a 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -239,13 +239,13 @@
   runtime/proxy_test.cc \
   runtime/reflection_test.cc \
   compiler/compiled_method_test.cc \
+  compiler/debug/dwarf/dwarf_test.cc \
   compiler/dex/gvn_dead_code_elimination_test.cc \
   compiler/dex/global_value_numbering_test.cc \
   compiler/dex/local_value_numbering_test.cc \
   compiler/dex/mir_graph_test.cc \
   compiler/dex/mir_optimization_test.cc \
   compiler/dex/type_inference_test.cc \
-  compiler/dwarf/dwarf_test.cc \
   compiler/driver/compiled_method_storage_test.cc \
   compiler/driver/compiler_driver_test.cc \
   compiler/elf_writer_test.cc \
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 87eff82..b164942 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -20,6 +20,7 @@
 
 LIBART_COMPILER_SRC_FILES := \
 	compiled_method.cc \
+	debug/elf_debug_writer.cc \
 	dex/global_value_numbering.cc \
 	dex/gvn_dead_code_elimination.cc \
 	dex/local_value_numbering.cc \
@@ -105,7 +106,6 @@
 	utils/swap_space.cc \
 	compiler.cc \
 	elf_writer.cc \
-	elf_writer_debug.cc \
 	elf_writer_quick.cc \
 	image_writer.cc \
 	oat_writer.cc \
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h
index 508b04a..230cb9a 100644
--- a/compiler/cfi_test.h
+++ b/compiler/cfi_test.h
@@ -22,9 +22,9 @@
 #include <sstream>
 
 #include "arch/instruction_set.h"
-#include "dwarf/dwarf_constants.h"
-#include "dwarf/dwarf_test.h"
-#include "dwarf/headers.h"
+#include "debug/dwarf/dwarf_constants.h"
+#include "debug/dwarf/dwarf_test.h"
+#include "debug/dwarf/headers.h"
 #include "disassembler/disassembler.h"
 #include "gtest/gtest.h"
 
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 7a93613..5887620 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -283,11 +283,13 @@
     static_assert(sizeof(element_offset_) == sizeof(cmp1_), "needed by relational operators");
   };
   union {
-    uint32_t cmp2_;             // Used for relational operators.
+    // Note: To avoid uninitialized padding on 64-bit systems, we use `size_t` for `cmp2_`.
+    // This allows a hashing function to treat an array of linker patches as raw memory.
+    size_t cmp2_;             // Used for relational operators.
     // Literal offset of the insn loading PC (same as literal_offset if it's the same insn,
     // may be different if the PC-relative addressing needs multiple insns).
     uint32_t pc_insn_offset_;
-    static_assert(sizeof(pc_insn_offset_) == sizeof(cmp2_), "needed by relational operators");
+    static_assert(sizeof(pc_insn_offset_) <= sizeof(cmp2_), "needed by relational operators");
   };
 
   friend bool operator==(const LinkerPatch& lhs, const LinkerPatch& rhs);
diff --git a/compiler/compiler.h b/compiler/compiler.h
index 3a9ce1b..97c60de 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -64,7 +64,8 @@
 
   virtual bool JitCompile(Thread* self ATTRIBUTE_UNUSED,
                           jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED,
-                          ArtMethod* method ATTRIBUTE_UNUSED)
+                          ArtMethod* method ATTRIBUTE_UNUSED,
+                          bool osr ATTRIBUTE_UNUSED)
       SHARED_REQUIRES(Locks::mutator_lock_) {
     return false;
   }
diff --git a/compiler/dwarf/debug_abbrev_writer.h b/compiler/debug/dwarf/debug_abbrev_writer.h
similarity index 92%
rename from compiler/dwarf/debug_abbrev_writer.h
rename to compiler/debug/dwarf/debug_abbrev_writer.h
index 71367e8..0fc843c 100644
--- a/compiler/dwarf/debug_abbrev_writer.h
+++ b/compiler/debug/dwarf/debug_abbrev_writer.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_DWARF_DEBUG_ABBREV_WRITER_H_
-#define ART_COMPILER_DWARF_DEBUG_ABBREV_WRITER_H_
+#ifndef ART_COMPILER_DEBUG_DWARF_DEBUG_ABBREV_WRITER_H_
+#define ART_COMPILER_DEBUG_DWARF_DEBUG_ABBREV_WRITER_H_
 
 #include <cstdint>
 #include <type_traits>
@@ -23,8 +23,8 @@
 
 #include "base/casts.h"
 #include "base/stl_util.h"
-#include "dwarf/dwarf_constants.h"
-#include "dwarf/writer.h"
+#include "debug/dwarf/dwarf_constants.h"
+#include "debug/dwarf/writer.h"
 #include "leb128.h"
 
 namespace art {
@@ -95,4 +95,4 @@
 }  // namespace dwarf
 }  // namespace art
 
-#endif  // ART_COMPILER_DWARF_DEBUG_ABBREV_WRITER_H_
+#endif  // ART_COMPILER_DEBUG_DWARF_DEBUG_ABBREV_WRITER_H_
diff --git a/compiler/dwarf/debug_frame_opcode_writer.h b/compiler/debug/dwarf/debug_frame_opcode_writer.h
similarity index 96%
rename from compiler/dwarf/debug_frame_opcode_writer.h
rename to compiler/debug/dwarf/debug_frame_opcode_writer.h
index f74f37c..7c75c9b 100644
--- a/compiler/dwarf/debug_frame_opcode_writer.h
+++ b/compiler/debug/dwarf/debug_frame_opcode_writer.h
@@ -14,13 +14,13 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_
-#define ART_COMPILER_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_
+#ifndef ART_COMPILER_DEBUG_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_
+#define ART_COMPILER_DEBUG_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_
 
 #include "base/bit_utils.h"
-#include "dwarf/dwarf_constants.h"
-#include "dwarf/register.h"
-#include "dwarf/writer.h"
+#include "debug/dwarf/dwarf_constants.h"
+#include "debug/dwarf/register.h"
+#include "debug/dwarf/writer.h"
 
 namespace art {
 namespace dwarf {
@@ -338,4 +338,4 @@
 }  // namespace dwarf
 }  // namespace art
 
-#endif  // ART_COMPILER_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_
+#endif  // ART_COMPILER_DEBUG_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_
diff --git a/compiler/dwarf/debug_info_entry_writer.h b/compiler/debug/dwarf/debug_info_entry_writer.h
similarity index 95%
rename from compiler/dwarf/debug_info_entry_writer.h
rename to compiler/debug/dwarf/debug_info_entry_writer.h
index 1e29859..85f021e 100644
--- a/compiler/dwarf/debug_info_entry_writer.h
+++ b/compiler/debug/dwarf/debug_info_entry_writer.h
@@ -14,17 +14,17 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_DWARF_DEBUG_INFO_ENTRY_WRITER_H_
-#define ART_COMPILER_DWARF_DEBUG_INFO_ENTRY_WRITER_H_
+#ifndef ART_COMPILER_DEBUG_DWARF_DEBUG_INFO_ENTRY_WRITER_H_
+#define ART_COMPILER_DEBUG_DWARF_DEBUG_INFO_ENTRY_WRITER_H_
 
 #include <cstdint>
 #include <unordered_map>
 
 #include "base/casts.h"
-#include "dwarf/debug_abbrev_writer.h"
-#include "dwarf/dwarf_constants.h"
-#include "dwarf/expression.h"
-#include "dwarf/writer.h"
+#include "debug/dwarf/debug_abbrev_writer.h"
+#include "debug/dwarf/dwarf_constants.h"
+#include "debug/dwarf/expression.h"
+#include "debug/dwarf/writer.h"
 #include "leb128.h"
 
 namespace art {
@@ -225,4 +225,4 @@
 }  // namespace dwarf
 }  // namespace art
 
-#endif  // ART_COMPILER_DWARF_DEBUG_INFO_ENTRY_WRITER_H_
+#endif  // ART_COMPILER_DEBUG_DWARF_DEBUG_INFO_ENTRY_WRITER_H_
diff --git a/compiler/dwarf/debug_line_opcode_writer.h b/compiler/debug/dwarf/debug_line_opcode_writer.h
similarity index 96%
rename from compiler/dwarf/debug_line_opcode_writer.h
rename to compiler/debug/dwarf/debug_line_opcode_writer.h
index 201f0b4..58502a3 100644
--- a/compiler/dwarf/debug_line_opcode_writer.h
+++ b/compiler/debug/dwarf/debug_line_opcode_writer.h
@@ -14,13 +14,13 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_DWARF_DEBUG_LINE_OPCODE_WRITER_H_
-#define ART_COMPILER_DWARF_DEBUG_LINE_OPCODE_WRITER_H_
+#ifndef ART_COMPILER_DEBUG_DWARF_DEBUG_LINE_OPCODE_WRITER_H_
+#define ART_COMPILER_DEBUG_DWARF_DEBUG_LINE_OPCODE_WRITER_H_
 
 #include <cstdint>
 
-#include "dwarf/dwarf_constants.h"
-#include "dwarf/writer.h"
+#include "debug/dwarf/dwarf_constants.h"
+#include "debug/dwarf/writer.h"
 
 namespace art {
 namespace dwarf {
@@ -252,4 +252,4 @@
 }  // namespace dwarf
 }  // namespace art
 
-#endif  // ART_COMPILER_DWARF_DEBUG_LINE_OPCODE_WRITER_H_
+#endif  // ART_COMPILER_DEBUG_DWARF_DEBUG_LINE_OPCODE_WRITER_H_
diff --git a/compiler/dwarf/dwarf_constants.h b/compiler/debug/dwarf/dwarf_constants.h
similarity index 98%
rename from compiler/dwarf/dwarf_constants.h
rename to compiler/debug/dwarf/dwarf_constants.h
index 0d7951b..96f805e 100644
--- a/compiler/dwarf/dwarf_constants.h
+++ b/compiler/debug/dwarf/dwarf_constants.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_DWARF_DWARF_CONSTANTS_H_
-#define ART_COMPILER_DWARF_DWARF_CONSTANTS_H_
+#ifndef ART_COMPILER_DEBUG_DWARF_DWARF_CONSTANTS_H_
+#define ART_COMPILER_DEBUG_DWARF_DWARF_CONSTANTS_H_
 
 namespace art {
 namespace dwarf {
@@ -691,4 +691,4 @@
 }  // namespace dwarf
 }  // namespace art
 
-#endif  // ART_COMPILER_DWARF_DWARF_CONSTANTS_H_
+#endif  // ART_COMPILER_DEBUG_DWARF_DWARF_CONSTANTS_H_
diff --git a/compiler/dwarf/dwarf_test.cc b/compiler/debug/dwarf/dwarf_test.cc
similarity index 98%
rename from compiler/dwarf/dwarf_test.cc
rename to compiler/debug/dwarf/dwarf_test.cc
index 3237311..e455d0d 100644
--- a/compiler/dwarf/dwarf_test.cc
+++ b/compiler/debug/dwarf/dwarf_test.cc
@@ -16,11 +16,11 @@
 
 #include "dwarf_test.h"
 
-#include "dwarf/dwarf_constants.h"
-#include "dwarf/debug_frame_opcode_writer.h"
-#include "dwarf/debug_info_entry_writer.h"
-#include "dwarf/debug_line_opcode_writer.h"
-#include "dwarf/headers.h"
+#include "debug/dwarf/debug_frame_opcode_writer.h"
+#include "debug/dwarf/debug_info_entry_writer.h"
+#include "debug/dwarf/debug_line_opcode_writer.h"
+#include "debug/dwarf/dwarf_constants.h"
+#include "debug/dwarf/headers.h"
 #include "gtest/gtest.h"
 
 namespace art {
diff --git a/compiler/dwarf/dwarf_test.h b/compiler/debug/dwarf/dwarf_test.h
similarity index 97%
rename from compiler/dwarf/dwarf_test.h
rename to compiler/debug/dwarf/dwarf_test.h
index c3a3ca9..41bfe79 100644
--- a/compiler/dwarf/dwarf_test.h
+++ b/compiler/debug/dwarf/dwarf_test.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_DWARF_DWARF_TEST_H_
-#define ART_COMPILER_DWARF_DWARF_TEST_H_
+#ifndef ART_COMPILER_DEBUG_DWARF_DWARF_TEST_H_
+#define ART_COMPILER_DEBUG_DWARF_DWARF_TEST_H_
 
 #include <cstring>
 #include <dirent.h>
@@ -169,4 +169,4 @@
 }  // namespace dwarf
 }  // namespace art
 
-#endif  // ART_COMPILER_DWARF_DWARF_TEST_H_
+#endif  // ART_COMPILER_DEBUG_DWARF_DWARF_TEST_H_
diff --git a/compiler/dwarf/expression.h b/compiler/debug/dwarf/expression.h
similarity index 93%
rename from compiler/dwarf/expression.h
rename to compiler/debug/dwarf/expression.h
index 1503d03..fafc046 100644
--- a/compiler/dwarf/expression.h
+++ b/compiler/debug/dwarf/expression.h
@@ -14,14 +14,14 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_DWARF_EXPRESSION_H_
-#define ART_COMPILER_DWARF_EXPRESSION_H_
+#ifndef ART_COMPILER_DEBUG_DWARF_EXPRESSION_H_
+#define ART_COMPILER_DEBUG_DWARF_EXPRESSION_H_
 
 #include <cstddef>
 #include <cstdint>
 
-#include "dwarf/dwarf_constants.h"
-#include "dwarf/writer.h"
+#include "debug/dwarf/dwarf_constants.h"
+#include "debug/dwarf/writer.h"
 
 namespace art {
 namespace dwarf {
@@ -118,4 +118,4 @@
 }  // namespace dwarf
 }  // namespace art
 
-#endif  // ART_COMPILER_DWARF_EXPRESSION_H_
+#endif  // ART_COMPILER_DEBUG_DWARF_EXPRESSION_H_
diff --git a/compiler/dwarf/headers.h b/compiler/debug/dwarf/headers.h
similarity index 95%
rename from compiler/dwarf/headers.h
rename to compiler/debug/dwarf/headers.h
index 137c566..146d9fd 100644
--- a/compiler/dwarf/headers.h
+++ b/compiler/debug/dwarf/headers.h
@@ -14,17 +14,17 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_DWARF_HEADERS_H_
-#define ART_COMPILER_DWARF_HEADERS_H_
+#ifndef ART_COMPILER_DEBUG_DWARF_HEADERS_H_
+#define ART_COMPILER_DEBUG_DWARF_HEADERS_H_
 
 #include <cstdint>
 
-#include "dwarf/debug_frame_opcode_writer.h"
-#include "dwarf/debug_info_entry_writer.h"
-#include "dwarf/debug_line_opcode_writer.h"
-#include "dwarf/dwarf_constants.h"
-#include "dwarf/register.h"
-#include "dwarf/writer.h"
+#include "debug/dwarf/debug_frame_opcode_writer.h"
+#include "debug/dwarf/debug_info_entry_writer.h"
+#include "debug/dwarf/debug_line_opcode_writer.h"
+#include "debug/dwarf/dwarf_constants.h"
+#include "debug/dwarf/register.h"
+#include "debug/dwarf/writer.h"
 #include "utils/array_ref.h"
 
 namespace art {
@@ -204,4 +204,4 @@
 }  // namespace dwarf
 }  // namespace art
 
-#endif  // ART_COMPILER_DWARF_HEADERS_H_
+#endif  // ART_COMPILER_DEBUG_DWARF_HEADERS_H_
diff --git a/compiler/dwarf/register.h b/compiler/debug/dwarf/register.h
similarity index 93%
rename from compiler/dwarf/register.h
rename to compiler/debug/dwarf/register.h
index aa3070a..24bacac 100644
--- a/compiler/dwarf/register.h
+++ b/compiler/debug/dwarf/register.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_DWARF_REGISTER_H_
-#define ART_COMPILER_DWARF_REGISTER_H_
+#ifndef ART_COMPILER_DEBUG_DWARF_REGISTER_H_
+#define ART_COMPILER_DEBUG_DWARF_REGISTER_H_
 
 namespace art {
 namespace dwarf {
@@ -59,4 +59,4 @@
 }  // namespace dwarf
 }  // namespace art
 
-#endif  // ART_COMPILER_DWARF_REGISTER_H_
+#endif  // ART_COMPILER_DEBUG_DWARF_REGISTER_H_
diff --git a/compiler/dwarf/writer.h b/compiler/debug/dwarf/writer.h
similarity index 97%
rename from compiler/dwarf/writer.h
rename to compiler/debug/dwarf/writer.h
index 74acf07..95912ad 100644
--- a/compiler/dwarf/writer.h
+++ b/compiler/debug/dwarf/writer.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_DWARF_WRITER_H_
-#define ART_COMPILER_DWARF_WRITER_H_
+#ifndef ART_COMPILER_DEBUG_DWARF_WRITER_H_
+#define ART_COMPILER_DEBUG_DWARF_WRITER_H_
 
 #include <type_traits>
 #include <vector>
@@ -179,4 +179,4 @@
 }  // namespace dwarf
 }  // namespace art
 
-#endif  // ART_COMPILER_DWARF_WRITER_H_
+#endif  // ART_COMPILER_DEBUG_DWARF_WRITER_H_
diff --git a/compiler/debug/elf_compilation_unit.h b/compiler/debug/elf_compilation_unit.h
new file mode 100644
index 0000000..f725f45
--- /dev/null
+++ b/compiler/debug/elf_compilation_unit.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_ELF_COMPILATION_UNIT_H_
+#define ART_COMPILER_DEBUG_ELF_COMPILATION_UNIT_H_
+
+#include <vector>
+
+#include "debug/method_debug_info.h"
+
+namespace art {
+namespace debug {
+
+struct ElfCompilationUnit {
+  std::vector<const MethodDebugInfo*> methods;
+  size_t debug_line_offset = 0;
+  uintptr_t low_pc = std::numeric_limits<uintptr_t>::max();
+  uintptr_t high_pc = 0;
+};
+
+}  // namespace debug
+}  // namespace art
+
+#endif  // ART_COMPILER_DEBUG_ELF_COMPILATION_UNIT_H_
+
diff --git a/compiler/debug/elf_debug_frame_writer.h b/compiler/debug/elf_debug_frame_writer.h
new file mode 100644
index 0000000..f6d9b16
--- /dev/null
+++ b/compiler/debug/elf_debug_frame_writer.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_FRAME_WRITER_H_
+#define ART_COMPILER_DEBUG_ELF_DEBUG_FRAME_WRITER_H_
+
+#include <vector>
+
+#include "arch/instruction_set.h"
+#include "debug/dwarf/debug_frame_opcode_writer.h"
+#include "debug/dwarf/dwarf_constants.h"
+#include "debug/dwarf/headers.h"
+#include "debug/method_debug_info.h"
+#include "elf_builder.h"
+
+namespace art {
+namespace debug {
+
+static void WriteCIE(InstructionSet isa,
+                     dwarf::CFIFormat format,
+                     std::vector<uint8_t>* buffer) {
+  using Reg = dwarf::Reg;
+  // Scratch registers should be marked as undefined.  This tells the
+  // debugger that its value in the previous frame is not recoverable.
+  bool is64bit = Is64BitInstructionSet(isa);
+  switch (isa) {
+    case kArm:
+    case kThumb2: {
+      dwarf::DebugFrameOpCodeWriter<> opcodes;
+      opcodes.DefCFA(Reg::ArmCore(13), 0);  // R13(SP).
+      // core registers.
+      for (int reg = 0; reg < 13; reg++) {
+        if (reg < 4 || reg == 12) {
+          opcodes.Undefined(Reg::ArmCore(reg));
+        } else {
+          opcodes.SameValue(Reg::ArmCore(reg));
+        }
+      }
+      // fp registers.
+      for (int reg = 0; reg < 32; reg++) {
+        if (reg < 16) {
+          opcodes.Undefined(Reg::ArmFp(reg));
+        } else {
+          opcodes.SameValue(Reg::ArmFp(reg));
+        }
+      }
+      auto return_reg = Reg::ArmCore(14);  // R14(LR).
+      WriteCIE(is64bit, return_reg, opcodes, format, buffer);
+      return;
+    }
+    case kArm64: {
+      dwarf::DebugFrameOpCodeWriter<> opcodes;
+      opcodes.DefCFA(Reg::Arm64Core(31), 0);  // R31(SP).
+      // core registers.
+      for (int reg = 0; reg < 30; reg++) {
+        if (reg < 8 || reg == 16 || reg == 17) {
+          opcodes.Undefined(Reg::Arm64Core(reg));
+        } else {
+          opcodes.SameValue(Reg::Arm64Core(reg));
+        }
+      }
+      // fp registers.
+      for (int reg = 0; reg < 32; reg++) {
+        if (reg < 8 || reg >= 16) {
+          opcodes.Undefined(Reg::Arm64Fp(reg));
+        } else {
+          opcodes.SameValue(Reg::Arm64Fp(reg));
+        }
+      }
+      auto return_reg = Reg::Arm64Core(30);  // R30(LR).
+      WriteCIE(is64bit, return_reg, opcodes, format, buffer);
+      return;
+    }
+    case kMips:
+    case kMips64: {
+      dwarf::DebugFrameOpCodeWriter<> opcodes;
+      opcodes.DefCFA(Reg::MipsCore(29), 0);  // R29(SP).
+      // core registers.
+      for (int reg = 1; reg < 26; reg++) {
+        if (reg < 16 || reg == 24 || reg == 25) {  // AT, V*, A*, T*.
+          opcodes.Undefined(Reg::MipsCore(reg));
+        } else {
+          opcodes.SameValue(Reg::MipsCore(reg));
+        }
+      }
+      // fp registers.
+      for (int reg = 0; reg < 32; reg++) {
+        if (reg < 24) {
+          opcodes.Undefined(Reg::Mips64Fp(reg));
+        } else {
+          opcodes.SameValue(Reg::Mips64Fp(reg));
+        }
+      }
+      auto return_reg = Reg::MipsCore(31);  // R31(RA).
+      WriteCIE(is64bit, return_reg, opcodes, format, buffer);
+      return;
+    }
+    case kX86: {
+      // FIXME: Add fp registers once libunwind adds support for them. Bug: 20491296
+      constexpr bool generate_opcodes_for_x86_fp = false;
+      dwarf::DebugFrameOpCodeWriter<> opcodes;
+      opcodes.DefCFA(Reg::X86Core(4), 4);   // R4(ESP).
+      opcodes.Offset(Reg::X86Core(8), -4);  // R8(EIP).
+      // core registers.
+      for (int reg = 0; reg < 8; reg++) {
+        if (reg <= 3) {
+          opcodes.Undefined(Reg::X86Core(reg));
+        } else if (reg == 4) {
+          // Stack pointer.
+        } else {
+          opcodes.SameValue(Reg::X86Core(reg));
+        }
+      }
+      // fp registers.
+      if (generate_opcodes_for_x86_fp) {
+        for (int reg = 0; reg < 8; reg++) {
+          opcodes.Undefined(Reg::X86Fp(reg));
+        }
+      }
+      auto return_reg = Reg::X86Core(8);  // R8(EIP).
+      WriteCIE(is64bit, return_reg, opcodes, format, buffer);
+      return;
+    }
+    case kX86_64: {
+      dwarf::DebugFrameOpCodeWriter<> opcodes;
+      opcodes.DefCFA(Reg::X86_64Core(4), 8);  // R4(RSP).
+      opcodes.Offset(Reg::X86_64Core(16), -8);  // R16(RIP).
+      // core registers.
+      for (int reg = 0; reg < 16; reg++) {
+        if (reg == 4) {
+          // Stack pointer.
+        } else if (reg < 12 && reg != 3 && reg != 5) {  // except EBX and EBP.
+          opcodes.Undefined(Reg::X86_64Core(reg));
+        } else {
+          opcodes.SameValue(Reg::X86_64Core(reg));
+        }
+      }
+      // fp registers.
+      for (int reg = 0; reg < 16; reg++) {
+        if (reg < 12) {
+          opcodes.Undefined(Reg::X86_64Fp(reg));
+        } else {
+          opcodes.SameValue(Reg::X86_64Fp(reg));
+        }
+      }
+      auto return_reg = Reg::X86_64Core(16);  // R16(RIP).
+      WriteCIE(is64bit, return_reg, opcodes, format, buffer);
+      return;
+    }
+    case kNone:
+      break;
+  }
+  LOG(FATAL) << "Cannot write CIE frame for ISA " << isa;
+  UNREACHABLE();
+}
+
+template<typename ElfTypes>
+void WriteCFISection(ElfBuilder<ElfTypes>* builder,
+                     const ArrayRef<const MethodDebugInfo>& method_infos,
+                     dwarf::CFIFormat format,
+                     bool write_oat_patches) {
+  CHECK(format == dwarf::DW_DEBUG_FRAME_FORMAT || format == dwarf::DW_EH_FRAME_FORMAT);
+  typedef typename ElfTypes::Addr Elf_Addr;
+
+  if (method_infos.empty()) {
+    return;
+  }
+
+  std::vector<uint32_t> binary_search_table;
+  std::vector<uintptr_t> patch_locations;
+  if (format == dwarf::DW_EH_FRAME_FORMAT) {
+    binary_search_table.reserve(2 * method_infos.size());
+  } else {
+    patch_locations.reserve(method_infos.size());
+  }
+
+  // The methods can be written in any order.
+  // Let's therefore sort them in the lexicographical order of the opcodes.
+  // This has no effect on its own. However, if the final .debug_frame section is
+  // compressed it reduces the size since similar opcodes sequences are grouped.
+  std::vector<const MethodDebugInfo*> sorted_method_infos;
+  sorted_method_infos.reserve(method_infos.size());
+  for (size_t i = 0; i < method_infos.size(); i++) {
+    sorted_method_infos.push_back(&method_infos[i]);
+  }
+  std::sort(
+      sorted_method_infos.begin(),
+      sorted_method_infos.end(),
+      [](const MethodDebugInfo* lhs, const MethodDebugInfo* rhs) {
+        ArrayRef<const uint8_t> l = lhs->compiled_method->GetCFIInfo();
+        ArrayRef<const uint8_t> r = rhs->compiled_method->GetCFIInfo();
+        return std::lexicographical_compare(l.begin(), l.end(), r.begin(), r.end());
+      });
+
+  // Write .eh_frame/.debug_frame section.
+  auto* cfi_section = (format == dwarf::DW_DEBUG_FRAME_FORMAT
+                       ? builder->GetDebugFrame()
+                       : builder->GetEhFrame());
+  {
+    cfi_section->Start();
+    const bool is64bit = Is64BitInstructionSet(builder->GetIsa());
+    const Elf_Addr text_address = builder->GetText()->Exists()
+        ? builder->GetText()->GetAddress()
+        : 0;
+    const Elf_Addr cfi_address = cfi_section->GetAddress();
+    const Elf_Addr cie_address = cfi_address;
+    Elf_Addr buffer_address = cfi_address;
+    std::vector<uint8_t> buffer;  // Small temporary buffer.
+    WriteCIE(builder->GetIsa(), format, &buffer);
+    cfi_section->WriteFully(buffer.data(), buffer.size());
+    buffer_address += buffer.size();
+    buffer.clear();
+    for (const MethodDebugInfo* mi : sorted_method_infos) {
+      if (!mi->deduped) {  // Only one FDE per unique address.
+        ArrayRef<const uint8_t> opcodes = mi->compiled_method->GetCFIInfo();
+        if (!opcodes.empty()) {
+          const Elf_Addr code_address = text_address + mi->low_pc;
+          if (format == dwarf::DW_EH_FRAME_FORMAT) {
+            binary_search_table.push_back(
+                dchecked_integral_cast<uint32_t>(code_address));
+            binary_search_table.push_back(
+                dchecked_integral_cast<uint32_t>(buffer_address));
+          }
+          WriteFDE(is64bit, cfi_address, cie_address,
+                   code_address, mi->high_pc - mi->low_pc,
+                   opcodes, format, buffer_address, &buffer,
+                   &patch_locations);
+          cfi_section->WriteFully(buffer.data(), buffer.size());
+          buffer_address += buffer.size();
+          buffer.clear();
+        }
+      }
+    }
+    cfi_section->End();
+  }
+
+  if (format == dwarf::DW_EH_FRAME_FORMAT) {
+    auto* header_section = builder->GetEhFrameHdr();
+    header_section->Start();
+    uint32_t header_address = dchecked_integral_cast<int32_t>(header_section->GetAddress());
+    // Write .eh_frame_hdr section.
+    std::vector<uint8_t> buffer;
+    dwarf::Writer<> header(&buffer);
+    header.PushUint8(1);  // Version.
+    // Encoding of .eh_frame pointer - libunwind does not honor datarel here,
+    // so we have to use pcrel which means relative to the pointer's location.
+    header.PushUint8(dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4);
+    // Encoding of binary search table size.
+    header.PushUint8(dwarf::DW_EH_PE_udata4);
+    // Encoding of binary search table addresses - libunwind supports only this
+    // specific combination, which means relative to the start of .eh_frame_hdr.
+    header.PushUint8(dwarf::DW_EH_PE_datarel | dwarf::DW_EH_PE_sdata4);
+    // .eh_frame pointer
+    header.PushInt32(cfi_section->GetAddress() - (header_address + 4u));
+    // Binary search table size (number of entries).
+    header.PushUint32(dchecked_integral_cast<uint32_t>(binary_search_table.size()/2));
+    header_section->WriteFully(buffer.data(), buffer.size());
+    // Binary search table.
+    for (size_t i = 0; i < binary_search_table.size(); i++) {
+      // Make addresses section-relative since we know the header address now.
+      binary_search_table[i] -= header_address;
+    }
+    header_section->WriteFully(binary_search_table.data(), binary_search_table.size());
+    header_section->End();
+  } else {
+    if (write_oat_patches) {
+      builder->WritePatches(".debug_frame.oat_patches",
+                            ArrayRef<const uintptr_t>(patch_locations));
+    }
+  }
+}
+
+}  // namespace debug
+}  // namespace art
+
+#endif  // ART_COMPILER_DEBUG_ELF_DEBUG_FRAME_WRITER_H_
+
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
new file mode 100644
index 0000000..eed032f
--- /dev/null
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -0,0 +1,649 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_INFO_WRITER_H_
+#define ART_COMPILER_DEBUG_ELF_DEBUG_INFO_WRITER_H_
+
+#include <map>
+#include <unordered_set>
+#include <vector>
+
+#include "debug/dwarf/debug_abbrev_writer.h"
+#include "debug/dwarf/debug_info_entry_writer.h"
+#include "debug/elf_compilation_unit.h"
+#include "debug/elf_debug_loc_writer.h"
+#include "debug/method_debug_info.h"
+#include "dex_file-inl.h"
+#include "dex_file.h"
+#include "elf_builder.h"
+#include "linear_alloc.h"
+#include "mirror/array.h"
+#include "mirror/class-inl.h"
+#include "mirror/class.h"
+
+namespace art {
+namespace debug {
+
+typedef std::vector<DexFile::LocalInfo> LocalInfos;
+
+static void LocalInfoCallback(void* ctx, const DexFile::LocalInfo& entry) {
+  static_cast<LocalInfos*>(ctx)->push_back(entry);
+}
+
+static std::vector<const char*> GetParamNames(const MethodDebugInfo* mi) {
+  std::vector<const char*> names;
+  if (mi->code_item != nullptr) {
+    const uint8_t* stream = mi->dex_file->GetDebugInfoStream(mi->code_item);
+    if (stream != nullptr) {
+      DecodeUnsignedLeb128(&stream);  // line.
+      uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
+      for (uint32_t i = 0; i < parameters_size; ++i) {
+        uint32_t id = DecodeUnsignedLeb128P1(&stream);
+        names.push_back(mi->dex_file->StringDataByIdx(id));
+      }
+    }
+  }
+  return names;
+}
+
+// Helper class to write .debug_info and its supporting sections.
+template<typename ElfTypes>
+class ElfDebugInfoWriter {
+  using Elf_Addr = typename ElfTypes::Addr;
+
+ public:
+  explicit ElfDebugInfoWriter(ElfBuilder<ElfTypes>* builder)
+      : builder_(builder),
+        debug_abbrev_(&debug_abbrev_buffer_) {
+  }
+
+  void Start() {
+    builder_->GetDebugInfo()->Start();
+  }
+
+  void End(bool write_oat_patches) {
+    builder_->GetDebugInfo()->End();
+    if (write_oat_patches) {
+      builder_->WritePatches(".debug_info.oat_patches",
+                             ArrayRef<const uintptr_t>(debug_info_patches_));
+    }
+    builder_->WriteSection(".debug_abbrev", &debug_abbrev_buffer_);
+    if (!debug_loc_.empty()) {
+      builder_->WriteSection(".debug_loc", &debug_loc_);
+    }
+    if (!debug_ranges_.empty()) {
+      builder_->WriteSection(".debug_ranges", &debug_ranges_);
+    }
+  }
+
+ private:
+  ElfBuilder<ElfTypes>* builder_;
+  std::vector<uintptr_t> debug_info_patches_;
+  std::vector<uint8_t> debug_abbrev_buffer_;
+  dwarf::DebugAbbrevWriter<> debug_abbrev_;
+  std::vector<uint8_t> debug_loc_;
+  std::vector<uint8_t> debug_ranges_;
+
+  std::unordered_set<const char*> defined_dex_classes_;  // For CHECKs only.
+
+  template<typename ElfTypes2>
+  friend class ElfCompilationUnitWriter;
+};
+
+// Helper class to write one compilation unit.
+// It holds helper methods and temporary state.
+template<typename ElfTypes>
+class ElfCompilationUnitWriter {
+  using Elf_Addr = typename ElfTypes::Addr;
+
+ public:
+  explicit ElfCompilationUnitWriter(ElfDebugInfoWriter<ElfTypes>* owner)
+    : owner_(owner),
+      info_(Is64BitInstructionSet(owner_->builder_->GetIsa()), &owner->debug_abbrev_) {
+  }
+
+  void Write(const ElfCompilationUnit& compilation_unit) {
+    CHECK(!compilation_unit.methods.empty());
+    const Elf_Addr text_address = owner_->builder_->GetText()->Exists()
+        ? owner_->builder_->GetText()->GetAddress()
+        : 0;
+    const uintptr_t cu_size = compilation_unit.high_pc - compilation_unit.low_pc;
+    using namespace dwarf;  // NOLINT. For easy access to DWARF constants.
+
+    info_.StartTag(DW_TAG_compile_unit);
+    info_.WriteString(DW_AT_producer, "Android dex2oat");
+    info_.WriteData1(DW_AT_language, DW_LANG_Java);
+    info_.WriteString(DW_AT_comp_dir, "$JAVA_SRC_ROOT");
+    info_.WriteAddr(DW_AT_low_pc, text_address + compilation_unit.low_pc);
+    info_.WriteUdata(DW_AT_high_pc, dchecked_integral_cast<uint32_t>(cu_size));
+    info_.WriteSecOffset(DW_AT_stmt_list, compilation_unit.debug_line_offset);
+
+    const char* last_dex_class_desc = nullptr;
+    for (auto mi : compilation_unit.methods) {
+      const DexFile* dex = mi->dex_file;
+      const DexFile::CodeItem* dex_code = mi->code_item;
+      const DexFile::MethodId& dex_method = dex->GetMethodId(mi->dex_method_index);
+      const DexFile::ProtoId& dex_proto = dex->GetMethodPrototype(dex_method);
+      const DexFile::TypeList* dex_params = dex->GetProtoParameters(dex_proto);
+      const char* dex_class_desc = dex->GetMethodDeclaringClassDescriptor(dex_method);
+      const bool is_static = (mi->access_flags & kAccStatic) != 0;
+
+      // Enclose the method in correct class definition.
+      if (last_dex_class_desc != dex_class_desc) {
+        if (last_dex_class_desc != nullptr) {
+          EndClassTag();
+        }
+        // Write reference tag for the class we are about to declare.
+        size_t reference_tag_offset = info_.StartTag(DW_TAG_reference_type);
+        type_cache_.emplace(std::string(dex_class_desc), reference_tag_offset);
+        size_t type_attrib_offset = info_.size();
+        info_.WriteRef4(DW_AT_type, 0);
+        info_.EndTag();
+        // Declare the class that owns this method.
+        size_t class_offset = StartClassTag(dex_class_desc);
+        info_.UpdateUint32(type_attrib_offset, class_offset);
+        info_.WriteFlagPresent(DW_AT_declaration);
+        // Check that each class is defined only once.
+        bool unique = owner_->defined_dex_classes_.insert(dex_class_desc).second;
+        CHECK(unique) << "Redefinition of " << dex_class_desc;
+        last_dex_class_desc = dex_class_desc;
+      }
+
+      int start_depth = info_.Depth();
+      info_.StartTag(DW_TAG_subprogram);
+      WriteName(dex->GetMethodName(dex_method));
+      info_.WriteAddr(DW_AT_low_pc, text_address + mi->low_pc);
+      info_.WriteUdata(DW_AT_high_pc, dchecked_integral_cast<uint32_t>(mi->high_pc-mi->low_pc));
+      std::vector<uint8_t> expr_buffer;
+      Expression expr(&expr_buffer);
+      expr.WriteOpCallFrameCfa();
+      info_.WriteExprLoc(DW_AT_frame_base, expr);
+      WriteLazyType(dex->GetReturnTypeDescriptor(dex_proto));
+
+      // Write parameters. DecodeDebugLocalInfo returns them as well, but it does not
+      // guarantee order or uniqueness so it is safer to iterate over them manually.
+      // DecodeDebugLocalInfo might also not be available if there is no debug info.
+      std::vector<const char*> param_names = GetParamNames(mi);
+      uint32_t arg_reg = 0;
+      if (!is_static) {
+        info_.StartTag(DW_TAG_formal_parameter);
+        WriteName("this");
+        info_.WriteFlagPresent(DW_AT_artificial);
+        WriteLazyType(dex_class_desc);
+        if (dex_code != nullptr) {
+          // Write the stack location of the parameter.
+          const uint32_t vreg = dex_code->registers_size_ - dex_code->ins_size_ + arg_reg;
+          const bool is64bitValue = false;
+          WriteRegLocation(mi, vreg, is64bitValue, compilation_unit.low_pc);
+        }
+        arg_reg++;
+        info_.EndTag();
+      }
+      if (dex_params != nullptr) {
+        for (uint32_t i = 0; i < dex_params->Size(); ++i) {
+          info_.StartTag(DW_TAG_formal_parameter);
+          // Parameter names may not be always available.
+          if (i < param_names.size()) {
+            WriteName(param_names[i]);
+          }
+          // Write the type.
+          const char* type_desc = dex->StringByTypeIdx(dex_params->GetTypeItem(i).type_idx_);
+          WriteLazyType(type_desc);
+          const bool is64bitValue = type_desc[0] == 'D' || type_desc[0] == 'J';
+          if (dex_code != nullptr) {
+            // Write the stack location of the parameter.
+            const uint32_t vreg = dex_code->registers_size_ - dex_code->ins_size_ + arg_reg;
+            WriteRegLocation(mi, vreg, is64bitValue, compilation_unit.low_pc);
+          }
+          arg_reg += is64bitValue ? 2 : 1;
+          info_.EndTag();
+        }
+        if (dex_code != nullptr) {
+          DCHECK_EQ(arg_reg, dex_code->ins_size_);
+        }
+      }
+
+      // Write local variables.
+      LocalInfos local_infos;
+      if (dex->DecodeDebugLocalInfo(dex_code,
+                                    is_static,
+                                    mi->dex_method_index,
+                                    LocalInfoCallback,
+                                    &local_infos)) {
+        for (const DexFile::LocalInfo& var : local_infos) {
+          if (var.reg_ < dex_code->registers_size_ - dex_code->ins_size_) {
+            info_.StartTag(DW_TAG_variable);
+            WriteName(var.name_);
+            WriteLazyType(var.descriptor_);
+            bool is64bitValue = var.descriptor_[0] == 'D' || var.descriptor_[0] == 'J';
+            WriteRegLocation(mi, var.reg_, is64bitValue, compilation_unit.low_pc,
+                             var.start_address_, var.end_address_);
+            info_.EndTag();
+          }
+        }
+      }
+
+      info_.EndTag();
+      CHECK_EQ(info_.Depth(), start_depth);  // Balanced start/end.
+    }
+    if (last_dex_class_desc != nullptr) {
+      EndClassTag();
+    }
+    FinishLazyTypes();
+    CloseNamespacesAboveDepth(0);
+    info_.EndTag();  // DW_TAG_compile_unit
+    CHECK_EQ(info_.Depth(), 0);
+    std::vector<uint8_t> buffer;
+    buffer.reserve(info_.data()->size() + KB);
+    const size_t offset = owner_->builder_->GetDebugInfo()->GetSize();
+    // All compilation units share single table which is at the start of .debug_abbrev.
+    const size_t debug_abbrev_offset = 0;
+    WriteDebugInfoCU(debug_abbrev_offset, info_, offset, &buffer, &owner_->debug_info_patches_);
+    owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
+  }
+
+  void Write(const ArrayRef<mirror::Class*>& types) SHARED_REQUIRES(Locks::mutator_lock_) {
+    using namespace dwarf;  // NOLINT. For easy access to DWARF constants.
+
+    info_.StartTag(DW_TAG_compile_unit);
+    info_.WriteString(DW_AT_producer, "Android dex2oat");
+    info_.WriteData1(DW_AT_language, DW_LANG_Java);
+
+    // Base class references to be patched at the end.
+    std::map<size_t, mirror::Class*> base_class_references;
+
+    // Already written declarations or definitions.
+    std::map<mirror::Class*, size_t> class_declarations;
+
+    std::vector<uint8_t> expr_buffer;
+    for (mirror::Class* type : types) {
+      if (type->IsPrimitive()) {
+        // For primitive types the definition and the declaration is the same.
+        if (type->GetPrimitiveType() != Primitive::kPrimVoid) {
+          WriteTypeDeclaration(type->GetDescriptor(nullptr));
+        }
+      } else if (type->IsArrayClass()) {
+        mirror::Class* element_type = type->GetComponentType();
+        uint32_t component_size = type->GetComponentSize();
+        uint32_t data_offset = mirror::Array::DataOffset(component_size).Uint32Value();
+        uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();
+
+        CloseNamespacesAboveDepth(0);  // Declare in root namespace.
+        info_.StartTag(DW_TAG_array_type);
+        std::string descriptor_string;
+        WriteLazyType(element_type->GetDescriptor(&descriptor_string));
+        WriteLinkageName(type);
+        info_.WriteUdata(DW_AT_data_member_location, data_offset);
+        info_.StartTag(DW_TAG_subrange_type);
+        Expression count_expr(&expr_buffer);
+        count_expr.WriteOpPushObjectAddress();
+        count_expr.WriteOpPlusUconst(length_offset);
+        count_expr.WriteOpDerefSize(4);  // Array length is always 32-bit wide.
+        info_.WriteExprLoc(DW_AT_count, count_expr);
+        info_.EndTag();  // DW_TAG_subrange_type.
+        info_.EndTag();  // DW_TAG_array_type.
+      } else if (type->IsInterface()) {
+        // Skip.  Variables cannot have an interface as a dynamic type.
+        // We do not expose the interface information to the debugger in any way.
+      } else {
+        std::string descriptor_string;
+        const char* desc = type->GetDescriptor(&descriptor_string);
+        size_t class_offset = StartClassTag(desc);
+        class_declarations.emplace(type, class_offset);
+
+        if (!type->IsVariableSize()) {
+          info_.WriteUdata(DW_AT_byte_size, type->GetObjectSize());
+        }
+
+        WriteLinkageName(type);
+
+        if (type->IsObjectClass()) {
+          // Generate artificial member which is used to get the dynamic type of variable.
+          // The run-time value of this field will correspond to linkage name of some type.
+          // We need to do it only once in j.l.Object since all other types inherit it.
+          info_.StartTag(DW_TAG_member);
+          WriteName(".dynamic_type");
+          WriteLazyType(sizeof(uintptr_t) == 8 ? "J" : "I");
+          info_.WriteFlagPresent(DW_AT_artificial);
+          // Create DWARF expression to get the value of the methods_ field.
+          Expression expr(&expr_buffer);
+          // The address of the object has been implicitly pushed on the stack.
+          // Dereference the klass_ field of Object (32-bit; possibly poisoned).
+          DCHECK_EQ(type->ClassOffset().Uint32Value(), 0u);
+          DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Class>), 4u);
+          expr.WriteOpDerefSize(4);
+          if (kPoisonHeapReferences) {
+            expr.WriteOpNeg();
+            // DWARF stack is pointer sized. Ensure that the high bits are clear.
+            expr.WriteOpConstu(0xFFFFFFFF);
+            expr.WriteOpAnd();
+          }
+          // Add offset to the methods_ field.
+          expr.WriteOpPlusUconst(mirror::Class::MethodsOffset().Uint32Value());
+          // Top of stack holds the location of the field now.
+          info_.WriteExprLoc(DW_AT_data_member_location, expr);
+          info_.EndTag();  // DW_TAG_member.
+        }
+
+        // Base class.
+        mirror::Class* base_class = type->GetSuperClass();
+        if (base_class != nullptr) {
+          info_.StartTag(DW_TAG_inheritance);
+          base_class_references.emplace(info_.size(), base_class);
+          info_.WriteRef4(DW_AT_type, 0);
+          info_.WriteUdata(DW_AT_data_member_location, 0);
+          info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public);
+          info_.EndTag();  // DW_TAG_inheritance.
+        }
+
+        // Member variables.
+        for (uint32_t i = 0, count = type->NumInstanceFields(); i < count; ++i) {
+          ArtField* field = type->GetInstanceField(i);
+          info_.StartTag(DW_TAG_member);
+          WriteName(field->GetName());
+          WriteLazyType(field->GetTypeDescriptor());
+          info_.WriteUdata(DW_AT_data_member_location, field->GetOffset().Uint32Value());
+          uint32_t access_flags = field->GetAccessFlags();
+          if (access_flags & kAccPublic) {
+            info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public);
+          } else if (access_flags & kAccProtected) {
+            info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_protected);
+          } else if (access_flags & kAccPrivate) {
+            info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_private);
+          }
+          info_.EndTag();  // DW_TAG_member.
+        }
+
+        if (type->IsStringClass()) {
+          // Emit debug info about an artificial class member for java.lang.String which represents
+          // the first element of the data stored in a string instance. Consumers of the debug
+          // info will be able to read the content of java.lang.String based on the count (real
+          // field) and based on the location of this data member.
+          info_.StartTag(DW_TAG_member);
+          WriteName("value");
+          // We don't support fields with C like array types so we just say its type is java char.
+          WriteLazyType("C");  // char.
+          info_.WriteUdata(DW_AT_data_member_location,
+                           mirror::String::ValueOffset().Uint32Value());
+          info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_private);
+          info_.EndTag();  // DW_TAG_member.
+        }
+
+        EndClassTag();
+      }
+    }
+
+    // Write base class declarations.
+    for (const auto& base_class_reference : base_class_references) {
+      size_t reference_offset = base_class_reference.first;
+      mirror::Class* base_class = base_class_reference.second;
+      const auto& it = class_declarations.find(base_class);
+      if (it != class_declarations.end()) {
+        info_.UpdateUint32(reference_offset, it->second);
+      } else {
+        // Declare base class.  We can not use the standard WriteLazyType
+        // since we want to avoid the DW_TAG_reference_tag wrapping.
+        std::string tmp_storage;
+        const char* base_class_desc = base_class->GetDescriptor(&tmp_storage);
+        size_t base_class_declaration_offset = StartClassTag(base_class_desc);
+        info_.WriteFlagPresent(DW_AT_declaration);
+        WriteLinkageName(base_class);
+        EndClassTag();
+        class_declarations.emplace(base_class, base_class_declaration_offset);
+        info_.UpdateUint32(reference_offset, base_class_declaration_offset);
+      }
+    }
+
+    FinishLazyTypes();
+    CloseNamespacesAboveDepth(0);
+    info_.EndTag();  // DW_TAG_compile_unit.
+    CHECK_EQ(info_.Depth(), 0);
+    std::vector<uint8_t> buffer;
+    buffer.reserve(info_.data()->size() + KB);
+    const size_t offset = owner_->builder_->GetDebugInfo()->GetSize();
+    // All compilation units share single table which is at the start of .debug_abbrev.
+    const size_t debug_abbrev_offset = 0;
+    WriteDebugInfoCU(debug_abbrev_offset, info_, offset, &buffer, &owner_->debug_info_patches_);
+    owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
+  }
+
+  // Write table into .debug_loc which describes location of dex register.
+  // The dex register might be valid only at some points and it might
+  // move between machine registers and stack.
+  void WriteRegLocation(const MethodDebugInfo* method_info,
+                        uint16_t vreg,
+                        bool is64bitValue,
+                        uint32_t compilation_unit_low_pc,
+                        uint32_t dex_pc_low = 0,
+                        uint32_t dex_pc_high = 0xFFFFFFFF) {
+    WriteDebugLocEntry(method_info,
+                       vreg,
+                       is64bitValue,
+                       compilation_unit_low_pc,
+                       dex_pc_low,
+                       dex_pc_high,
+                       owner_->builder_->GetIsa(),
+                       &info_,
+                       &owner_->debug_loc_,
+                       &owner_->debug_ranges_);
+  }
+
+  // Linkage name uniquely identifies type.
+  // It is used to determine the dynamic type of objects.
+  // We use the methods_ field of class since it is unique and it is not moved by the GC.
+  void WriteLinkageName(mirror::Class* type) SHARED_REQUIRES(Locks::mutator_lock_) {
+    auto* methods_ptr = type->GetMethodsPtr();
+    if (methods_ptr == nullptr) {
+      // Some types might have no methods.  Allocate empty array instead.
+      LinearAlloc* allocator = Runtime::Current()->GetLinearAlloc();
+      void* storage = allocator->Alloc(Thread::Current(), sizeof(LengthPrefixedArray<ArtMethod>));
+      methods_ptr = new (storage) LengthPrefixedArray<ArtMethod>(0);
+      type->SetMethodsPtr(methods_ptr, 0, 0);
+      DCHECK(type->GetMethodsPtr() != nullptr);
+    }
+    char name[32];
+    snprintf(name, sizeof(name), "0x%" PRIXPTR, reinterpret_cast<uintptr_t>(methods_ptr));
+    info_.WriteString(dwarf::DW_AT_linkage_name, name);
+  }
+
+  // Some types are difficult to define as we go since they need
+  // to be enclosed in the right set of namespaces. Therefore we
+  // just define all types lazily at the end of compilation unit.
+  void WriteLazyType(const char* type_descriptor) {
+    if (type_descriptor != nullptr && type_descriptor[0] != 'V') {
+      lazy_types_.emplace(std::string(type_descriptor), info_.size());
+      info_.WriteRef4(dwarf::DW_AT_type, 0);
+    }
+  }
+
+  void FinishLazyTypes() {
+    for (const auto& lazy_type : lazy_types_) {
+      info_.UpdateUint32(lazy_type.second, WriteTypeDeclaration(lazy_type.first));
+    }
+    lazy_types_.clear();
+  }
+
+ private:
+  void WriteName(const char* name) {
+    if (name != nullptr) {
+      info_.WriteString(dwarf::DW_AT_name, name);
+    }
+  }
+
+  // Convert dex type descriptor to DWARF.
+  // Returns offset in the compilation unit.
+  size_t WriteTypeDeclaration(const std::string& desc) {
+    using namespace dwarf;  // NOLINT. For easy access to DWARF constants.
+
+    DCHECK(!desc.empty());
+    const auto& it = type_cache_.find(desc);
+    if (it != type_cache_.end()) {
+      return it->second;
+    }
+
+    size_t offset;
+    if (desc[0] == 'L') {
+      // Class type. For example: Lpackage/name;
+      size_t class_offset = StartClassTag(desc.c_str());
+      info_.WriteFlagPresent(DW_AT_declaration);
+      EndClassTag();
+      // Reference to the class type.
+      offset = info_.StartTag(DW_TAG_reference_type);
+      info_.WriteRef(DW_AT_type, class_offset);
+      info_.EndTag();
+    } else if (desc[0] == '[') {
+      // Array type.
+      size_t element_type = WriteTypeDeclaration(desc.substr(1));
+      CloseNamespacesAboveDepth(0);  // Declare in root namespace.
+      size_t array_type = info_.StartTag(DW_TAG_array_type);
+      info_.WriteFlagPresent(DW_AT_declaration);
+      info_.WriteRef(DW_AT_type, element_type);
+      info_.EndTag();
+      offset = info_.StartTag(DW_TAG_reference_type);
+      info_.WriteRef4(DW_AT_type, array_type);
+      info_.EndTag();
+    } else {
+      // Primitive types.
+      DCHECK_EQ(desc.size(), 1u);
+
+      const char* name;
+      uint32_t encoding;
+      uint32_t byte_size;
+      switch (desc[0]) {
+      case 'B':
+        name = "byte";
+        encoding = DW_ATE_signed;
+        byte_size = 1;
+        break;
+      case 'C':
+        name = "char";
+        encoding = DW_ATE_UTF;
+        byte_size = 2;
+        break;
+      case 'D':
+        name = "double";
+        encoding = DW_ATE_float;
+        byte_size = 8;
+        break;
+      case 'F':
+        name = "float";
+        encoding = DW_ATE_float;
+        byte_size = 4;
+        break;
+      case 'I':
+        name = "int";
+        encoding = DW_ATE_signed;
+        byte_size = 4;
+        break;
+      case 'J':
+        name = "long";
+        encoding = DW_ATE_signed;
+        byte_size = 8;
+        break;
+      case 'S':
+        name = "short";
+        encoding = DW_ATE_signed;
+        byte_size = 2;
+        break;
+      case 'Z':
+        name = "boolean";
+        encoding = DW_ATE_boolean;
+        byte_size = 1;
+        break;
+      case 'V':
+        LOG(FATAL) << "Void type should not be encoded";
+        UNREACHABLE();
+      default:
+        LOG(FATAL) << "Unknown dex type descriptor: \"" << desc << "\"";
+        UNREACHABLE();
+      }
+      CloseNamespacesAboveDepth(0);  // Declare in root namespace.
+      offset = info_.StartTag(DW_TAG_base_type);
+      WriteName(name);
+      info_.WriteData1(DW_AT_encoding, encoding);
+      info_.WriteData1(DW_AT_byte_size, byte_size);
+      info_.EndTag();
+    }
+
+    type_cache_.emplace(desc, offset);
+    return offset;
+  }
+
+  // Start DW_TAG_class_type tag nested in DW_TAG_namespace tags.
+  // Returns offset of the class tag in the compilation unit.
+  size_t StartClassTag(const char* desc) {
+    std::string name = SetNamespaceForClass(desc);
+    size_t offset = info_.StartTag(dwarf::DW_TAG_class_type);
+    WriteName(name.c_str());
+    return offset;
+  }
+
+  void EndClassTag() {
+    info_.EndTag();
+  }
+
+  // Set the current namespace nesting to one required by the given class.
+  // Returns the class name with namespaces, 'L', and ';' stripped.
+  std::string SetNamespaceForClass(const char* desc) {
+    DCHECK(desc != nullptr && desc[0] == 'L');
+    desc++;  // Skip the initial 'L'.
+    size_t depth = 0;
+    for (const char* end; (end = strchr(desc, '/')) != nullptr; desc = end + 1, ++depth) {
+      // Check whether the name at this depth is already what we need.
+      if (depth < current_namespace_.size()) {
+        const std::string& name = current_namespace_[depth];
+        if (name.compare(0, name.size(), desc, end - desc) == 0) {
+          continue;
+        }
+      }
+      // Otherwise we need to open a new namespace tag at this depth.
+      CloseNamespacesAboveDepth(depth);
+      info_.StartTag(dwarf::DW_TAG_namespace);
+      std::string name(desc, end - desc);
+      WriteName(name.c_str());
+      current_namespace_.push_back(std::move(name));
+    }
+    CloseNamespacesAboveDepth(depth);
+    return std::string(desc, strchr(desc, ';') - desc);
+  }
+
+  // Close namespace tags to reach the given nesting depth.
+  void CloseNamespacesAboveDepth(size_t depth) {
+    DCHECK_LE(depth, current_namespace_.size());
+    while (current_namespace_.size() > depth) {
+      info_.EndTag();
+      current_namespace_.pop_back();
+    }
+  }
+
+  // For access to the ELF sections.
+  ElfDebugInfoWriter<ElfTypes>* owner_;
+  // Temporary buffer to create and store the entries.
+  dwarf::DebugInfoEntryWriter<> info_;
+  // Cache of already translated type descriptors.
+  std::map<std::string, size_t> type_cache_;  // type_desc -> definition_offset.
+  // 32-bit references which need to be resolved to a type later.
+  // Given type may be used multiple times.  Therefore we need a multimap.
+  std::multimap<std::string, size_t> lazy_types_;  // type_desc -> patch_offset.
+  // The current set of open namespace tags which are active and not closed yet.
+  std::vector<std::string> current_namespace_;
+};
+
+}  // namespace debug
+}  // namespace art
+
+#endif  // ART_COMPILER_DEBUG_ELF_DEBUG_INFO_WRITER_H_
+
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
new file mode 100644
index 0000000..ac0f4ca
--- /dev/null
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -0,0 +1,247 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_LINE_WRITER_H_
+#define ART_COMPILER_DEBUG_ELF_DEBUG_LINE_WRITER_H_
+
+#include <vector>
+
+#include "compiled_method.h"
+#include "debug/dwarf/debug_line_opcode_writer.h"
+#include "debug/dwarf/headers.h"
+#include "debug/elf_compilation_unit.h"
+#include "dex_file-inl.h"
+#include "dex_file.h"
+#include "elf_builder.h"
+#include "stack_map.h"
+
+namespace art {
+namespace debug {
+
+typedef std::vector<DexFile::PositionInfo> PositionInfos;
+
+static bool PositionInfoCallback(void* ctx, const DexFile::PositionInfo& entry) {
+  static_cast<PositionInfos*>(ctx)->push_back(entry);
+  return false;
+}
+
+template<typename ElfTypes>
+class ElfDebugLineWriter {
+  using Elf_Addr = typename ElfTypes::Addr;
+
+ public:
+  explicit ElfDebugLineWriter(ElfBuilder<ElfTypes>* builder) : builder_(builder) {
+  }
+
+  void Start() {
+    builder_->GetDebugLine()->Start();
+  }
+
+  // Write line table for given set of methods.
+  // Returns the number of bytes written.
+  size_t WriteCompilationUnit(ElfCompilationUnit& compilation_unit) {
+    const bool is64bit = Is64BitInstructionSet(builder_->GetIsa());
+    const Elf_Addr text_address = builder_->GetText()->Exists()
+        ? builder_->GetText()->GetAddress()
+        : 0;
+
+    compilation_unit.debug_line_offset = builder_->GetDebugLine()->GetSize();
+
+    std::vector<dwarf::FileEntry> files;
+    std::unordered_map<std::string, size_t> files_map;
+    std::vector<std::string> directories;
+    std::unordered_map<std::string, size_t> directories_map;
+    int code_factor_bits_ = 0;
+    int dwarf_isa = -1;
+    switch (builder_->GetIsa()) {
+      case kArm:  // arm actually means thumb2.
+      case kThumb2:
+        code_factor_bits_ = 1;  // 16-bit instructions
+        dwarf_isa = 1;  // DW_ISA_ARM_thumb.
+        break;
+      case kArm64:
+      case kMips:
+      case kMips64:
+        code_factor_bits_ = 2;  // 32-bit instructions
+        break;
+      case kNone:
+      case kX86:
+      case kX86_64:
+        break;
+    }
+    dwarf::DebugLineOpCodeWriter<> opcodes(is64bit, code_factor_bits_);
+    for (const MethodDebugInfo* mi : compilation_unit.methods) {
+      // Ignore function if we have already generated line table for the same address.
+      // It would confuse the debugger and the DWARF specification forbids it.
+      if (mi->deduped) {
+        continue;
+      }
+
+      ArrayRef<const SrcMapElem> src_mapping_table;
+      std::vector<SrcMapElem> src_mapping_table_from_stack_maps;
+      if (mi->IsFromOptimizingCompiler()) {
+        // Use stack maps to create mapping table from pc to dex.
+        const CodeInfo code_info(mi->compiled_method->GetVmapTable().data());
+        const StackMapEncoding encoding = code_info.ExtractEncoding();
+        for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) {
+          StackMap stack_map = code_info.GetStackMapAt(s, encoding);
+          DCHECK(stack_map.IsValid());
+          // Emit only locations where we have local-variable information.
+          // In particular, skip mappings inside the prologue.
+          if (stack_map.HasDexRegisterMap(encoding)) {
+            const uint32_t pc = stack_map.GetNativePcOffset(encoding);
+            const int32_t dex = stack_map.GetDexPc(encoding);
+            src_mapping_table_from_stack_maps.push_back({pc, dex});
+          }
+        }
+        std::sort(src_mapping_table_from_stack_maps.begin(),
+                  src_mapping_table_from_stack_maps.end());
+        src_mapping_table = ArrayRef<const SrcMapElem>(src_mapping_table_from_stack_maps);
+      } else {
+        // Use the mapping table provided by the quick compiler.
+        src_mapping_table = mi->compiled_method->GetSrcMappingTable();
+      }
+
+      if (src_mapping_table.empty()) {
+        continue;
+      }
+
+      Elf_Addr method_address = text_address + mi->low_pc;
+
+      PositionInfos position_infos;
+      const DexFile* dex = mi->dex_file;
+      if (!dex->DecodeDebugPositionInfo(mi->code_item, PositionInfoCallback, &position_infos)) {
+        continue;
+      }
+
+      if (position_infos.empty()) {
+        continue;
+      }
+
+      opcodes.SetAddress(method_address);
+      if (dwarf_isa != -1) {
+        opcodes.SetISA(dwarf_isa);
+      }
+
+      // Get and deduplicate directory and filename.
+      int file_index = 0;  // 0 - primary source file of the compilation.
+      auto& dex_class_def = dex->GetClassDef(mi->class_def_index);
+      const char* source_file = dex->GetSourceFile(dex_class_def);
+      if (source_file != nullptr) {
+        std::string file_name(source_file);
+        size_t file_name_slash = file_name.find_last_of('/');
+        std::string class_name(dex->GetClassDescriptor(dex_class_def));
+        size_t class_name_slash = class_name.find_last_of('/');
+        std::string full_path(file_name);
+
+        // Guess directory from package name.
+        int directory_index = 0;  // 0 - current directory of the compilation.
+        if (file_name_slash == std::string::npos &&  // Just filename.
+            class_name.front() == 'L' &&  // Type descriptor for a class.
+            class_name_slash != std::string::npos) {  // Has package name.
+          std::string package_name = class_name.substr(1, class_name_slash - 1);
+          auto it = directories_map.find(package_name);
+          if (it == directories_map.end()) {
+            directory_index = 1 + directories.size();
+            directories_map.emplace(package_name, directory_index);
+            directories.push_back(package_name);
+          } else {
+            directory_index = it->second;
+          }
+          full_path = package_name + "/" + file_name;
+        }
+
+        // Add file entry.
+        auto it2 = files_map.find(full_path);
+        if (it2 == files_map.end()) {
+          file_index = 1 + files.size();
+          files_map.emplace(full_path, file_index);
+          files.push_back(dwarf::FileEntry {
+            file_name,
+            directory_index,
+            0,  // Modification time - NA.
+            0,  // File size - NA.
+          });
+        } else {
+          file_index = it2->second;
+        }
+      }
+      opcodes.SetFile(file_index);
+
+      // Generate mapping opcodes from PC to Java lines.
+      if (file_index != 0) {
+        bool first = true;
+        for (SrcMapElem pc2dex : src_mapping_table) {
+          uint32_t pc = pc2dex.from_;
+          int dex_pc = pc2dex.to_;
+          // Find the mapping with address which is greater than our dex pc; then go back one step.
+          auto ub = std::upper_bound(position_infos.begin(), position_infos.end(), dex_pc,
+              [](uint32_t address, const DexFile::PositionInfo& entry) {
+                  return address < entry.address_;
+              });
+          if (ub != position_infos.begin()) {
+            int line = (--ub)->line_;
+            if (first) {
+              first = false;
+              if (pc > 0) {
+                // Assume that any preceding code is prologue.
+                int first_line = position_infos.front().line_;
+                // Prologue is not a sensible place for a breakpoint.
+                opcodes.NegateStmt();
+                opcodes.AddRow(method_address, first_line);
+                opcodes.NegateStmt();
+                opcodes.SetPrologueEnd();
+              }
+              opcodes.AddRow(method_address + pc, line);
+            } else if (line != opcodes.CurrentLine()) {
+              opcodes.AddRow(method_address + pc, line);
+            }
+          }
+        }
+      } else {
+        // line 0 - instruction cannot be attributed to any source line.
+        opcodes.AddRow(method_address, 0);
+      }
+
+      opcodes.AdvancePC(text_address + mi->high_pc);
+      opcodes.EndSequence();
+    }
+    std::vector<uint8_t> buffer;
+    buffer.reserve(opcodes.data()->size() + KB);
+    size_t offset = builder_->GetDebugLine()->GetSize();
+    WriteDebugLineTable(directories, files, opcodes, offset, &buffer, &debug_line_patches_);
+    builder_->GetDebugLine()->WriteFully(buffer.data(), buffer.size());
+    return buffer.size();
+  }
+
+  void End(bool write_oat_patches) {
+    builder_->GetDebugLine()->End();
+    if (write_oat_patches) {
+      builder_->WritePatches(".debug_line.oat_patches",
+                             ArrayRef<const uintptr_t>(debug_line_patches_));
+    }
+  }
+
+ private:
+  ElfBuilder<ElfTypes>* builder_;
+  std::vector<uintptr_t> debug_line_patches_;
+};
+
+}  // namespace debug
+}  // namespace art
+
+#endif  // ART_COMPILER_DEBUG_ELF_DEBUG_LINE_WRITER_H_
+
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
new file mode 100644
index 0000000..fd7f949
--- /dev/null
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_LOC_WRITER_H_
+#define ART_COMPILER_DEBUG_ELF_DEBUG_LOC_WRITER_H_
+
+#include <map>
+
+#include "arch/instruction_set.h"
+#include "compiled_method.h"
+#include "debug/dwarf/debug_info_entry_writer.h"
+#include "debug/dwarf/register.h"
+#include "debug/method_debug_info.h"
+#include "stack_map.h"
+
+namespace art {
+namespace debug {
+using Reg = dwarf::Reg;
+
+static Reg GetDwarfCoreReg(InstructionSet isa, int machine_reg) {
+  switch (isa) {
+    case kArm:
+    case kThumb2:
+      return Reg::ArmCore(machine_reg);
+    case kArm64:
+      return Reg::Arm64Core(machine_reg);
+    case kX86:
+      return Reg::X86Core(machine_reg);
+    case kX86_64:
+      return Reg::X86_64Core(machine_reg);
+    case kMips:
+      return Reg::MipsCore(machine_reg);
+    case kMips64:
+      return Reg::Mips64Core(machine_reg);
+    case kNone:
+      LOG(FATAL) << "No instruction set";
+  }
+  UNREACHABLE();
+}
+
+static Reg GetDwarfFpReg(InstructionSet isa, int machine_reg) {
+  switch (isa) {
+    case kArm:
+    case kThumb2:
+      return Reg::ArmFp(machine_reg);
+    case kArm64:
+      return Reg::Arm64Fp(machine_reg);
+    case kX86:
+      return Reg::X86Fp(machine_reg);
+    case kX86_64:
+      return Reg::X86_64Fp(machine_reg);
+    case kMips:
+      return Reg::MipsFp(machine_reg);
+    case kMips64:
+      return Reg::Mips64Fp(machine_reg);
+    case kNone:
+      LOG(FATAL) << "No instruction set";
+  }
+  UNREACHABLE();
+}
+
+struct VariableLocation {
+  uint32_t low_pc;
+  uint32_t high_pc;
+  DexRegisterLocation reg_lo;  // May be None if the location is unknown.
+  DexRegisterLocation reg_hi;  // Most significant bits of 64-bit value.
+};
+
+// Get the location of given dex register (e.g. stack or machine register).
+// Note that the location might be different based on the current pc.
+// The result will cover all ranges where the variable is in scope.
+std::vector<VariableLocation> GetVariableLocations(const MethodDebugInfo* method_info,
+                                                   uint16_t vreg,
+                                                   bool is64bitValue,
+                                                   uint32_t dex_pc_low,
+                                                   uint32_t dex_pc_high) {
+  std::vector<VariableLocation> variable_locations;
+
+  // Get stack maps sorted by pc (they might not be sorted internally).
+  const CodeInfo code_info(method_info->compiled_method->GetVmapTable().data());
+  const StackMapEncoding encoding = code_info.ExtractEncoding();
+  std::map<uint32_t, StackMap> stack_maps;
+  for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) {
+    StackMap stack_map = code_info.GetStackMapAt(s, encoding);
+    DCHECK(stack_map.IsValid());
+    const uint32_t low_pc = method_info->low_pc + stack_map.GetNativePcOffset(encoding);
+    DCHECK_LE(low_pc, method_info->high_pc);
+    stack_maps.emplace(low_pc, stack_map);
+  }
+
+  // Create entries for the requested register based on stack map data.
+  for (auto it = stack_maps.begin(); it != stack_maps.end(); it++) {
+    const StackMap& stack_map = it->second;
+    const uint32_t low_pc = it->first;
+    auto next_it = it;
+    next_it++;
+    const uint32_t high_pc = next_it != stack_maps.end() ? next_it->first
+                                                         : method_info->high_pc;
+    DCHECK_LE(low_pc, high_pc);
+    if (low_pc == high_pc) {
+      continue;  // Ignore if the address range is empty.
+    }
+
+    // Check that the stack map is in the requested range.
+    uint32_t dex_pc = stack_map.GetDexPc(encoding);
+    if (!(dex_pc_low <= dex_pc && dex_pc < dex_pc_high)) {
+      continue;
+    }
+
+    // Find the location of the dex register.
+    DexRegisterLocation reg_lo = DexRegisterLocation::None();
+    DexRegisterLocation reg_hi = DexRegisterLocation::None();
+    if (stack_map.HasDexRegisterMap(encoding)) {
+      DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(
+          stack_map, encoding, method_info->code_item->registers_size_);
+      reg_lo = dex_register_map.GetDexRegisterLocation(
+          vreg, method_info->code_item->registers_size_, code_info, encoding);
+      if (is64bitValue) {
+        reg_hi = dex_register_map.GetDexRegisterLocation(
+            vreg + 1, method_info->code_item->registers_size_, code_info, encoding);
+      }
+    }
+
+    // Add location entry for this address range.
+    if (!variable_locations.empty() &&
+        variable_locations.back().reg_lo == reg_lo &&
+        variable_locations.back().reg_hi == reg_hi &&
+        variable_locations.back().high_pc == low_pc) {
+      // Merge with the previous entry (extend its range).
+      variable_locations.back().high_pc = high_pc;
+    } else {
+      variable_locations.push_back({low_pc, high_pc, reg_lo, reg_hi});
+    }
+  }
+
+  return variable_locations;
+}
+
+// Write table into .debug_loc which describes location of dex register.
+// The dex register might be valid only at some points and it might
+// move between machine registers and stack.
+static void WriteDebugLocEntry(const MethodDebugInfo* method_info,
+                               uint16_t vreg,
+                               bool is64bitValue,
+                               uint32_t compilation_unit_low_pc,
+                               uint32_t dex_pc_low,
+                               uint32_t dex_pc_high,
+                               InstructionSet isa,
+                               dwarf::DebugInfoEntryWriter<>* debug_info,
+                               std::vector<uint8_t>* debug_loc_buffer,
+                               std::vector<uint8_t>* debug_ranges_buffer) {
+  using Kind = DexRegisterLocation::Kind;
+  if (!method_info->IsFromOptimizingCompiler()) {
+    return;
+  }
+
+  dwarf::Writer<> debug_loc(debug_loc_buffer);
+  dwarf::Writer<> debug_ranges(debug_ranges_buffer);
+  debug_info->WriteSecOffset(dwarf::DW_AT_location, debug_loc.size());
+  debug_info->WriteSecOffset(dwarf::DW_AT_start_scope, debug_ranges.size());
+
+  std::vector<VariableLocation> variable_locations = GetVariableLocations(
+      method_info,
+      vreg,
+      is64bitValue,
+      dex_pc_low,
+      dex_pc_high);
+
+  // Write .debug_loc entries.
+  const bool is64bit = Is64BitInstructionSet(isa);
+  std::vector<uint8_t> expr_buffer;
+  for (const VariableLocation& variable_location : variable_locations) {
+    // Translate dex register location to DWARF expression.
+    // Note that 64-bit value might be split to two distinct locations.
+    // (for example, two 32-bit machine registers, or even stack and register)
+    dwarf::Expression expr(&expr_buffer);
+    DexRegisterLocation reg_lo = variable_location.reg_lo;
+    DexRegisterLocation reg_hi = variable_location.reg_hi;
+    for (int piece = 0; piece < (is64bitValue ? 2 : 1); piece++) {
+      DexRegisterLocation reg_loc = (piece == 0 ? reg_lo : reg_hi);
+      const Kind kind = reg_loc.GetKind();
+      const int32_t value = reg_loc.GetValue();
+      if (kind == Kind::kInStack) {
+        const size_t frame_size = method_info->compiled_method->GetFrameSizeInBytes();
+        // The stack offset is relative to SP. Make it relative to CFA.
+        expr.WriteOpFbreg(value - frame_size);
+        if (piece == 0 && reg_hi.GetKind() == Kind::kInStack &&
+            reg_hi.GetValue() == value + 4) {
+          break;  // the high word is correctly implied by the low word.
+        }
+      } else if (kind == Kind::kInRegister) {
+        expr.WriteOpReg(GetDwarfCoreReg(isa, value).num());
+        if (piece == 0 && reg_hi.GetKind() == Kind::kInRegisterHigh &&
+            reg_hi.GetValue() == value) {
+          break;  // the high word is correctly implied by the low word.
+        }
+      } else if (kind == Kind::kInFpuRegister) {
+        if ((isa == kArm || isa == kThumb2) &&
+            piece == 0 && reg_hi.GetKind() == Kind::kInFpuRegister &&
+            reg_hi.GetValue() == value + 1 && value % 2 == 0) {
+          // Translate S register pair to D register (e.g. S4+S5 to D2).
+          expr.WriteOpReg(Reg::ArmDp(value / 2).num());
+          break;
+        }
+        expr.WriteOpReg(GetDwarfFpReg(isa, value).num());
+        if (piece == 0 && reg_hi.GetKind() == Kind::kInFpuRegisterHigh &&
+            reg_hi.GetValue() == reg_lo.GetValue()) {
+          break;  // the high word is correctly implied by the low word.
+        }
+      } else if (kind == Kind::kConstant) {
+        expr.WriteOpConsts(value);
+        expr.WriteOpStackValue();
+      } else if (kind == Kind::kNone) {
+        break;
+      } else {
+        // kInStackLargeOffset and kConstantLargeValue are hidden by GetKind().
+        // kInRegisterHigh and kInFpuRegisterHigh should be handled by
+        // the special cases above and they should not occur alone.
+        LOG(ERROR) << "Unexpected register location kind: "
+                   << DexRegisterLocation::PrettyDescriptor(kind);
+        break;
+      }
+      if (is64bitValue) {
+        // Write the marker which is needed by split 64-bit values.
+        // This code is skipped by the special cases.
+        expr.WriteOpPiece(4);
+      }
+    }
+
+    if (expr.size() > 0) {
+      if (is64bit) {
+        debug_loc.PushUint64(variable_location.low_pc - compilation_unit_low_pc);
+        debug_loc.PushUint64(variable_location.high_pc - compilation_unit_low_pc);
+      } else {
+        debug_loc.PushUint32(variable_location.low_pc - compilation_unit_low_pc);
+        debug_loc.PushUint32(variable_location.high_pc - compilation_unit_low_pc);
+      }
+      // Write the expression.
+      debug_loc.PushUint16(expr.size());
+      debug_loc.PushData(expr.data());
+    } else {
+      // Do not generate .debug_loc if the location is not known.
+    }
+  }
+  // Write end-of-list entry.
+  if (is64bit) {
+    debug_loc.PushUint64(0);
+    debug_loc.PushUint64(0);
+  } else {
+    debug_loc.PushUint32(0);
+    debug_loc.PushUint32(0);
+  }
+
+  // Write .debug_ranges entries.
+  // This includes ranges where the variable is in scope but the location is not known.
+  for (size_t i = 0; i < variable_locations.size(); i++) {
+    uint32_t low_pc = variable_locations[i].low_pc;
+    uint32_t high_pc = variable_locations[i].high_pc;
+    while (i + 1 < variable_locations.size() && variable_locations[i+1].low_pc == high_pc) {
+      // Merge address range with the next entry.
+      high_pc = variable_locations[++i].high_pc;
+    }
+    if (is64bit) {
+      debug_ranges.PushUint64(low_pc - compilation_unit_low_pc);
+      debug_ranges.PushUint64(high_pc - compilation_unit_low_pc);
+    } else {
+      debug_ranges.PushUint32(low_pc - compilation_unit_low_pc);
+      debug_ranges.PushUint32(high_pc - compilation_unit_low_pc);
+    }
+  }
+  // Write end-of-list entry.
+  if (is64bit) {
+    debug_ranges.PushUint64(0);
+    debug_ranges.PushUint64(0);
+  } else {
+    debug_ranges.PushUint32(0);
+    debug_ranges.PushUint32(0);
+  }
+}
+
+}  // namespace debug
+}  // namespace art
+
+#endif  // ART_COMPILER_DEBUG_ELF_DEBUG_LOC_WRITER_H_
+
diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc
new file mode 100644
index 0000000..01bd679
--- /dev/null
+++ b/compiler/debug/elf_debug_writer.cc
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "elf_debug_writer.h"
+
+#include <vector>
+
+#include "debug/dwarf/dwarf_constants.h"
+#include "debug/elf_compilation_unit.h"
+#include "debug/elf_debug_frame_writer.h"
+#include "debug/elf_debug_info_writer.h"
+#include "debug/elf_debug_line_writer.h"
+#include "debug/elf_debug_loc_writer.h"
+#include "debug/elf_gnu_debugdata_writer.h"
+#include "debug/elf_symtab_writer.h"
+#include "debug/method_debug_info.h"
+#include "elf_builder.h"
+#include "linker/vector_output_stream.h"
+#include "utils/array_ref.h"
+
+namespace art {
+namespace debug {
+
+template <typename ElfTypes>
+void WriteDebugInfo(ElfBuilder<ElfTypes>* builder,
+                    const ArrayRef<const MethodDebugInfo>& method_infos,
+                    dwarf::CFIFormat cfi_format,
+                    bool write_oat_patches) {
+  // Add methods to .symtab.
+  WriteDebugSymbols(builder, method_infos, true /* with_signature */);
+  // Generate CFI (stack unwinding information).
+  WriteCFISection(builder, method_infos, cfi_format, write_oat_patches);
+  // Write DWARF .debug_* sections.
+  WriteDebugSections(builder, method_infos, write_oat_patches);
+}
+
+template<typename ElfTypes>
+static void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
+                               const ArrayRef<const MethodDebugInfo>& method_infos,
+                               bool write_oat_patches) {
+  // Group the methods into compilation units based on source file.
+  std::vector<ElfCompilationUnit> compilation_units;
+  const char* last_source_file = nullptr;
+  for (const MethodDebugInfo& mi : method_infos) {
+    auto& dex_class_def = mi.dex_file->GetClassDef(mi.class_def_index);
+    const char* source_file = mi.dex_file->GetSourceFile(dex_class_def);
+    if (compilation_units.empty() || source_file != last_source_file) {
+      compilation_units.push_back(ElfCompilationUnit());
+    }
+    ElfCompilationUnit& cu = compilation_units.back();
+    cu.methods.push_back(&mi);
+    cu.low_pc = std::min(cu.low_pc, mi.low_pc);
+    cu.high_pc = std::max(cu.high_pc, mi.high_pc);
+    last_source_file = source_file;
+  }
+
+  // Write .debug_line section.
+  if (!compilation_units.empty()) {
+    ElfDebugLineWriter<ElfTypes> line_writer(builder);
+    line_writer.Start();
+    for (auto& compilation_unit : compilation_units) {
+      line_writer.WriteCompilationUnit(compilation_unit);
+    }
+    line_writer.End(write_oat_patches);
+  }
+
+  // Write .debug_info section.
+  if (!compilation_units.empty()) {
+    ElfDebugInfoWriter<ElfTypes> info_writer(builder);
+    info_writer.Start();
+    for (const auto& compilation_unit : compilation_units) {
+      ElfCompilationUnitWriter<ElfTypes> cu_writer(&info_writer);
+      cu_writer.Write(compilation_unit);
+    }
+    info_writer.End(write_oat_patches);
+  }
+}
+
+std::vector<uint8_t> MakeMiniDebugInfo(
+    InstructionSet isa,
+    size_t rodata_size,
+    size_t text_size,
+    const ArrayRef<const MethodDebugInfo>& method_infos) {
+  if (Is64BitInstructionSet(isa)) {
+    return MakeMiniDebugInfoInternal<ElfTypes64>(isa, rodata_size, text_size, method_infos);
+  } else {
+    return MakeMiniDebugInfoInternal<ElfTypes32>(isa, rodata_size, text_size, method_infos);
+  }
+}
+
+template <typename ElfTypes>
+static ArrayRef<const uint8_t> WriteDebugElfFileForMethodInternal(
+    const MethodDebugInfo& method_info) {
+  const InstructionSet isa = method_info.compiled_method->GetInstructionSet();
+  std::vector<uint8_t> buffer;
+  buffer.reserve(KB);
+  VectorOutputStream out("Debug ELF file", &buffer);
+  std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, &out));
+  // No program headers since the ELF file is not linked and has no allocated sections.
+  builder->Start(false /* write_program_headers */);
+  WriteDebugInfo(builder.get(),
+                 ArrayRef<const MethodDebugInfo>(&method_info, 1),
+                 dwarf::DW_DEBUG_FRAME_FORMAT,
+                 false /* write_oat_patches */);
+  builder->End();
+  CHECK(builder->Good());
+  // Make a copy of the buffer.  We want to shrink it anyway.
+  uint8_t* result = new uint8_t[buffer.size()];
+  CHECK(result != nullptr);
+  memcpy(result, buffer.data(), buffer.size());
+  return ArrayRef<const uint8_t>(result, buffer.size());
+}
+
+ArrayRef<const uint8_t> WriteDebugElfFileForMethod(const MethodDebugInfo& method_info) {
+  const InstructionSet isa = method_info.compiled_method->GetInstructionSet();
+  if (Is64BitInstructionSet(isa)) {
+    return WriteDebugElfFileForMethodInternal<ElfTypes64>(method_info);
+  } else {
+    return WriteDebugElfFileForMethodInternal<ElfTypes32>(method_info);
+  }
+}
+
+template <typename ElfTypes>
+static ArrayRef<const uint8_t> WriteDebugElfFileForClassesInternal(
+    const InstructionSet isa, const ArrayRef<mirror::Class*>& types)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  std::vector<uint8_t> buffer;
+  buffer.reserve(KB);
+  VectorOutputStream out("Debug ELF file", &buffer);
+  std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, &out));
+  // No program headers since the ELF file is not linked and has no allocated sections.
+  builder->Start(false /* write_program_headers */);
+  ElfDebugInfoWriter<ElfTypes> info_writer(builder.get());
+  info_writer.Start();
+  ElfCompilationUnitWriter<ElfTypes> cu_writer(&info_writer);
+  cu_writer.Write(types);
+  info_writer.End(false /* write_oat_patches */);
+
+  builder->End();
+  CHECK(builder->Good());
+  // Make a copy of the buffer.  We want to shrink it anyway.
+  uint8_t* result = new uint8_t[buffer.size()];
+  CHECK(result != nullptr);
+  memcpy(result, buffer.data(), buffer.size());
+  return ArrayRef<const uint8_t>(result, buffer.size());
+}
+
+ArrayRef<const uint8_t> WriteDebugElfFileForClasses(const InstructionSet isa,
+                                                    const ArrayRef<mirror::Class*>& types) {
+  if (Is64BitInstructionSet(isa)) {
+    return WriteDebugElfFileForClassesInternal<ElfTypes64>(isa, types);
+  } else {
+    return WriteDebugElfFileForClassesInternal<ElfTypes32>(isa, types);
+  }
+}
+
+// Explicit instantiations
+template void WriteDebugInfo<ElfTypes32>(
+    ElfBuilder<ElfTypes32>* builder,
+    const ArrayRef<const MethodDebugInfo>& method_infos,
+    dwarf::CFIFormat cfi_format,
+    bool write_oat_patches);
+template void WriteDebugInfo<ElfTypes64>(
+    ElfBuilder<ElfTypes64>* builder,
+    const ArrayRef<const MethodDebugInfo>& method_infos,
+    dwarf::CFIFormat cfi_format,
+    bool write_oat_patches);
+
+}  // namespace debug
+}  // namespace art
diff --git a/compiler/elf_writer_debug.h b/compiler/debug/elf_debug_writer.h
similarity index 60%
rename from compiler/elf_writer_debug.h
rename to compiler/debug/elf_debug_writer.h
index e19da08..103b501 100644
--- a/compiler/elf_writer_debug.h
+++ b/compiler/debug/elf_debug_writer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 The Android Open Source Project
+ * Copyright (C) 2016 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,12 +14,12 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_ELF_WRITER_DEBUG_H_
-#define ART_COMPILER_ELF_WRITER_DEBUG_H_
+#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_WRITER_H_
+#define ART_COMPILER_DEBUG_ELF_DEBUG_WRITER_H_
 
 #include "base/macros.h"
 #include "base/mutex.h"
-#include "dwarf/dwarf_constants.h"
+#include "debug/dwarf/dwarf_constants.h"
 #include "elf_builder.h"
 #include "utils/array_ref.h"
 
@@ -27,25 +27,27 @@
 namespace mirror {
 class Class;
 }
-namespace dwarf {
+namespace debug {
 struct MethodDebugInfo;
 
 template <typename ElfTypes>
 void WriteDebugInfo(ElfBuilder<ElfTypes>* builder,
                     const ArrayRef<const MethodDebugInfo>& method_infos,
-                    CFIFormat cfi_format);
+                    dwarf::CFIFormat cfi_format,
+                    bool write_oat_patches);
 
-template <typename ElfTypes>
-void WriteMiniDebugInfo(ElfBuilder<ElfTypes>* builder,
-                        const ArrayRef<const MethodDebugInfo>& method_infos);
+std::vector<uint8_t> MakeMiniDebugInfo(InstructionSet isa,
+                                       size_t rodata_section_size,
+                                       size_t text_section_size,
+                                       const ArrayRef<const MethodDebugInfo>& method_infos);
 
-ArrayRef<const uint8_t> WriteDebugElfFileForMethod(const dwarf::MethodDebugInfo& method_info);
+ArrayRef<const uint8_t> WriteDebugElfFileForMethod(const MethodDebugInfo& method_info);
 
 ArrayRef<const uint8_t> WriteDebugElfFileForClasses(const InstructionSet isa,
                                                     const ArrayRef<mirror::Class*>& types)
     SHARED_REQUIRES(Locks::mutator_lock_);
 
-}  // namespace dwarf
+}  // namespace debug
 }  // namespace art
 
-#endif  // ART_COMPILER_ELF_WRITER_DEBUG_H_
+#endif  // ART_COMPILER_DEBUG_ELF_DEBUG_WRITER_H_
diff --git a/compiler/debug/elf_gnu_debugdata_writer.h b/compiler/debug/elf_gnu_debugdata_writer.h
new file mode 100644
index 0000000..5c7d1c7
--- /dev/null
+++ b/compiler/debug/elf_gnu_debugdata_writer.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_ELF_GNU_DEBUGDATA_WRITER_H_
+#define ART_COMPILER_DEBUG_ELF_GNU_DEBUGDATA_WRITER_H_
+
+#include <vector>
+
+#include "arch/instruction_set.h"
+#include "elf_builder.h"
+#include "linker/vector_output_stream.h"
+
+// liblzma.
+#include "7zCrc.h"
+#include "XzCrc64.h"
+#include "XzEnc.h"
+
+namespace art {
+namespace debug {
+
+static void XzCompress(const std::vector<uint8_t>* src, std::vector<uint8_t>* dst) {
+  // Configure the compression library.
+  CrcGenerateTable();
+  Crc64GenerateTable();
+  CLzma2EncProps lzma2Props;
+  Lzma2EncProps_Init(&lzma2Props);
+  lzma2Props.lzmaProps.level = 1;  // Fast compression.
+  Lzma2EncProps_Normalize(&lzma2Props);
+  CXzProps props;
+  XzProps_Init(&props);
+  props.lzma2Props = &lzma2Props;
+  // Implement the required interface for communication (written in C so no virtual methods).
+  struct XzCallbacks : public ISeqInStream, public ISeqOutStream, public ICompressProgress {
+    static SRes ReadImpl(void* p, void* buf, size_t* size) {
+      auto* ctx = static_cast<XzCallbacks*>(reinterpret_cast<ISeqInStream*>(p));
+      *size = std::min(*size, ctx->src_->size() - ctx->src_pos_);
+      memcpy(buf, ctx->src_->data() + ctx->src_pos_, *size);
+      ctx->src_pos_ += *size;
+      return SZ_OK;
+    }
+    static size_t WriteImpl(void* p, const void* buf, size_t size) {
+      auto* ctx = static_cast<XzCallbacks*>(reinterpret_cast<ISeqOutStream*>(p));
+      const uint8_t* buffer = reinterpret_cast<const uint8_t*>(buf);
+      ctx->dst_->insert(ctx->dst_->end(), buffer, buffer + size);
+      return size;
+    }
+    static SRes ProgressImpl(void* , UInt64, UInt64) {
+      return SZ_OK;
+    }
+    size_t src_pos_;
+    const std::vector<uint8_t>* src_;
+    std::vector<uint8_t>* dst_;
+  };
+  XzCallbacks callbacks;
+  callbacks.Read = XzCallbacks::ReadImpl;
+  callbacks.Write = XzCallbacks::WriteImpl;
+  callbacks.Progress = XzCallbacks::ProgressImpl;
+  callbacks.src_pos_ = 0;
+  callbacks.src_ = src;
+  callbacks.dst_ = dst;
+  // Compress.
+  SRes res = Xz_Encode(&callbacks, &callbacks, &props, &callbacks);
+  CHECK_EQ(res, SZ_OK);
+}
+
+template <typename ElfTypes>
+static std::vector<uint8_t> MakeMiniDebugInfoInternal(
+    InstructionSet isa,
+    size_t rodata_section_size,
+    size_t text_section_size,
+    const ArrayRef<const MethodDebugInfo>& method_infos) {
+  std::vector<uint8_t> buffer;
+  buffer.reserve(KB);
+  VectorOutputStream out("Mini-debug-info ELF file", &buffer);
+  std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, &out));
+  builder->Start();
+  // Mirror .rodata and .text as NOBITS sections.
+  // It is needed to detect relocations after compression.
+  builder->GetRoData()->WriteNoBitsSection(rodata_section_size);
+  builder->GetText()->WriteNoBitsSection(text_section_size);
+  WriteDebugSymbols(builder.get(), method_infos, false /* with_signature */);
+  WriteCFISection(builder.get(),
+                  method_infos,
+                  dwarf::DW_DEBUG_FRAME_FORMAT,
+                  false /* write_oat_patches */);
+  builder->End();
+  CHECK(builder->Good());
+  std::vector<uint8_t> compressed_buffer;
+  compressed_buffer.reserve(buffer.size() / 4);
+  XzCompress(&buffer, &compressed_buffer);
+  return compressed_buffer;
+}
+
+}  // namespace debug
+}  // namespace art
+
+#endif  // ART_COMPILER_DEBUG_ELF_GNU_DEBUGDATA_WRITER_H_
+
diff --git a/compiler/debug/elf_symtab_writer.h b/compiler/debug/elf_symtab_writer.h
new file mode 100644
index 0000000..41508f4
--- /dev/null
+++ b/compiler/debug/elf_symtab_writer.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_ELF_SYMTAB_WRITER_H_
+#define ART_COMPILER_DEBUG_ELF_SYMTAB_WRITER_H_
+
+#include <unordered_set>
+
+#include "debug/method_debug_info.h"
+#include "elf_builder.h"
+#include "utils.h"
+
+namespace art {
+namespace debug {
+
+// The ARM specification defines three special mapping symbols
+// $a, $t and $d which mark ARM, Thumb and data ranges respectively.
+// These symbols can be used by tools, for example, to pretty
+// print instructions correctly.  Objdump will use them if they
+// exist, but it will still work well without them.
+// However, these extra symbols take space, so let's just generate
+// one symbol which marks the whole .text section as code.
+constexpr bool kGenerateSingleArmMappingSymbol = true;
+
+template <typename ElfTypes>
+static void WriteDebugSymbols(ElfBuilder<ElfTypes>* builder,
+                              const ArrayRef<const MethodDebugInfo>& method_infos,
+                              bool with_signature) {
+  bool generated_mapping_symbol = false;
+  auto* strtab = builder->GetStrTab();
+  auto* symtab = builder->GetSymTab();
+
+  if (method_infos.empty()) {
+    return;
+  }
+
+  // Find all addresses (low_pc) which contain deduped methods.
+  // The first instance of a method is not marked deduped, but the rest are.
+  std::unordered_set<uint32_t> deduped_addresses;
+  for (const MethodDebugInfo& info : method_infos) {
+    if (info.deduped) {
+      deduped_addresses.insert(info.low_pc);
+    }
+  }
+
+  strtab->Start();
+  strtab->Write("");  // strtab should start with empty string.
+  std::string last_name;
+  size_t last_name_offset = 0;
+  for (const MethodDebugInfo& info : method_infos) {
+    if (info.deduped) {
+      continue;  // Add symbol only for the first instance.
+    }
+    std::string name = PrettyMethod(info.dex_method_index, *info.dex_file, with_signature);
+    if (deduped_addresses.find(info.low_pc) != deduped_addresses.end()) {
+      name += " [DEDUPED]";
+    }
+    // If we write method names without signature, we might see the same name multiple times.
+    size_t name_offset = (name == last_name ? last_name_offset : strtab->Write(name));
+
+    const auto* text = builder->GetText()->Exists() ? builder->GetText() : nullptr;
+    const bool is_relative = (text != nullptr);
+    uint32_t low_pc = info.low_pc;
+    // Add in code delta, e.g., thumb bit 0 for Thumb2 code.
+    low_pc += info.compiled_method->CodeDelta();
+    symtab->Add(name_offset,
+                text,
+                low_pc,
+                is_relative,
+                info.high_pc - info.low_pc,
+                STB_GLOBAL,
+                STT_FUNC);
+
+    // Conforming to AAELF, add $t mapping symbol to indicate start of a sequence of thumb2
+    // instructions, so that disassembler tools can correctly disassemble.
+    // Note that even if we generate just a single mapping symbol, ARM's Streamline
+    // requires it to match function symbol.  Just address 0 does not work.
+    if (info.compiled_method->GetInstructionSet() == kThumb2) {
+      if (!generated_mapping_symbol || !kGenerateSingleArmMappingSymbol) {
+        symtab->Add(strtab->Write("$t"), text, info.low_pc & ~1,
+                    is_relative, 0, STB_LOCAL, STT_NOTYPE);
+        generated_mapping_symbol = true;
+      }
+    }
+
+    last_name = std::move(name);
+    last_name_offset = name_offset;
+  }
+  strtab->End();
+
+  // Symbols are buffered and written after names (because they are smaller).
+  // We could also do two passes in this function to avoid the buffering.
+  symtab->Start();
+  symtab->Write();
+  symtab->End();
+}
+
+}  // namespace debug
+}  // namespace art
+
+#endif  // ART_COMPILER_DEBUG_ELF_SYMTAB_WRITER_H_
+
diff --git a/compiler/debug/method_debug_info.h b/compiler/debug/method_debug_info.h
new file mode 100644
index 0000000..6b3dd8c
--- /dev/null
+++ b/compiler/debug/method_debug_info.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_METHOD_DEBUG_INFO_H_
+#define ART_COMPILER_DEBUG_METHOD_DEBUG_INFO_H_
+
+#include "compiled_method.h"
+#include "dex_file.h"
+
+namespace art {
+namespace debug {
+
+struct MethodDebugInfo {
+  const DexFile* dex_file;
+  size_t class_def_index;
+  uint32_t dex_method_index;
+  uint32_t access_flags;
+  const DexFile::CodeItem* code_item;
+  bool deduped;
+  uintptr_t low_pc;
+  uintptr_t high_pc;
+  CompiledMethod* compiled_method;
+
+  bool IsFromOptimizingCompiler() const {
+    return compiled_method->GetQuickCode().size() > 0 &&
+           compiled_method->GetVmapTable().size() > 0 &&
+           compiled_method->GetGcMap().size() == 0 &&
+           code_item != nullptr;
+  }
+};
+
+}  // namespace debug
+}  // namespace art
+
+#endif  // ART_COMPILER_DEBUG_METHOD_DEBUG_INFO_H_
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 22b178c..209f101 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -875,6 +875,7 @@
       move_result = mir_graph->FindMoveResult(bb, invoke);
       result = GenInlineIPut(mir_graph, bb, invoke, move_result, method);
       break;
+    case kInlineOpConstructor:
     case kInlineStringInit:
       return false;
     default:
diff --git a/compiler/dex/quick/lazy_debug_frame_opcode_writer.h b/compiler/dex/quick/lazy_debug_frame_opcode_writer.h
index c425fc8..85050f4 100644
--- a/compiler/dex/quick/lazy_debug_frame_opcode_writer.h
+++ b/compiler/dex/quick/lazy_debug_frame_opcode_writer.h
@@ -19,7 +19,7 @@
 
 #include "base/arena_allocator.h"
 #include "base/arena_containers.h"
-#include "dwarf/debug_frame_opcode_writer.h"
+#include "debug/dwarf/debug_frame_opcode_writer.h"
 
 namespace art {
 struct LIR;
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 6bc2a13..f078bf6 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -2306,9 +2306,9 @@
           mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, soa.Self());
           // Mark methods as pre-verified. If we don't do this, the interpreter will run with
           // access checks.
-          klass->SetPreverifiedFlagOnAllMethods(
+          klass->SetSkipAccessChecksFlagOnAllMethods(
               GetInstructionSetPointerSize(manager_->GetCompiler()->GetInstructionSet()));
-          klass->SetPreverified();
+          klass->SetVerificationAttempted();
         }
         // Record the final class status if necessary.
         ClassReference ref(manager_->GetDexFile(), class_def_index);
diff --git a/compiler/dwarf/method_debug_info.h b/compiler/dwarf/method_debug_info.h
deleted file mode 100644
index e8ba914..0000000
--- a/compiler/dwarf/method_debug_info.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DWARF_METHOD_DEBUG_INFO_H_
-#define ART_COMPILER_DWARF_METHOD_DEBUG_INFO_H_
-
-#include "dex_file.h"
-
-namespace art {
-class CompiledMethod;
-namespace dwarf {
-
-struct MethodDebugInfo {
-  const DexFile* dex_file_;
-  size_t class_def_index_;
-  uint32_t dex_method_index_;
-  uint32_t access_flags_;
-  const DexFile::CodeItem* code_item_;
-  bool deduped_;
-  uintptr_t low_pc_;
-  uintptr_t high_pc_;
-  CompiledMethod* compiled_method_;
-};
-
-}  // namespace dwarf
-}  // namespace art
-
-#endif  // ART_COMPILER_DWARF_METHOD_DEBUG_INFO_H_
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 3d24d19..b673eeb 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -110,18 +110,27 @@
       CHECK(sections.empty() || sections.back()->finished_);
       // The first ELF section index is 1. Index 0 is reserved for NULL.
       section_index_ = sections.size() + 1;
-      // Push this section on the list of written sections.
-      sections.push_back(this);
+      // Page-align if we switch between allocated and non-allocated sections,
+      // or if we change the type of allocation (e.g. executable vs non-executable).
+      if (!sections.empty()) {
+        if (header_.sh_flags != sections.back()->header_.sh_flags) {
+          header_.sh_addralign = kPageSize;
+        }
+      }
       // Align file position.
       if (header_.sh_type != SHT_NOBITS) {
-        header_.sh_offset = RoundUp(owner_->stream_.Seek(0, kSeekCurrent), header_.sh_addralign);
-        owner_->stream_.Seek(header_.sh_offset, kSeekSet);
+        header_.sh_offset = owner_->AlignFileOffset(header_.sh_addralign);
+      } else {
+        header_.sh_offset = 0;
       }
       // Align virtual memory address.
       if ((header_.sh_flags & SHF_ALLOC) != 0) {
-        header_.sh_addr = RoundUp(owner_->virtual_address_, header_.sh_addralign);
-        owner_->virtual_address_ = header_.sh_addr;
+        header_.sh_addr = owner_->AlignVirtualAddress(header_.sh_addralign);
+      } else {
+        header_.sh_addr = 0;
       }
+      // Push this section on the list of written sections.
+      sections.push_back(this);
     }
 
     // Finish writing of this section.
@@ -170,8 +179,8 @@
     // and it will be zero-initialized when the ELF file is loaded in the running program.
     void WriteNoBitsSection(Elf_Word size) {
       DCHECK_NE(header_.sh_flags & SHF_ALLOC, 0u);
-      Start();
       header_.sh_type = SHT_NOBITS;
+      Start();
       header_.sh_size = size;
       End();
     }
@@ -293,12 +302,13 @@
         dynamic_(this, ".dynamic", SHT_DYNAMIC, SHF_ALLOC, &dynstr_, 0, kPageSize, sizeof(Elf_Dyn)),
         eh_frame_(this, ".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
         eh_frame_hdr_(this, ".eh_frame_hdr", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0),
-        strtab_(this, ".strtab", 0, kPageSize),
+        strtab_(this, ".strtab", 0, 1),
         symtab_(this, ".symtab", SHT_SYMTAB, 0, &strtab_),
         debug_frame_(this, ".debug_frame", SHT_PROGBITS, 0, nullptr, 0, sizeof(Elf_Addr), 0),
         debug_info_(this, ".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0),
         debug_line_(this, ".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0),
         shstrtab_(this, ".shstrtab", 0, 1),
+        started_(false),
         virtual_address_(0) {
     text_.phdr_flags_ = PF_R | PF_X;
     bss_.phdr_flags_ = PF_R | PF_W;
@@ -351,22 +361,25 @@
     other_sections_.push_back(std::move(s));
   }
 
-  // Set where the next section will be allocated in the virtual address space.
-  void SetVirtualAddress(Elf_Addr address) {
-    DCHECK_GE(address, virtual_address_);
-    virtual_address_ = address;
-  }
-
-  void Start() {
-    // Reserve space for ELF header and program headers.
-    // We do not know the number of headers until later, so
-    // it is easiest to just reserve a fixed amount of space.
-    int size = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * kMaxProgramHeaders;
+  // Reserve space for ELF header and program headers.
+  // We do not know the number of headers until later, so
+  // it is easiest to just reserve a fixed amount of space.
+  // Program headers are required for loading by the linker.
+  // It is possible to omit them for ELF files used for debugging.
+  void Start(bool write_program_headers = true) {
+    int size = sizeof(Elf_Ehdr);
+    if (write_program_headers) {
+      size += sizeof(Elf_Phdr) * kMaxProgramHeaders;
+    }
     stream_.Seek(size, kSeekSet);
+    started_ = true;
     virtual_address_ += size;
+    write_program_headers_ = write_program_headers;
   }
 
   void End() {
+    DCHECK(started_);
+
     // Write section names and finish the section headers.
     shstrtab_.Start();
     shstrtab_.Write("");
@@ -386,8 +399,7 @@
       shdrs.push_back(section->header_);
     }
     Elf_Off section_headers_offset;
-    section_headers_offset = RoundUp(stream_.Seek(0, kSeekCurrent), sizeof(Elf_Off));
-    stream_.Seek(section_headers_offset, kSeekSet);
+    section_headers_offset = AlignFileOffset(sizeof(Elf_Off));
     stream_.WriteFully(shdrs.data(), shdrs.size() * sizeof(shdrs[0]));
 
     // Flush everything else before writing the program headers. This should prevent
@@ -395,14 +407,21 @@
     // and partially written data if we suddenly lose power, for example.
     stream_.Flush();
 
-    // Write the initial file headers.
-    std::vector<Elf_Phdr> phdrs = MakeProgramHeaders();
+    // The main ELF header.
     Elf_Ehdr elf_header = MakeElfHeader(isa_);
-    elf_header.e_phoff = sizeof(Elf_Ehdr);
     elf_header.e_shoff = section_headers_offset;
-    elf_header.e_phnum = phdrs.size();
     elf_header.e_shnum = shdrs.size();
     elf_header.e_shstrndx = shstrtab_.GetSectionIndex();
+
+    // Program headers (i.e. mmap instructions).
+    std::vector<Elf_Phdr> phdrs;
+    if (write_program_headers_) {
+      phdrs = MakeProgramHeaders();
+      CHECK_LE(phdrs.size(), kMaxProgramHeaders);
+      elf_header.e_phoff = sizeof(Elf_Ehdr);
+      elf_header.e_phnum = phdrs.size();
+    }
+
     stream_.Seek(0, kSeekSet);
     stream_.WriteFully(&elf_header, sizeof(elf_header));
     stream_.WriteFully(phdrs.data(), phdrs.size() * sizeof(phdrs[0]));
@@ -492,6 +511,14 @@
     return &stream_;
   }
 
+  off_t AlignFileOffset(size_t alignment) {
+     return stream_.Seek(RoundUp(stream_.Seek(0, kSeekCurrent), alignment), kSeekSet);
+  }
+
+  Elf_Addr AlignVirtualAddress(size_t alignment) {
+     return virtual_address_ = RoundUp(virtual_address_, alignment);
+  }
+
  private:
   static Elf_Ehdr MakeElfHeader(InstructionSet isa) {
     Elf_Ehdr elf_header = Elf_Ehdr();
@@ -666,9 +693,13 @@
   // List of used section in the order in which they were written.
   std::vector<Section*> sections_;
 
+  bool started_;
+
   // Used for allocation of virtual address space.
   Elf_Addr virtual_address_;
 
+  size_t write_program_headers_;
+
   DISALLOW_COPY_AND_ASSIGN(ElfBuilder);
 };
 
diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h
index c5a0fd5..d50a08c 100644
--- a/compiler/elf_writer.h
+++ b/compiler/elf_writer.h
@@ -32,9 +32,9 @@
 class ElfFile;
 class OutputStream;
 
-namespace dwarf {
+namespace debug {
 struct MethodDebugInfo;
-}  // namespace dwarf
+}  // namespace debug
 
 class ElfWriter {
  public:
@@ -52,13 +52,16 @@
   virtual ~ElfWriter() {}
 
   virtual void Start() = 0;
+  virtual void PrepareDebugInfo(size_t rodata_section_size,
+                                size_t text_section_size,
+                                const ArrayRef<const debug::MethodDebugInfo>& method_infos) = 0;
   virtual OutputStream* StartRoData() = 0;
   virtual void EndRoData(OutputStream* rodata) = 0;
   virtual OutputStream* StartText() = 0;
   virtual void EndText(OutputStream* text) = 0;
   virtual void SetBssSize(size_t bss_size) = 0;
   virtual void WriteDynamicSection() = 0;
-  virtual void WriteDebugInfo(const ArrayRef<const dwarf::MethodDebugInfo>& method_infos) = 0;
+  virtual void WriteDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) = 0;
   virtual void WritePatchLocations(const ArrayRef<const uintptr_t>& patch_locations) = 0;
   virtual bool End() = 0;
 
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
deleted file mode 100644
index d1f5007..0000000
--- a/compiler/elf_writer_debug.cc
+++ /dev/null
@@ -1,1646 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "elf_writer_debug.h"
-
-#include <algorithm>
-#include <unordered_set>
-#include <vector>
-#include <cstdio>
-
-#include "base/casts.h"
-#include "base/stl_util.h"
-#include "linear_alloc.h"
-#include "compiled_method.h"
-#include "dex_file-inl.h"
-#include "driver/compiler_driver.h"
-#include "dwarf/expression.h"
-#include "dwarf/headers.h"
-#include "dwarf/method_debug_info.h"
-#include "dwarf/register.h"
-#include "elf_builder.h"
-#include "linker/vector_output_stream.h"
-#include "mirror/array.h"
-#include "mirror/class-inl.h"
-#include "mirror/class.h"
-#include "oat_writer.h"
-#include "stack_map.h"
-#include "utils.h"
-
-// liblzma.
-#include "XzEnc.h"
-#include "7zCrc.h"
-#include "XzCrc64.h"
-
-namespace art {
-namespace dwarf {
-
-// The ARM specification defines three special mapping symbols
-// $a, $t and $d which mark ARM, Thumb and data ranges respectively.
-// These symbols can be used by tools, for example, to pretty
-// print instructions correctly.  Objdump will use them if they
-// exist, but it will still work well without them.
-// However, these extra symbols take space, so let's just generate
-// one symbol which marks the whole .text section as code.
-constexpr bool kGenerateSingleArmMappingSymbol = true;
-
-static Reg GetDwarfCoreReg(InstructionSet isa, int machine_reg) {
-  switch (isa) {
-    case kArm:
-    case kThumb2:
-      return Reg::ArmCore(machine_reg);
-    case kArm64:
-      return Reg::Arm64Core(machine_reg);
-    case kX86:
-      return Reg::X86Core(machine_reg);
-    case kX86_64:
-      return Reg::X86_64Core(machine_reg);
-    case kMips:
-      return Reg::MipsCore(machine_reg);
-    case kMips64:
-      return Reg::Mips64Core(machine_reg);
-    default:
-      LOG(FATAL) << "Unknown instruction set: " << isa;
-      UNREACHABLE();
-  }
-}
-
-static Reg GetDwarfFpReg(InstructionSet isa, int machine_reg) {
-  switch (isa) {
-    case kArm:
-    case kThumb2:
-      return Reg::ArmFp(machine_reg);
-    case kArm64:
-      return Reg::Arm64Fp(machine_reg);
-    case kX86:
-      return Reg::X86Fp(machine_reg);
-    case kX86_64:
-      return Reg::X86_64Fp(machine_reg);
-    case kMips:
-      return Reg::MipsFp(machine_reg);
-    case kMips64:
-      return Reg::Mips64Fp(machine_reg);
-    default:
-      LOG(FATAL) << "Unknown instruction set: " << isa;
-      UNREACHABLE();
-  }
-}
-
-static void WriteCIE(InstructionSet isa,
-                     CFIFormat format,
-                     std::vector<uint8_t>* buffer) {
-  // Scratch registers should be marked as undefined.  This tells the
-  // debugger that its value in the previous frame is not recoverable.
-  bool is64bit = Is64BitInstructionSet(isa);
-  switch (isa) {
-    case kArm:
-    case kThumb2: {
-      DebugFrameOpCodeWriter<> opcodes;
-      opcodes.DefCFA(Reg::ArmCore(13), 0);  // R13(SP).
-      // core registers.
-      for (int reg = 0; reg < 13; reg++) {
-        if (reg < 4 || reg == 12) {
-          opcodes.Undefined(Reg::ArmCore(reg));
-        } else {
-          opcodes.SameValue(Reg::ArmCore(reg));
-        }
-      }
-      // fp registers.
-      for (int reg = 0; reg < 32; reg++) {
-        if (reg < 16) {
-          opcodes.Undefined(Reg::ArmFp(reg));
-        } else {
-          opcodes.SameValue(Reg::ArmFp(reg));
-        }
-      }
-      auto return_reg = Reg::ArmCore(14);  // R14(LR).
-      WriteCIE(is64bit, return_reg, opcodes, format, buffer);
-      return;
-    }
-    case kArm64: {
-      DebugFrameOpCodeWriter<> opcodes;
-      opcodes.DefCFA(Reg::Arm64Core(31), 0);  // R31(SP).
-      // core registers.
-      for (int reg = 0; reg < 30; reg++) {
-        if (reg < 8 || reg == 16 || reg == 17) {
-          opcodes.Undefined(Reg::Arm64Core(reg));
-        } else {
-          opcodes.SameValue(Reg::Arm64Core(reg));
-        }
-      }
-      // fp registers.
-      for (int reg = 0; reg < 32; reg++) {
-        if (reg < 8 || reg >= 16) {
-          opcodes.Undefined(Reg::Arm64Fp(reg));
-        } else {
-          opcodes.SameValue(Reg::Arm64Fp(reg));
-        }
-      }
-      auto return_reg = Reg::Arm64Core(30);  // R30(LR).
-      WriteCIE(is64bit, return_reg, opcodes, format, buffer);
-      return;
-    }
-    case kMips:
-    case kMips64: {
-      DebugFrameOpCodeWriter<> opcodes;
-      opcodes.DefCFA(Reg::MipsCore(29), 0);  // R29(SP).
-      // core registers.
-      for (int reg = 1; reg < 26; reg++) {
-        if (reg < 16 || reg == 24 || reg == 25) {  // AT, V*, A*, T*.
-          opcodes.Undefined(Reg::MipsCore(reg));
-        } else {
-          opcodes.SameValue(Reg::MipsCore(reg));
-        }
-      }
-      // fp registers.
-      for (int reg = 0; reg < 32; reg++) {
-        if (reg < 24) {
-          opcodes.Undefined(Reg::Mips64Fp(reg));
-        } else {
-          opcodes.SameValue(Reg::Mips64Fp(reg));
-        }
-      }
-      auto return_reg = Reg::MipsCore(31);  // R31(RA).
-      WriteCIE(is64bit, return_reg, opcodes, format, buffer);
-      return;
-    }
-    case kX86: {
-      // FIXME: Add fp registers once libunwind adds support for them. Bug: 20491296
-      constexpr bool generate_opcodes_for_x86_fp = false;
-      DebugFrameOpCodeWriter<> opcodes;
-      opcodes.DefCFA(Reg::X86Core(4), 4);   // R4(ESP).
-      opcodes.Offset(Reg::X86Core(8), -4);  // R8(EIP).
-      // core registers.
-      for (int reg = 0; reg < 8; reg++) {
-        if (reg <= 3) {
-          opcodes.Undefined(Reg::X86Core(reg));
-        } else if (reg == 4) {
-          // Stack pointer.
-        } else {
-          opcodes.SameValue(Reg::X86Core(reg));
-        }
-      }
-      // fp registers.
-      if (generate_opcodes_for_x86_fp) {
-        for (int reg = 0; reg < 8; reg++) {
-          opcodes.Undefined(Reg::X86Fp(reg));
-        }
-      }
-      auto return_reg = Reg::X86Core(8);  // R8(EIP).
-      WriteCIE(is64bit, return_reg, opcodes, format, buffer);
-      return;
-    }
-    case kX86_64: {
-      DebugFrameOpCodeWriter<> opcodes;
-      opcodes.DefCFA(Reg::X86_64Core(4), 8);  // R4(RSP).
-      opcodes.Offset(Reg::X86_64Core(16), -8);  // R16(RIP).
-      // core registers.
-      for (int reg = 0; reg < 16; reg++) {
-        if (reg == 4) {
-          // Stack pointer.
-        } else if (reg < 12 && reg != 3 && reg != 5) {  // except EBX and EBP.
-          opcodes.Undefined(Reg::X86_64Core(reg));
-        } else {
-          opcodes.SameValue(Reg::X86_64Core(reg));
-        }
-      }
-      // fp registers.
-      for (int reg = 0; reg < 16; reg++) {
-        if (reg < 12) {
-          opcodes.Undefined(Reg::X86_64Fp(reg));
-        } else {
-          opcodes.SameValue(Reg::X86_64Fp(reg));
-        }
-      }
-      auto return_reg = Reg::X86_64Core(16);  // R16(RIP).
-      WriteCIE(is64bit, return_reg, opcodes, format, buffer);
-      return;
-    }
-    case kNone:
-      break;
-  }
-  LOG(FATAL) << "Cannot write CIE frame for ISA " << isa;
-  UNREACHABLE();
-}
-
-template<typename ElfTypes>
-void WriteCFISection(ElfBuilder<ElfTypes>* builder,
-                     const ArrayRef<const MethodDebugInfo>& method_infos,
-                     CFIFormat format,
-                     bool write_oat_patches) {
-  CHECK(format == DW_DEBUG_FRAME_FORMAT || format == DW_EH_FRAME_FORMAT);
-  typedef typename ElfTypes::Addr Elf_Addr;
-
-  if (method_infos.empty()) {
-    return;
-  }
-
-  std::vector<uint32_t> binary_search_table;
-  std::vector<uintptr_t> patch_locations;
-  if (format == DW_EH_FRAME_FORMAT) {
-    binary_search_table.reserve(2 * method_infos.size());
-  } else {
-    patch_locations.reserve(method_infos.size());
-  }
-
-  // The methods can be written any order.
-  // Let's therefore sort them in the lexicographical order of the opcodes.
-  // This has no effect on its own. However, if the final .debug_frame section is
-  // compressed it reduces the size since similar opcodes sequences are grouped.
-  std::vector<const MethodDebugInfo*> sorted_method_infos;
-  sorted_method_infos.reserve(method_infos.size());
-  for (size_t i = 0; i < method_infos.size(); i++) {
-    sorted_method_infos.push_back(&method_infos[i]);
-  }
-  std::sort(
-      sorted_method_infos.begin(),
-      sorted_method_infos.end(),
-      [](const MethodDebugInfo* lhs, const MethodDebugInfo* rhs) {
-        ArrayRef<const uint8_t> l = lhs->compiled_method_->GetCFIInfo();
-        ArrayRef<const uint8_t> r = rhs->compiled_method_->GetCFIInfo();
-        return std::lexicographical_compare(l.begin(), l.end(), r.begin(), r.end());
-      });
-
-  // Write .eh_frame/.debug_frame section.
-  auto* cfi_section = (format == DW_DEBUG_FRAME_FORMAT
-                       ? builder->GetDebugFrame()
-                       : builder->GetEhFrame());
-  {
-    cfi_section->Start();
-    const bool is64bit = Is64BitInstructionSet(builder->GetIsa());
-    const Elf_Addr text_address = builder->GetText()->Exists()
-        ? builder->GetText()->GetAddress()
-        : 0;
-    const Elf_Addr cfi_address = cfi_section->GetAddress();
-    const Elf_Addr cie_address = cfi_address;
-    Elf_Addr buffer_address = cfi_address;
-    std::vector<uint8_t> buffer;  // Small temporary buffer.
-    WriteCIE(builder->GetIsa(), format, &buffer);
-    cfi_section->WriteFully(buffer.data(), buffer.size());
-    buffer_address += buffer.size();
-    buffer.clear();
-    for (const MethodDebugInfo* mi : sorted_method_infos) {
-      if (!mi->deduped_) {  // Only one FDE per unique address.
-        ArrayRef<const uint8_t> opcodes = mi->compiled_method_->GetCFIInfo();
-        if (!opcodes.empty()) {
-          const Elf_Addr code_address = text_address + mi->low_pc_;
-          if (format == DW_EH_FRAME_FORMAT) {
-            binary_search_table.push_back(
-                dchecked_integral_cast<uint32_t>(code_address));
-            binary_search_table.push_back(
-                dchecked_integral_cast<uint32_t>(buffer_address));
-          }
-          WriteFDE(is64bit, cfi_address, cie_address,
-                   code_address, mi->high_pc_ - mi->low_pc_,
-                   opcodes, format, buffer_address, &buffer,
-                   &patch_locations);
-          cfi_section->WriteFully(buffer.data(), buffer.size());
-          buffer_address += buffer.size();
-          buffer.clear();
-        }
-      }
-    }
-    cfi_section->End();
-  }
-
-  if (format == DW_EH_FRAME_FORMAT) {
-    auto* header_section = builder->GetEhFrameHdr();
-    header_section->Start();
-    uint32_t header_address = dchecked_integral_cast<int32_t>(header_section->GetAddress());
-    // Write .eh_frame_hdr section.
-    std::vector<uint8_t> buffer;
-    Writer<> header(&buffer);
-    header.PushUint8(1);  // Version.
-    // Encoding of .eh_frame pointer - libunwind does not honor datarel here,
-    // so we have to use pcrel which means relative to the pointer's location.
-    header.PushUint8(DW_EH_PE_pcrel | DW_EH_PE_sdata4);
-    // Encoding of binary search table size.
-    header.PushUint8(DW_EH_PE_udata4);
-    // Encoding of binary search table addresses - libunwind supports only this
-    // specific combination, which means relative to the start of .eh_frame_hdr.
-    header.PushUint8(DW_EH_PE_datarel | DW_EH_PE_sdata4);
-    // .eh_frame pointer
-    header.PushInt32(cfi_section->GetAddress() - (header_address + 4u));
-    // Binary search table size (number of entries).
-    header.PushUint32(dchecked_integral_cast<uint32_t>(binary_search_table.size()/2));
-    header_section->WriteFully(buffer.data(), buffer.size());
-    // Binary search table.
-    for (size_t i = 0; i < binary_search_table.size(); i++) {
-      // Make addresses section-relative since we know the header address now.
-      binary_search_table[i] -= header_address;
-    }
-    header_section->WriteFully(binary_search_table.data(), binary_search_table.size());
-    header_section->End();
-  } else {
-    if (write_oat_patches) {
-      builder->WritePatches(".debug_frame.oat_patches",
-                            ArrayRef<const uintptr_t>(patch_locations));
-    }
-  }
-}
-
-namespace {
-  struct CompilationUnit {
-    std::vector<const MethodDebugInfo*> methods_;
-    size_t debug_line_offset_ = 0;
-    uintptr_t low_pc_ = std::numeric_limits<uintptr_t>::max();
-    uintptr_t high_pc_ = 0;
-  };
-
-  typedef std::vector<DexFile::LocalInfo> LocalInfos;
-
-  void LocalInfoCallback(void* ctx, const DexFile::LocalInfo& entry) {
-    static_cast<LocalInfos*>(ctx)->push_back(entry);
-  }
-
-  typedef std::vector<DexFile::PositionInfo> PositionInfos;
-
-  bool PositionInfoCallback(void* ctx, const DexFile::PositionInfo& entry) {
-    static_cast<PositionInfos*>(ctx)->push_back(entry);
-    return false;
-  }
-
-  std::vector<const char*> GetParamNames(const MethodDebugInfo* mi) {
-    std::vector<const char*> names;
-    if (mi->code_item_ != nullptr) {
-      const uint8_t* stream = mi->dex_file_->GetDebugInfoStream(mi->code_item_);
-      if (stream != nullptr) {
-        DecodeUnsignedLeb128(&stream);  // line.
-        uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
-        for (uint32_t i = 0; i < parameters_size; ++i) {
-          uint32_t id = DecodeUnsignedLeb128P1(&stream);
-          names.push_back(mi->dex_file_->StringDataByIdx(id));
-        }
-      }
-    }
-    return names;
-  }
-
-  struct VariableLocation {
-    uint32_t low_pc;
-    uint32_t high_pc;
-    DexRegisterLocation reg_lo;  // May be None if the location is unknown.
-    DexRegisterLocation reg_hi;  // Most significant bits of 64-bit value.
-  };
-
-  // Get the location of given dex register (e.g. stack or machine register).
-  // Note that the location might be different based on the current pc.
-  // The result will cover all ranges where the variable is in scope.
-  std::vector<VariableLocation> GetVariableLocations(const MethodDebugInfo* method_info,
-                                                     uint16_t vreg,
-                                                     bool is64bitValue,
-                                                     uint32_t dex_pc_low,
-                                                     uint32_t dex_pc_high) {
-    std::vector<VariableLocation> variable_locations;
-
-    // Get stack maps sorted by pc (they might not be sorted internally).
-    const CodeInfo code_info(method_info->compiled_method_->GetVmapTable().data());
-    const StackMapEncoding encoding = code_info.ExtractEncoding();
-    std::map<uint32_t, StackMap> stack_maps;
-    for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) {
-      StackMap stack_map = code_info.GetStackMapAt(s, encoding);
-      DCHECK(stack_map.IsValid());
-      const uint32_t low_pc = method_info->low_pc_ + stack_map.GetNativePcOffset(encoding);
-      DCHECK_LE(low_pc, method_info->high_pc_);
-      stack_maps.emplace(low_pc, stack_map);
-    }
-
-    // Create entries for the requested register based on stack map data.
-    for (auto it = stack_maps.begin(); it != stack_maps.end(); it++) {
-      const StackMap& stack_map = it->second;
-      const uint32_t low_pc = it->first;
-      auto next_it = it;
-      next_it++;
-      const uint32_t high_pc = next_it != stack_maps.end() ? next_it->first
-                                                           : method_info->high_pc_;
-      DCHECK_LE(low_pc, high_pc);
-      if (low_pc == high_pc) {
-        continue;  // Ignore if the address range is empty.
-      }
-
-      // Check that the stack map is in the requested range.
-      uint32_t dex_pc = stack_map.GetDexPc(encoding);
-      if (!(dex_pc_low <= dex_pc && dex_pc < dex_pc_high)) {
-        continue;
-      }
-
-      // Find the location of the dex register.
-      DexRegisterLocation reg_lo = DexRegisterLocation::None();
-      DexRegisterLocation reg_hi = DexRegisterLocation::None();
-      if (stack_map.HasDexRegisterMap(encoding)) {
-        DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(
-            stack_map, encoding, method_info->code_item_->registers_size_);
-        reg_lo = dex_register_map.GetDexRegisterLocation(
-            vreg, method_info->code_item_->registers_size_, code_info, encoding);
-        if (is64bitValue) {
-          reg_hi = dex_register_map.GetDexRegisterLocation(
-              vreg + 1, method_info->code_item_->registers_size_, code_info, encoding);
-        }
-      }
-
-      // Add location entry for this address range.
-      if (!variable_locations.empty() &&
-          variable_locations.back().reg_lo == reg_lo &&
-          variable_locations.back().reg_hi == reg_hi &&
-          variable_locations.back().high_pc == low_pc) {
-        // Merge with the previous entry (extend its range).
-        variable_locations.back().high_pc = high_pc;
-      } else {
-        variable_locations.push_back({low_pc, high_pc, reg_lo, reg_hi});
-      }
-    }
-
-    return variable_locations;
-  }
-
-  bool IsFromOptimizingCompiler(const MethodDebugInfo* method_info) {
-    return method_info->compiled_method_->GetQuickCode().size() > 0 &&
-           method_info->compiled_method_->GetVmapTable().size() > 0 &&
-           method_info->compiled_method_->GetGcMap().size() == 0 &&
-           method_info->code_item_ != nullptr;
-  }
-}  // namespace
-
-// Helper class to write .debug_info and its supporting sections.
-template<typename ElfTypes>
-class DebugInfoWriter {
-  typedef typename ElfTypes::Addr Elf_Addr;
-
-  // Helper class to write one compilation unit.
-  // It holds helper methods and temporary state.
-  class CompilationUnitWriter {
-   public:
-    explicit CompilationUnitWriter(DebugInfoWriter* owner)
-      : owner_(owner),
-        info_(Is64BitInstructionSet(owner_->builder_->GetIsa()), &owner->debug_abbrev_) {
-    }
-
-    void Write(const CompilationUnit& compilation_unit) {
-      CHECK(!compilation_unit.methods_.empty());
-      const Elf_Addr text_address = owner_->builder_->GetText()->Exists()
-          ? owner_->builder_->GetText()->GetAddress()
-          : 0;
-      const uintptr_t cu_size = compilation_unit.high_pc_ - compilation_unit.low_pc_;
-
-      info_.StartTag(DW_TAG_compile_unit);
-      info_.WriteString(DW_AT_producer, "Android dex2oat");
-      info_.WriteData1(DW_AT_language, DW_LANG_Java);
-      info_.WriteString(DW_AT_comp_dir, "$JAVA_SRC_ROOT");
-      info_.WriteAddr(DW_AT_low_pc, text_address + compilation_unit.low_pc_);
-      info_.WriteUdata(DW_AT_high_pc, dchecked_integral_cast<uint32_t>(cu_size));
-      info_.WriteSecOffset(DW_AT_stmt_list, compilation_unit.debug_line_offset_);
-
-      const char* last_dex_class_desc = nullptr;
-      for (auto mi : compilation_unit.methods_) {
-        const DexFile* dex = mi->dex_file_;
-        const DexFile::CodeItem* dex_code = mi->code_item_;
-        const DexFile::MethodId& dex_method = dex->GetMethodId(mi->dex_method_index_);
-        const DexFile::ProtoId& dex_proto = dex->GetMethodPrototype(dex_method);
-        const DexFile::TypeList* dex_params = dex->GetProtoParameters(dex_proto);
-        const char* dex_class_desc = dex->GetMethodDeclaringClassDescriptor(dex_method);
-        const bool is_static = (mi->access_flags_ & kAccStatic) != 0;
-
-        // Enclose the method in correct class definition.
-        if (last_dex_class_desc != dex_class_desc) {
-          if (last_dex_class_desc != nullptr) {
-            EndClassTag();
-          }
-          // Write reference tag for the class we are about to declare.
-          size_t reference_tag_offset = info_.StartTag(DW_TAG_reference_type);
-          type_cache_.emplace(std::string(dex_class_desc), reference_tag_offset);
-          size_t type_attrib_offset = info_.size();
-          info_.WriteRef4(DW_AT_type, 0);
-          info_.EndTag();
-          // Declare the class that owns this method.
-          size_t class_offset = StartClassTag(dex_class_desc);
-          info_.UpdateUint32(type_attrib_offset, class_offset);
-          info_.WriteFlagPresent(DW_AT_declaration);
-          // Check that each class is defined only once.
-          bool unique = owner_->defined_dex_classes_.insert(dex_class_desc).second;
-          CHECK(unique) << "Redefinition of " << dex_class_desc;
-          last_dex_class_desc = dex_class_desc;
-        }
-
-        int start_depth = info_.Depth();
-        info_.StartTag(DW_TAG_subprogram);
-        WriteName(dex->GetMethodName(dex_method));
-        info_.WriteAddr(DW_AT_low_pc, text_address + mi->low_pc_);
-        info_.WriteUdata(DW_AT_high_pc, dchecked_integral_cast<uint32_t>(mi->high_pc_-mi->low_pc_));
-        std::vector<uint8_t> expr_buffer;
-        Expression expr(&expr_buffer);
-        expr.WriteOpCallFrameCfa();
-        info_.WriteExprLoc(DW_AT_frame_base, expr);
-        WriteLazyType(dex->GetReturnTypeDescriptor(dex_proto));
-
-        // Write parameters. DecodeDebugLocalInfo returns them as well, but it does not
-        // guarantee order or uniqueness so it is safer to iterate over them manually.
-        // DecodeDebugLocalInfo might not also be available if there is no debug info.
-        std::vector<const char*> param_names = GetParamNames(mi);
-        uint32_t arg_reg = 0;
-        if (!is_static) {
-          info_.StartTag(DW_TAG_formal_parameter);
-          WriteName("this");
-          info_.WriteFlagPresent(DW_AT_artificial);
-          WriteLazyType(dex_class_desc);
-          if (dex_code != nullptr) {
-            // Write the stack location of the parameter.
-            const uint32_t vreg = dex_code->registers_size_ - dex_code->ins_size_ + arg_reg;
-            const bool is64bitValue = false;
-            WriteRegLocation(mi, vreg, is64bitValue, compilation_unit.low_pc_);
-          }
-          arg_reg++;
-          info_.EndTag();
-        }
-        if (dex_params != nullptr) {
-          for (uint32_t i = 0; i < dex_params->Size(); ++i) {
-            info_.StartTag(DW_TAG_formal_parameter);
-            // Parameter names may not be always available.
-            if (i < param_names.size()) {
-              WriteName(param_names[i]);
-            }
-            // Write the type.
-            const char* type_desc = dex->StringByTypeIdx(dex_params->GetTypeItem(i).type_idx_);
-            WriteLazyType(type_desc);
-            const bool is64bitValue = type_desc[0] == 'D' || type_desc[0] == 'J';
-            if (dex_code != nullptr) {
-              // Write the stack location of the parameter.
-              const uint32_t vreg = dex_code->registers_size_ - dex_code->ins_size_ + arg_reg;
-              WriteRegLocation(mi, vreg, is64bitValue, compilation_unit.low_pc_);
-            }
-            arg_reg += is64bitValue ? 2 : 1;
-            info_.EndTag();
-          }
-          if (dex_code != nullptr) {
-            DCHECK_EQ(arg_reg, dex_code->ins_size_);
-          }
-        }
-
-        // Write local variables.
-        LocalInfos local_infos;
-        if (dex->DecodeDebugLocalInfo(dex_code,
-                                      is_static,
-                                      mi->dex_method_index_,
-                                      LocalInfoCallback,
-                                      &local_infos)) {
-          for (const DexFile::LocalInfo& var : local_infos) {
-            if (var.reg_ < dex_code->registers_size_ - dex_code->ins_size_) {
-              info_.StartTag(DW_TAG_variable);
-              WriteName(var.name_);
-              WriteLazyType(var.descriptor_);
-              bool is64bitValue = var.descriptor_[0] == 'D' || var.descriptor_[0] == 'J';
-              WriteRegLocation(mi, var.reg_, is64bitValue, compilation_unit.low_pc_,
-                               var.start_address_, var.end_address_);
-              info_.EndTag();
-            }
-          }
-        }
-
-        info_.EndTag();
-        CHECK_EQ(info_.Depth(), start_depth);  // Balanced start/end.
-      }
-      if (last_dex_class_desc != nullptr) {
-        EndClassTag();
-      }
-      FinishLazyTypes();
-      CloseNamespacesAboveDepth(0);
-      info_.EndTag();  // DW_TAG_compile_unit
-      CHECK_EQ(info_.Depth(), 0);
-      std::vector<uint8_t> buffer;
-      buffer.reserve(info_.data()->size() + KB);
-      const size_t offset = owner_->builder_->GetDebugInfo()->GetSize();
-      // All compilation units share single table which is at the start of .debug_abbrev.
-      const size_t debug_abbrev_offset = 0;
-      WriteDebugInfoCU(debug_abbrev_offset, info_, offset, &buffer, &owner_->debug_info_patches_);
-      owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
-    }
-
-    void Write(const ArrayRef<mirror::Class*>& types) SHARED_REQUIRES(Locks::mutator_lock_) {
-      info_.StartTag(DW_TAG_compile_unit);
-      info_.WriteString(DW_AT_producer, "Android dex2oat");
-      info_.WriteData1(DW_AT_language, DW_LANG_Java);
-
-      // Base class references to be patched at the end.
-      std::map<size_t, mirror::Class*> base_class_references;
-
-      // Already written declarations or definitions.
-      std::map<mirror::Class*, size_t> class_declarations;
-
-      std::vector<uint8_t> expr_buffer;
-      for (mirror::Class* type : types) {
-        if (type->IsPrimitive()) {
-          // For primitive types the definition and the declaration is the same.
-          if (type->GetPrimitiveType() != Primitive::kPrimVoid) {
-            WriteTypeDeclaration(type->GetDescriptor(nullptr));
-          }
-        } else if (type->IsArrayClass()) {
-          mirror::Class* element_type = type->GetComponentType();
-          uint32_t component_size = type->GetComponentSize();
-          uint32_t data_offset = mirror::Array::DataOffset(component_size).Uint32Value();
-          uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();
-
-          CloseNamespacesAboveDepth(0);  // Declare in root namespace.
-          info_.StartTag(DW_TAG_array_type);
-          std::string descriptor_string;
-          WriteLazyType(element_type->GetDescriptor(&descriptor_string));
-          WriteLinkageName(type);
-          info_.WriteUdata(DW_AT_data_member_location, data_offset);
-          info_.StartTag(DW_TAG_subrange_type);
-          Expression count_expr(&expr_buffer);
-          count_expr.WriteOpPushObjectAddress();
-          count_expr.WriteOpPlusUconst(length_offset);
-          count_expr.WriteOpDerefSize(4);  // Array length is always 32-bit wide.
-          info_.WriteExprLoc(DW_AT_count, count_expr);
-          info_.EndTag();  // DW_TAG_subrange_type.
-          info_.EndTag();  // DW_TAG_array_type.
-        } else if (type->IsInterface()) {
-          // Skip.  Variables cannot have an interface as a dynamic type.
-          // We do not expose the interface information to the debugger in any way.
-        } else {
-          std::string descriptor_string;
-          const char* desc = type->GetDescriptor(&descriptor_string);
-          size_t class_offset = StartClassTag(desc);
-          class_declarations.emplace(type, class_offset);
-
-          if (!type->IsVariableSize()) {
-            info_.WriteUdata(DW_AT_byte_size, type->GetObjectSize());
-          }
-
-          WriteLinkageName(type);
-
-          if (type->IsObjectClass()) {
-            // Generate artificial member which is used to get the dynamic type of variable.
-            // The run-time value of this field will correspond to linkage name of some type.
-            // We need to do it only once in j.l.Object since all other types inherit it.
-            info_.StartTag(DW_TAG_member);
-            WriteName(".dynamic_type");
-            WriteLazyType(sizeof(uintptr_t) == 8 ? "J" : "I");
-            info_.WriteFlagPresent(DW_AT_artificial);
-            // Create DWARF expression to get the value of the methods_ field.
-            Expression expr(&expr_buffer);
-            // The address of the object has been implicitly pushed on the stack.
-            // Dereference the klass_ field of Object (32-bit; possibly poisoned).
-            DCHECK_EQ(type->ClassOffset().Uint32Value(), 0u);
-            DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Class>), 4u);
-            expr.WriteOpDerefSize(4);
-            if (kPoisonHeapReferences) {
-              expr.WriteOpNeg();
-              // DWARF stack is pointer sized. Ensure that the high bits are clear.
-              expr.WriteOpConstu(0xFFFFFFFF);
-              expr.WriteOpAnd();
-            }
-            // Add offset to the methods_ field.
-            expr.WriteOpPlusUconst(mirror::Class::MethodsOffset().Uint32Value());
-            // Top of stack holds the location of the field now.
-            info_.WriteExprLoc(DW_AT_data_member_location, expr);
-            info_.EndTag();  // DW_TAG_member.
-          }
-
-          // Base class.
-          mirror::Class* base_class = type->GetSuperClass();
-          if (base_class != nullptr) {
-            info_.StartTag(DW_TAG_inheritance);
-            base_class_references.emplace(info_.size(), base_class);
-            info_.WriteRef4(DW_AT_type, 0);
-            info_.WriteUdata(DW_AT_data_member_location, 0);
-            info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public);
-            info_.EndTag();  // DW_TAG_inheritance.
-          }
-
-          // Member variables.
-          for (uint32_t i = 0, count = type->NumInstanceFields(); i < count; ++i) {
-            ArtField* field = type->GetInstanceField(i);
-            info_.StartTag(DW_TAG_member);
-            WriteName(field->GetName());
-            WriteLazyType(field->GetTypeDescriptor());
-            info_.WriteUdata(DW_AT_data_member_location, field->GetOffset().Uint32Value());
-            uint32_t access_flags = field->GetAccessFlags();
-            if (access_flags & kAccPublic) {
-              info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public);
-            } else if (access_flags & kAccProtected) {
-              info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_protected);
-            } else if (access_flags & kAccPrivate) {
-              info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_private);
-            }
-            info_.EndTag();  // DW_TAG_member.
-          }
-
-          if (type->IsStringClass()) {
-            // Emit debug info about an artifical class member for java.lang.String which represents
-            // the first element of the data stored in a string instance. Consumers of the debug
-            // info will be able to read the content of java.lang.String based on the count (real
-            // field) and based on the location of this data member.
-            info_.StartTag(DW_TAG_member);
-            WriteName("value");
-            // We don't support fields with C like array types so we just say its type is java char.
-            WriteLazyType("C");  // char.
-            info_.WriteUdata(DW_AT_data_member_location,
-                             mirror::String::ValueOffset().Uint32Value());
-            info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_private);
-            info_.EndTag();  // DW_TAG_member.
-          }
-
-          EndClassTag();
-        }
-      }
-
-      // Write base class declarations.
-      for (const auto& base_class_reference : base_class_references) {
-        size_t reference_offset = base_class_reference.first;
-        mirror::Class* base_class = base_class_reference.second;
-        const auto& it = class_declarations.find(base_class);
-        if (it != class_declarations.end()) {
-          info_.UpdateUint32(reference_offset, it->second);
-        } else {
-          // Declare base class.  We can not use the standard WriteLazyType
-          // since we want to avoid the DW_TAG_reference_tag wrapping.
-          std::string tmp_storage;
-          const char* base_class_desc = base_class->GetDescriptor(&tmp_storage);
-          size_t base_class_declaration_offset = StartClassTag(base_class_desc);
-          info_.WriteFlagPresent(DW_AT_declaration);
-          WriteLinkageName(base_class);
-          EndClassTag();
-          class_declarations.emplace(base_class, base_class_declaration_offset);
-          info_.UpdateUint32(reference_offset, base_class_declaration_offset);
-        }
-      }
-
-      FinishLazyTypes();
-      CloseNamespacesAboveDepth(0);
-      info_.EndTag();  // DW_TAG_compile_unit.
-      CHECK_EQ(info_.Depth(), 0);
-      std::vector<uint8_t> buffer;
-      buffer.reserve(info_.data()->size() + KB);
-      const size_t offset = owner_->builder_->GetDebugInfo()->GetSize();
-      // All compilation units share single table which is at the start of .debug_abbrev.
-      const size_t debug_abbrev_offset = 0;
-      WriteDebugInfoCU(debug_abbrev_offset, info_, offset, &buffer, &owner_->debug_info_patches_);
-      owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
-    }
-
-    // Linkage name uniquely identifies type.
-    // It is used to determine the dynamic type of objects.
-    // We use the methods_ field of class since it is unique and it is not moved by the GC.
-    void WriteLinkageName(mirror::Class* type) SHARED_REQUIRES(Locks::mutator_lock_) {
-      auto* methods_ptr = type->GetMethodsPtr();
-      if (methods_ptr == nullptr) {
-        // Some types might have no methods.  Allocate empty array instead.
-        LinearAlloc* allocator = Runtime::Current()->GetLinearAlloc();
-        void* storage = allocator->Alloc(Thread::Current(), sizeof(LengthPrefixedArray<ArtMethod>));
-        methods_ptr = new (storage) LengthPrefixedArray<ArtMethod>(0);
-        type->SetMethodsPtr(methods_ptr, 0, 0);
-        DCHECK(type->GetMethodsPtr() != nullptr);
-      }
-      char name[32];
-      snprintf(name, sizeof(name), "0x%" PRIXPTR, reinterpret_cast<uintptr_t>(methods_ptr));
-      info_.WriteString(DW_AT_linkage_name, name);
-    }
-
-    // Write table into .debug_loc which describes location of dex register.
-    // The dex register might be valid only at some points and it might
-    // move between machine registers and stack.
-    void WriteRegLocation(const MethodDebugInfo* method_info,
-                          uint16_t vreg,
-                          bool is64bitValue,
-                          uint32_t compilation_unit_low_pc,
-                          uint32_t dex_pc_low = 0,
-                          uint32_t dex_pc_high = 0xFFFFFFFF) {
-      using Kind = DexRegisterLocation::Kind;
-      if (!IsFromOptimizingCompiler(method_info)) {
-        return;
-      }
-
-      Writer<> debug_loc(&owner_->debug_loc_);
-      Writer<> debug_ranges(&owner_->debug_ranges_);
-      info_.WriteSecOffset(DW_AT_location, debug_loc.size());
-      info_.WriteSecOffset(DW_AT_start_scope, debug_ranges.size());
-
-      std::vector<VariableLocation> variable_locations = GetVariableLocations(
-          method_info,
-          vreg,
-          is64bitValue,
-          dex_pc_low,
-          dex_pc_high);
-
-      // Write .debug_loc entries.
-      const InstructionSet isa = owner_->builder_->GetIsa();
-      const bool is64bit = Is64BitInstructionSet(isa);
-      std::vector<uint8_t> expr_buffer;
-      for (const VariableLocation& variable_location : variable_locations) {
-        // Translate dex register location to DWARF expression.
-        // Note that 64-bit value might be split to two distinct locations.
-        // (for example, two 32-bit machine registers, or even stack and register)
-        Expression expr(&expr_buffer);
-        DexRegisterLocation reg_lo = variable_location.reg_lo;
-        DexRegisterLocation reg_hi = variable_location.reg_hi;
-        for (int piece = 0; piece < (is64bitValue ? 2 : 1); piece++) {
-          DexRegisterLocation reg_loc = (piece == 0 ? reg_lo : reg_hi);
-          const Kind kind = reg_loc.GetKind();
-          const int32_t value = reg_loc.GetValue();
-          if (kind == Kind::kInStack) {
-            const size_t frame_size = method_info->compiled_method_->GetFrameSizeInBytes();
-            // The stack offset is relative to SP. Make it relative to CFA.
-            expr.WriteOpFbreg(value - frame_size);
-            if (piece == 0 && reg_hi.GetKind() == Kind::kInStack &&
-                reg_hi.GetValue() == value + 4) {
-              break;  // the high word is correctly implied by the low word.
-            }
-          } else if (kind == Kind::kInRegister) {
-            expr.WriteOpReg(GetDwarfCoreReg(isa, value).num());
-            if (piece == 0 && reg_hi.GetKind() == Kind::kInRegisterHigh &&
-                reg_hi.GetValue() == value) {
-              break;  // the high word is correctly implied by the low word.
-            }
-          } else if (kind == Kind::kInFpuRegister) {
-            if ((isa == kArm || isa == kThumb2) &&
-                piece == 0 && reg_hi.GetKind() == Kind::kInFpuRegister &&
-                reg_hi.GetValue() == value + 1 && value % 2 == 0) {
-              // Translate S register pair to D register (e.g. S4+S5 to D2).
-              expr.WriteOpReg(Reg::ArmDp(value / 2).num());
-              break;
-            }
-            expr.WriteOpReg(GetDwarfFpReg(isa, value).num());
-            if (piece == 0 && reg_hi.GetKind() == Kind::kInFpuRegisterHigh &&
-                reg_hi.GetValue() == reg_lo.GetValue()) {
-              break;  // the high word is correctly implied by the low word.
-            }
-          } else if (kind == Kind::kConstant) {
-            expr.WriteOpConsts(value);
-            expr.WriteOpStackValue();
-          } else if (kind == Kind::kNone) {
-            break;
-          } else {
-            // kInStackLargeOffset and kConstantLargeValue are hidden by GetKind().
-            // kInRegisterHigh and kInFpuRegisterHigh should be handled by
-            // the special cases above and they should not occur alone.
-            LOG(ERROR) << "Unexpected register location kind: "
-                       << DexRegisterLocation::PrettyDescriptor(kind);
-            break;
-          }
-          if (is64bitValue) {
-            // Write the marker which is needed by split 64-bit values.
-            // This code is skipped by the special cases.
-            expr.WriteOpPiece(4);
-          }
-        }
-
-        if (expr.size() > 0) {
-          if (is64bit) {
-            debug_loc.PushUint64(variable_location.low_pc - compilation_unit_low_pc);
-            debug_loc.PushUint64(variable_location.high_pc - compilation_unit_low_pc);
-          } else {
-            debug_loc.PushUint32(variable_location.low_pc - compilation_unit_low_pc);
-            debug_loc.PushUint32(variable_location.high_pc - compilation_unit_low_pc);
-          }
-          // Write the expression.
-          debug_loc.PushUint16(expr.size());
-          debug_loc.PushData(expr.data());
-        } else {
-          // Do not generate .debug_loc if the location is not known.
-        }
-      }
-      // Write end-of-list entry.
-      if (is64bit) {
-        debug_loc.PushUint64(0);
-        debug_loc.PushUint64(0);
-      } else {
-        debug_loc.PushUint32(0);
-        debug_loc.PushUint32(0);
-      }
-
-      // Write .debug_ranges entries.
-      // This includes ranges where the variable is in scope but the location is not known.
-      for (size_t i = 0; i < variable_locations.size(); i++) {
-        uint32_t low_pc = variable_locations[i].low_pc;
-        uint32_t high_pc = variable_locations[i].high_pc;
-        while (i + 1 < variable_locations.size() && variable_locations[i+1].low_pc == high_pc) {
-          // Merge address range with the next entry.
-          high_pc = variable_locations[++i].high_pc;
-        }
-        if (is64bit) {
-          debug_ranges.PushUint64(low_pc - compilation_unit_low_pc);
-          debug_ranges.PushUint64(high_pc - compilation_unit_low_pc);
-        } else {
-          debug_ranges.PushUint32(low_pc - compilation_unit_low_pc);
-          debug_ranges.PushUint32(high_pc - compilation_unit_low_pc);
-        }
-      }
-      // Write end-of-list entry.
-      if (is64bit) {
-        debug_ranges.PushUint64(0);
-        debug_ranges.PushUint64(0);
-      } else {
-        debug_ranges.PushUint32(0);
-        debug_ranges.PushUint32(0);
-      }
-    }
-
-    // Some types are difficult to define as we go since they need
-    // to be enclosed in the right set of namespaces. Therefore we
-    // just define all types lazily at the end of compilation unit.
-    void WriteLazyType(const char* type_descriptor) {
-      if (type_descriptor != nullptr && type_descriptor[0] != 'V') {
-        lazy_types_.emplace(std::string(type_descriptor), info_.size());
-        info_.WriteRef4(DW_AT_type, 0);
-      }
-    }
-
-    void FinishLazyTypes() {
-      for (const auto& lazy_type : lazy_types_) {
-        info_.UpdateUint32(lazy_type.second, WriteTypeDeclaration(lazy_type.first));
-      }
-      lazy_types_.clear();
-    }
-
-   private:
-    void WriteName(const char* name) {
-      if (name != nullptr) {
-        info_.WriteString(DW_AT_name, name);
-      }
-    }
-
-    // Convert dex type descriptor to DWARF.
-    // Returns offset in the compilation unit.
-    size_t WriteTypeDeclaration(const std::string& desc) {
-      DCHECK(!desc.empty());
-      const auto& it = type_cache_.find(desc);
-      if (it != type_cache_.end()) {
-        return it->second;
-      }
-
-      size_t offset;
-      if (desc[0] == 'L') {
-        // Class type. For example: Lpackage/name;
-        size_t class_offset = StartClassTag(desc.c_str());
-        info_.WriteFlagPresent(DW_AT_declaration);
-        EndClassTag();
-        // Reference to the class type.
-        offset = info_.StartTag(DW_TAG_reference_type);
-        info_.WriteRef(DW_AT_type, class_offset);
-        info_.EndTag();
-      } else if (desc[0] == '[') {
-        // Array type.
-        size_t element_type = WriteTypeDeclaration(desc.substr(1));
-        CloseNamespacesAboveDepth(0);  // Declare in root namespace.
-        size_t array_type = info_.StartTag(DW_TAG_array_type);
-        info_.WriteFlagPresent(DW_AT_declaration);
-        info_.WriteRef(DW_AT_type, element_type);
-        info_.EndTag();
-        offset = info_.StartTag(DW_TAG_reference_type);
-        info_.WriteRef4(DW_AT_type, array_type);
-        info_.EndTag();
-      } else {
-        // Primitive types.
-        DCHECK_EQ(desc.size(), 1u);
-
-        const char* name;
-        uint32_t encoding;
-        uint32_t byte_size;
-        switch (desc[0]) {
-        case 'B':
-          name = "byte";
-          encoding = DW_ATE_signed;
-          byte_size = 1;
-          break;
-        case 'C':
-          name = "char";
-          encoding = DW_ATE_UTF;
-          byte_size = 2;
-          break;
-        case 'D':
-          name = "double";
-          encoding = DW_ATE_float;
-          byte_size = 8;
-          break;
-        case 'F':
-          name = "float";
-          encoding = DW_ATE_float;
-          byte_size = 4;
-          break;
-        case 'I':
-          name = "int";
-          encoding = DW_ATE_signed;
-          byte_size = 4;
-          break;
-        case 'J':
-          name = "long";
-          encoding = DW_ATE_signed;
-          byte_size = 8;
-          break;
-        case 'S':
-          name = "short";
-          encoding = DW_ATE_signed;
-          byte_size = 2;
-          break;
-        case 'Z':
-          name = "boolean";
-          encoding = DW_ATE_boolean;
-          byte_size = 1;
-          break;
-        case 'V':
-          LOG(FATAL) << "Void type should not be encoded";
-          UNREACHABLE();
-        default:
-          LOG(FATAL) << "Unknown dex type descriptor: \"" << desc << "\"";
-          UNREACHABLE();
-        }
-        CloseNamespacesAboveDepth(0);  // Declare in root namespace.
-        offset = info_.StartTag(DW_TAG_base_type);
-        WriteName(name);
-        info_.WriteData1(DW_AT_encoding, encoding);
-        info_.WriteData1(DW_AT_byte_size, byte_size);
-        info_.EndTag();
-      }
-
-      type_cache_.emplace(desc, offset);
-      return offset;
-    }
-
-    // Start DW_TAG_class_type tag nested in DW_TAG_namespace tags.
-    // Returns offset of the class tag in the compilation unit.
-    size_t StartClassTag(const char* desc) {
-      std::string name = SetNamespaceForClass(desc);
-      size_t offset = info_.StartTag(DW_TAG_class_type);
-      WriteName(name.c_str());
-      return offset;
-    }
-
-    void EndClassTag() {
-      info_.EndTag();
-    }
-
-    // Set the current namespace nesting to one required by the given class.
-    // Returns the class name with namespaces, 'L', and ';' stripped.
-    std::string SetNamespaceForClass(const char* desc) {
-      DCHECK(desc != nullptr && desc[0] == 'L');
-      desc++;  // Skip the initial 'L'.
-      size_t depth = 0;
-      for (const char* end; (end = strchr(desc, '/')) != nullptr; desc = end + 1, ++depth) {
-        // Check whether the name at this depth is already what we need.
-        if (depth < current_namespace_.size()) {
-          const std::string& name = current_namespace_[depth];
-          if (name.compare(0, name.size(), desc, end - desc) == 0) {
-            continue;
-          }
-        }
-        // Otherwise we need to open a new namespace tag at this depth.
-        CloseNamespacesAboveDepth(depth);
-        info_.StartTag(DW_TAG_namespace);
-        std::string name(desc, end - desc);
-        WriteName(name.c_str());
-        current_namespace_.push_back(std::move(name));
-      }
-      CloseNamespacesAboveDepth(depth);
-      return std::string(desc, strchr(desc, ';') - desc);
-    }
-
-    // Close namespace tags to reach the given nesting depth.
-    void CloseNamespacesAboveDepth(size_t depth) {
-      DCHECK_LE(depth, current_namespace_.size());
-      while (current_namespace_.size() > depth) {
-        info_.EndTag();
-        current_namespace_.pop_back();
-      }
-    }
-
-    // For access to the ELF sections.
-    DebugInfoWriter<ElfTypes>* owner_;
-    // Temporary buffer to create and store the entries.
-    DebugInfoEntryWriter<> info_;
-    // Cache of already translated type descriptors.
-    std::map<std::string, size_t> type_cache_;  // type_desc -> definition_offset.
-    // 32-bit references which need to be resolved to a type later.
-    // Given type may be used multiple times.  Therefore we need a multimap.
-    std::multimap<std::string, size_t> lazy_types_;  // type_desc -> patch_offset.
-    // The current set of open namespace tags which are active and not closed yet.
-    std::vector<std::string> current_namespace_;
-  };
-
- public:
-  explicit DebugInfoWriter(ElfBuilder<ElfTypes>* builder)
-      : builder_(builder),
-        debug_abbrev_(&debug_abbrev_buffer_) {
-  }
-
-  void Start() {
-    builder_->GetDebugInfo()->Start();
-  }
-
-  void WriteCompilationUnit(const CompilationUnit& compilation_unit) {
-    CompilationUnitWriter writer(this);
-    writer.Write(compilation_unit);
-  }
-
-  void WriteTypes(const ArrayRef<mirror::Class*>& types) SHARED_REQUIRES(Locks::mutator_lock_) {
-    CompilationUnitWriter writer(this);
-    writer.Write(types);
-  }
-
-  void End() {
-    builder_->GetDebugInfo()->End();
-    builder_->WritePatches(".debug_info.oat_patches",
-                           ArrayRef<const uintptr_t>(debug_info_patches_));
-    builder_->WriteSection(".debug_abbrev", &debug_abbrev_buffer_);
-    builder_->WriteSection(".debug_loc", &debug_loc_);
-    builder_->WriteSection(".debug_ranges", &debug_ranges_);
-  }
-
- private:
-  ElfBuilder<ElfTypes>* builder_;
-  std::vector<uintptr_t> debug_info_patches_;
-  std::vector<uint8_t> debug_abbrev_buffer_;
-  DebugAbbrevWriter<> debug_abbrev_;
-  std::vector<uint8_t> debug_loc_;
-  std::vector<uint8_t> debug_ranges_;
-
-  std::unordered_set<const char*> defined_dex_classes_;  // For CHECKs only.
-};
-
-template<typename ElfTypes>
-class DebugLineWriter {
-  typedef typename ElfTypes::Addr Elf_Addr;
-
- public:
-  explicit DebugLineWriter(ElfBuilder<ElfTypes>* builder) : builder_(builder) {
-  }
-
-  void Start() {
-    builder_->GetDebugLine()->Start();
-  }
-
-  // Write line table for given set of methods.
-  // Returns the number of bytes written.
-  size_t WriteCompilationUnit(CompilationUnit& compilation_unit) {
-    const bool is64bit = Is64BitInstructionSet(builder_->GetIsa());
-    const Elf_Addr text_address = builder_->GetText()->Exists()
-        ? builder_->GetText()->GetAddress()
-        : 0;
-
-    compilation_unit.debug_line_offset_ = builder_->GetDebugLine()->GetSize();
-
-    std::vector<FileEntry> files;
-    std::unordered_map<std::string, size_t> files_map;
-    std::vector<std::string> directories;
-    std::unordered_map<std::string, size_t> directories_map;
-    int code_factor_bits_ = 0;
-    int dwarf_isa = -1;
-    switch (builder_->GetIsa()) {
-      case kArm:  // arm actually means thumb2.
-      case kThumb2:
-        code_factor_bits_ = 1;  // 16-bit instuctions
-        dwarf_isa = 1;  // DW_ISA_ARM_thumb.
-        break;
-      case kArm64:
-      case kMips:
-      case kMips64:
-        code_factor_bits_ = 2;  // 32-bit instructions
-        break;
-      case kNone:
-      case kX86:
-      case kX86_64:
-        break;
-    }
-    DebugLineOpCodeWriter<> opcodes(is64bit, code_factor_bits_);
-    for (const MethodDebugInfo* mi : compilation_unit.methods_) {
-      // Ignore function if we have already generated line table for the same address.
-      // It would confuse the debugger and the DWARF specification forbids it.
-      if (mi->deduped_) {
-        continue;
-      }
-
-      ArrayRef<const SrcMapElem> src_mapping_table;
-      std::vector<SrcMapElem> src_mapping_table_from_stack_maps;
-      if (IsFromOptimizingCompiler(mi)) {
-        // Use stack maps to create mapping table from pc to dex.
-        const CodeInfo code_info(mi->compiled_method_->GetVmapTable().data());
-        const StackMapEncoding encoding = code_info.ExtractEncoding();
-        for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) {
-          StackMap stack_map = code_info.GetStackMapAt(s, encoding);
-          DCHECK(stack_map.IsValid());
-          // Emit only locations where we have local-variable information.
-          // In particular, skip mappings inside the prologue.
-          if (stack_map.HasDexRegisterMap(encoding)) {
-            const uint32_t pc = stack_map.GetNativePcOffset(encoding);
-            const int32_t dex = stack_map.GetDexPc(encoding);
-            src_mapping_table_from_stack_maps.push_back({pc, dex});
-          }
-        }
-        std::sort(src_mapping_table_from_stack_maps.begin(),
-                  src_mapping_table_from_stack_maps.end());
-        src_mapping_table = ArrayRef<const SrcMapElem>(src_mapping_table_from_stack_maps);
-      } else {
-        // Use the mapping table provided by the quick compiler.
-        src_mapping_table = mi->compiled_method_->GetSrcMappingTable();
-      }
-
-      if (src_mapping_table.empty()) {
-        continue;
-      }
-
-      Elf_Addr method_address = text_address + mi->low_pc_;
-
-      PositionInfos position_infos;
-      const DexFile* dex = mi->dex_file_;
-      if (!dex->DecodeDebugPositionInfo(mi->code_item_, PositionInfoCallback, &position_infos)) {
-        continue;
-      }
-
-      if (position_infos.empty()) {
-        continue;
-      }
-
-      opcodes.SetAddress(method_address);
-      if (dwarf_isa != -1) {
-        opcodes.SetISA(dwarf_isa);
-      }
-
-      // Get and deduplicate directory and filename.
-      int file_index = 0;  // 0 - primary source file of the compilation.
-      auto& dex_class_def = dex->GetClassDef(mi->class_def_index_);
-      const char* source_file = dex->GetSourceFile(dex_class_def);
-      if (source_file != nullptr) {
-        std::string file_name(source_file);
-        size_t file_name_slash = file_name.find_last_of('/');
-        std::string class_name(dex->GetClassDescriptor(dex_class_def));
-        size_t class_name_slash = class_name.find_last_of('/');
-        std::string full_path(file_name);
-
-        // Guess directory from package name.
-        int directory_index = 0;  // 0 - current directory of the compilation.
-        if (file_name_slash == std::string::npos &&  // Just filename.
-            class_name.front() == 'L' &&  // Type descriptor for a class.
-            class_name_slash != std::string::npos) {  // Has package name.
-          std::string package_name = class_name.substr(1, class_name_slash - 1);
-          auto it = directories_map.find(package_name);
-          if (it == directories_map.end()) {
-            directory_index = 1 + directories.size();
-            directories_map.emplace(package_name, directory_index);
-            directories.push_back(package_name);
-          } else {
-            directory_index = it->second;
-          }
-          full_path = package_name + "/" + file_name;
-        }
-
-        // Add file entry.
-        auto it2 = files_map.find(full_path);
-        if (it2 == files_map.end()) {
-          file_index = 1 + files.size();
-          files_map.emplace(full_path, file_index);
-          files.push_back(FileEntry {
-            file_name,
-            directory_index,
-            0,  // Modification time - NA.
-            0,  // File size - NA.
-          });
-        } else {
-          file_index = it2->second;
-        }
-      }
-      opcodes.SetFile(file_index);
-
-      // Generate mapping opcodes from PC to Java lines.
-      if (file_index != 0) {
-        bool first = true;
-        for (SrcMapElem pc2dex : src_mapping_table) {
-          uint32_t pc = pc2dex.from_;
-          int dex_pc = pc2dex.to_;
-          // Find mapping with address with is greater than our dex pc; then go back one step.
-          auto ub = std::upper_bound(position_infos.begin(), position_infos.end(), dex_pc,
-              [](uint32_t address, const DexFile::PositionInfo& entry) {
-                  return address < entry.address_;
-              });
-          if (ub != position_infos.begin()) {
-            int line = (--ub)->line_;
-            if (first) {
-              first = false;
-              if (pc > 0) {
-                // Assume that any preceding code is prologue.
-                int first_line = position_infos.front().line_;
-                // Prologue is not a sensible place for a breakpoint.
-                opcodes.NegateStmt();
-                opcodes.AddRow(method_address, first_line);
-                opcodes.NegateStmt();
-                opcodes.SetPrologueEnd();
-              }
-              opcodes.AddRow(method_address + pc, line);
-            } else if (line != opcodes.CurrentLine()) {
-              opcodes.AddRow(method_address + pc, line);
-            }
-          }
-        }
-      } else {
-        // line 0 - instruction cannot be attributed to any source line.
-        opcodes.AddRow(method_address, 0);
-      }
-
-      opcodes.AdvancePC(text_address + mi->high_pc_);
-      opcodes.EndSequence();
-    }
-    std::vector<uint8_t> buffer;
-    buffer.reserve(opcodes.data()->size() + KB);
-    size_t offset = builder_->GetDebugLine()->GetSize();
-    WriteDebugLineTable(directories, files, opcodes, offset, &buffer, &debug_line_patches);
-    builder_->GetDebugLine()->WriteFully(buffer.data(), buffer.size());
-    return buffer.size();
-  }
-
-  void End() {
-    builder_->GetDebugLine()->End();
-    builder_->WritePatches(".debug_line.oat_patches",
-                           ArrayRef<const uintptr_t>(debug_line_patches));
-  }
-
- private:
-  ElfBuilder<ElfTypes>* builder_;
-  std::vector<uintptr_t> debug_line_patches;
-};
-
-template<typename ElfTypes>
-static void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
-                               const ArrayRef<const MethodDebugInfo>& method_infos) {
-  // Group the methods into compilation units based on source file.
-  std::vector<CompilationUnit> compilation_units;
-  const char* last_source_file = nullptr;
-  for (const MethodDebugInfo& mi : method_infos) {
-    auto& dex_class_def = mi.dex_file_->GetClassDef(mi.class_def_index_);
-    const char* source_file = mi.dex_file_->GetSourceFile(dex_class_def);
-    if (compilation_units.empty() || source_file != last_source_file) {
-      compilation_units.push_back(CompilationUnit());
-    }
-    CompilationUnit& cu = compilation_units.back();
-    cu.methods_.push_back(&mi);
-    cu.low_pc_ = std::min(cu.low_pc_, mi.low_pc_);
-    cu.high_pc_ = std::max(cu.high_pc_, mi.high_pc_);
-    last_source_file = source_file;
-  }
-
-  // Write .debug_line section.
-  if (!compilation_units.empty()) {
-    DebugLineWriter<ElfTypes> line_writer(builder);
-    line_writer.Start();
-    for (auto& compilation_unit : compilation_units) {
-      line_writer.WriteCompilationUnit(compilation_unit);
-    }
-    line_writer.End();
-  }
-
-  // Write .debug_info section.
-  if (!compilation_units.empty()) {
-    DebugInfoWriter<ElfTypes> info_writer(builder);
-    info_writer.Start();
-    for (const auto& compilation_unit : compilation_units) {
-      info_writer.WriteCompilationUnit(compilation_unit);
-    }
-    info_writer.End();
-  }
-}
-
-template <typename ElfTypes>
-static void WriteDebugSymbols(ElfBuilder<ElfTypes>* builder,
-                              const ArrayRef<const MethodDebugInfo>& method_infos,
-                              bool with_signature) {
-  bool generated_mapping_symbol = false;
-  auto* strtab = builder->GetStrTab();
-  auto* symtab = builder->GetSymTab();
-
-  if (method_infos.empty()) {
-    return;
-  }
-
-  // Find all addresses (low_pc) which contain deduped methods.
-  // The first instance of method is not marked deduped_, but the rest is.
-  std::unordered_set<uint32_t> deduped_addresses;
-  for (const MethodDebugInfo& info : method_infos) {
-    if (info.deduped_) {
-      deduped_addresses.insert(info.low_pc_);
-    }
-  }
-
-  strtab->Start();
-  strtab->Write("");  // strtab should start with empty string.
-  std::string last_name;
-  size_t last_name_offset = 0;
-  for (const MethodDebugInfo& info : method_infos) {
-    if (info.deduped_) {
-      continue;  // Add symbol only for the first instance.
-    }
-    std::string name = PrettyMethod(info.dex_method_index_, *info.dex_file_, with_signature);
-    if (deduped_addresses.find(info.low_pc_) != deduped_addresses.end()) {
-      name += " [DEDUPED]";
-    }
-    // If we write method names without signature, we might see the same name multiple times.
-    size_t name_offset = (name == last_name ? last_name_offset : strtab->Write(name));
-
-    const auto* text = builder->GetText()->Exists() ? builder->GetText() : nullptr;
-    const bool is_relative = (text != nullptr);
-    uint32_t low_pc = info.low_pc_;
-    // Add in code delta, e.g., thumb bit 0 for Thumb2 code.
-    low_pc += info.compiled_method_->CodeDelta();
-    symtab->Add(name_offset,
-                text,
-                low_pc,
-                is_relative,
-                info.high_pc_ - info.low_pc_,
-                STB_GLOBAL,
-                STT_FUNC);
-
-    // Conforming to aaelf, add $t mapping symbol to indicate start of a sequence of thumb2
-    // instructions, so that disassembler tools can correctly disassemble.
-    // Note that even if we generate just a single mapping symbol, ARM's Streamline
-    // requires it to match function symbol.  Just address 0 does not work.
-    if (info.compiled_method_->GetInstructionSet() == kThumb2) {
-      if (!generated_mapping_symbol || !kGenerateSingleArmMappingSymbol) {
-        symtab->Add(strtab->Write("$t"), text, info.low_pc_ & ~1,
-                    is_relative, 0, STB_LOCAL, STT_NOTYPE);
-        generated_mapping_symbol = true;
-      }
-    }
-
-    last_name = std::move(name);
-    last_name_offset = name_offset;
-  }
-  strtab->End();
-
-  // Symbols are buffered and written after names (because they are smaller).
-  // We could also do two passes in this function to avoid the buffering.
-  symtab->Start();
-  symtab->Write();
-  symtab->End();
-}
-
-template <typename ElfTypes>
-void WriteDebugInfo(ElfBuilder<ElfTypes>* builder,
-                    const ArrayRef<const MethodDebugInfo>& method_infos,
-                    CFIFormat cfi_format) {
-  // Add methods to .symtab.
-  WriteDebugSymbols(builder, method_infos, true /* with_signature */);
-  // Generate CFI (stack unwinding information).
-  WriteCFISection(builder, method_infos, cfi_format, true /* write_oat_patches */);
-  // Write DWARF .debug_* sections.
-  WriteDebugSections(builder, method_infos);
-}
-
-static void XzCompress(const std::vector<uint8_t>* src, std::vector<uint8_t>* dst) {
-  // Configure the compression library.
-  CrcGenerateTable();
-  Crc64GenerateTable();
-  CLzma2EncProps lzma2Props;
-  Lzma2EncProps_Init(&lzma2Props);
-  lzma2Props.lzmaProps.level = 1;  // Fast compression.
-  Lzma2EncProps_Normalize(&lzma2Props);
-  CXzProps props;
-  XzProps_Init(&props);
-  props.lzma2Props = &lzma2Props;
-  // Implement the required interface for communication (written in C so no virtual methods).
-  struct XzCallbacks : public ISeqInStream, public ISeqOutStream, public ICompressProgress {
-    static SRes ReadImpl(void* p, void* buf, size_t* size) {
-      auto* ctx = static_cast<XzCallbacks*>(reinterpret_cast<ISeqInStream*>(p));
-      *size = std::min(*size, ctx->src_->size() - ctx->src_pos_);
-      memcpy(buf, ctx->src_->data() + ctx->src_pos_, *size);
-      ctx->src_pos_ += *size;
-      return SZ_OK;
-    }
-    static size_t WriteImpl(void* p, const void* buf, size_t size) {
-      auto* ctx = static_cast<XzCallbacks*>(reinterpret_cast<ISeqOutStream*>(p));
-      const uint8_t* buffer = reinterpret_cast<const uint8_t*>(buf);
-      ctx->dst_->insert(ctx->dst_->end(), buffer, buffer + size);
-      return size;
-    }
-    static SRes ProgressImpl(void* , UInt64, UInt64) {
-      return SZ_OK;
-    }
-    size_t src_pos_;
-    const std::vector<uint8_t>* src_;
-    std::vector<uint8_t>* dst_;
-  };
-  XzCallbacks callbacks;
-  callbacks.Read = XzCallbacks::ReadImpl;
-  callbacks.Write = XzCallbacks::WriteImpl;
-  callbacks.Progress = XzCallbacks::ProgressImpl;
-  callbacks.src_pos_ = 0;
-  callbacks.src_ = src;
-  callbacks.dst_ = dst;
-  // Compress.
-  SRes res = Xz_Encode(&callbacks, &callbacks, &props, &callbacks);
-  CHECK_EQ(res, SZ_OK);
-}
-
-template <typename ElfTypes>
-void WriteMiniDebugInfo(ElfBuilder<ElfTypes>* parent_builder,
-                        const ArrayRef<const MethodDebugInfo>& method_infos) {
-  const InstructionSet isa = parent_builder->GetIsa();
-  std::vector<uint8_t> buffer;
-  buffer.reserve(KB);
-  VectorOutputStream out("Mini-debug-info ELF file", &buffer);
-  std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, &out));
-  builder->Start();
-  // Write .rodata and .text as NOBITS sections.
-  // This allows tools to detect virtual address relocation of the parent ELF file.
-  builder->SetVirtualAddress(parent_builder->GetRoData()->GetAddress());
-  builder->GetRoData()->WriteNoBitsSection(parent_builder->GetRoData()->GetSize());
-  builder->SetVirtualAddress(parent_builder->GetText()->GetAddress());
-  builder->GetText()->WriteNoBitsSection(parent_builder->GetText()->GetSize());
-  WriteDebugSymbols(builder.get(), method_infos, false /* with_signature */);
-  WriteCFISection(builder.get(), method_infos, DW_DEBUG_FRAME_FORMAT, false /* write_oat_paches */);
-  builder->End();
-  CHECK(builder->Good());
-  std::vector<uint8_t> compressed_buffer;
-  compressed_buffer.reserve(buffer.size() / 4);
-  XzCompress(&buffer, &compressed_buffer);
-  parent_builder->WriteSection(".gnu_debugdata", &compressed_buffer);
-}
-
-template <typename ElfTypes>
-static ArrayRef<const uint8_t> WriteDebugElfFileForMethodInternal(
-    const dwarf::MethodDebugInfo& method_info) {
-  const InstructionSet isa = method_info.compiled_method_->GetInstructionSet();
-  std::vector<uint8_t> buffer;
-  buffer.reserve(KB);
-  VectorOutputStream out("Debug ELF file", &buffer);
-  std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, &out));
-  builder->Start();
-  WriteDebugInfo(builder.get(),
-                 ArrayRef<const MethodDebugInfo>(&method_info, 1),
-                 DW_DEBUG_FRAME_FORMAT);
-  builder->End();
-  CHECK(builder->Good());
-  // Make a copy of the buffer.  We want to shrink it anyway.
-  uint8_t* result = new uint8_t[buffer.size()];
-  CHECK(result != nullptr);
-  memcpy(result, buffer.data(), buffer.size());
-  return ArrayRef<const uint8_t>(result, buffer.size());
-}
-
-ArrayRef<const uint8_t> WriteDebugElfFileForMethod(const dwarf::MethodDebugInfo& method_info) {
-  const InstructionSet isa = method_info.compiled_method_->GetInstructionSet();
-  if (Is64BitInstructionSet(isa)) {
-    return WriteDebugElfFileForMethodInternal<ElfTypes64>(method_info);
-  } else {
-    return WriteDebugElfFileForMethodInternal<ElfTypes32>(method_info);
-  }
-}
-
-template <typename ElfTypes>
-static ArrayRef<const uint8_t> WriteDebugElfFileForClassesInternal(
-    const InstructionSet isa, const ArrayRef<mirror::Class*>& types)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
-  std::vector<uint8_t> buffer;
-  buffer.reserve(KB);
-  VectorOutputStream out("Debug ELF file", &buffer);
-  std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, &out));
-  builder->Start();
-
-  DebugInfoWriter<ElfTypes> info_writer(builder.get());
-  info_writer.Start();
-  info_writer.WriteTypes(types);
-  info_writer.End();
-
-  builder->End();
-  CHECK(builder->Good());
-  // Make a copy of the buffer.  We want to shrink it anyway.
-  uint8_t* result = new uint8_t[buffer.size()];
-  CHECK(result != nullptr);
-  memcpy(result, buffer.data(), buffer.size());
-  return ArrayRef<const uint8_t>(result, buffer.size());
-}
-
-ArrayRef<const uint8_t> WriteDebugElfFileForClasses(const InstructionSet isa,
-                                                    const ArrayRef<mirror::Class*>& types) {
-  if (Is64BitInstructionSet(isa)) {
-    return WriteDebugElfFileForClassesInternal<ElfTypes64>(isa, types);
-  } else {
-    return WriteDebugElfFileForClassesInternal<ElfTypes32>(isa, types);
-  }
-}
-
-// Explicit instantiations
-template void WriteDebugInfo<ElfTypes32>(
-    ElfBuilder<ElfTypes32>* builder,
-    const ArrayRef<const MethodDebugInfo>& method_infos,
-    CFIFormat cfi_format);
-template void WriteDebugInfo<ElfTypes64>(
-    ElfBuilder<ElfTypes64>* builder,
-    const ArrayRef<const MethodDebugInfo>& method_infos,
-    CFIFormat cfi_format);
-template void WriteMiniDebugInfo<ElfTypes32>(
-    ElfBuilder<ElfTypes32>* builder,
-    const ArrayRef<const MethodDebugInfo>& method_infos);
-template void WriteMiniDebugInfo<ElfTypes64>(
-    ElfBuilder<ElfTypes64>* builder,
-    const ArrayRef<const MethodDebugInfo>& method_infos);
-
-}  // namespace dwarf
-}  // namespace art
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 6bf080a..1d71e57 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -23,16 +23,18 @@
 #include "base/logging.h"
 #include "base/stl_util.h"
 #include "compiled_method.h"
+#include "debug/elf_debug_writer.h"
+#include "debug/method_debug_info.h"
 #include "driver/compiler_options.h"
-#include "dwarf/method_debug_info.h"
 #include "elf.h"
 #include "elf_builder.h"
 #include "elf_utils.h"
-#include "elf_writer_debug.h"
 #include "globals.h"
 #include "leb128.h"
 #include "linker/buffered_output_stream.h"
 #include "linker/file_output_stream.h"
+#include "thread-inl.h"
+#include "thread_pool.h"
 #include "utils.h"
 
 namespace art {
@@ -46,6 +48,37 @@
 // Let's use .debug_frame because it is easier to strip or compress.
 constexpr dwarf::CFIFormat kCFIFormat = dwarf::DW_DEBUG_FRAME_FORMAT;
 
+class DebugInfoTask : public Task {
+ public:
+  DebugInfoTask(InstructionSet isa,
+                size_t rodata_section_size,
+                size_t text_section_size,
+                const ArrayRef<const debug::MethodDebugInfo>& method_infos)
+      : isa_(isa),
+        rodata_section_size_(rodata_section_size),
+        text_section_size_(text_section_size),
+        method_infos_(method_infos) {
+  }
+
+  void Run(Thread*) {
+    result_ = debug::MakeMiniDebugInfo(isa_,
+                                       rodata_section_size_,
+                                       text_section_size_,
+                                       method_infos_);
+  }
+
+  std::vector<uint8_t>* GetResult() {
+    return &result_;
+  }
+
+ private:
+  InstructionSet isa_;
+  size_t rodata_section_size_;
+  size_t text_section_size_;
+  const ArrayRef<const debug::MethodDebugInfo>& method_infos_;
+  std::vector<uint8_t> result_;
+};
+
 template <typename ElfTypes>
 class ElfWriterQuick FINAL : public ElfWriter {
  public:
@@ -55,13 +88,16 @@
   ~ElfWriterQuick();
 
   void Start() OVERRIDE;
+  void PrepareDebugInfo(size_t rodata_section_size,
+                        size_t text_section_size,
+                        const ArrayRef<const debug::MethodDebugInfo>& method_infos) OVERRIDE;
   OutputStream* StartRoData() OVERRIDE;
   void EndRoData(OutputStream* rodata) OVERRIDE;
   OutputStream* StartText() OVERRIDE;
   void EndText(OutputStream* text) OVERRIDE;
   void SetBssSize(size_t bss_size) OVERRIDE;
   void WriteDynamicSection() OVERRIDE;
-  void WriteDebugInfo(const ArrayRef<const dwarf::MethodDebugInfo>& method_infos) OVERRIDE;
+  void WriteDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) OVERRIDE;
   void WritePatchLocations(const ArrayRef<const uintptr_t>& patch_locations) OVERRIDE;
   bool End() OVERRIDE;
 
@@ -75,6 +111,8 @@
   File* const elf_file_;
   std::unique_ptr<BufferedOutputStream> output_stream_;
   std::unique_ptr<ElfBuilder<ElfTypes>> builder_;
+  std::unique_ptr<DebugInfoTask> debug_info_task_;
+  std::unique_ptr<ThreadPool> debug_info_thread_pool_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(ElfWriterQuick);
 };
@@ -147,15 +185,40 @@
 }
 
 template <typename ElfTypes>
-void ElfWriterQuick<ElfTypes>::WriteDebugInfo(
-    const ArrayRef<const dwarf::MethodDebugInfo>& method_infos) {
-  if (compiler_options_->GetGenerateDebugInfo()) {
-    // Generate all the debug information we can.
-    dwarf::WriteDebugInfo(builder_.get(), method_infos, kCFIFormat);
+void ElfWriterQuick<ElfTypes>::PrepareDebugInfo(
+    size_t rodata_section_size,
+    size_t text_section_size,
+    const ArrayRef<const debug::MethodDebugInfo>& method_infos) {
+  if (!method_infos.empty() && compiler_options_->GetGenerateMiniDebugInfo()) {
+    // Prepare the mini-debug-info in background while we do other I/O.
+    Thread* self = Thread::Current();
+    debug_info_task_ = std::unique_ptr<DebugInfoTask>(
+        new DebugInfoTask(builder_->GetIsa(),
+                          rodata_section_size,
+                          text_section_size,
+                          method_infos));
+    debug_info_thread_pool_ = std::unique_ptr<ThreadPool>(
+        new ThreadPool("Mini-debug-info writer", 1));
+    debug_info_thread_pool_->AddTask(self, debug_info_task_.get());
+    debug_info_thread_pool_->StartWorkers(self);
   }
-  if (compiler_options_->GetGenerateMiniDebugInfo()) {
-    // Generate only some information and compress it.
-    dwarf::WriteMiniDebugInfo(builder_.get(), method_infos);
+}
+
+template <typename ElfTypes>
+void ElfWriterQuick<ElfTypes>::WriteDebugInfo(
+    const ArrayRef<const debug::MethodDebugInfo>& method_infos) {
+  if (!method_infos.empty()) {
+    if (compiler_options_->GetGenerateDebugInfo()) {
+      // Generate all the debug information we can.
+      debug::WriteDebugInfo(builder_.get(), method_infos, kCFIFormat, true /* write_oat_patches */);
+    }
+    if (compiler_options_->GetGenerateMiniDebugInfo()) {
+      // Wait for the mini-debug-info generation to finish and write it to disk.
+      Thread* self = Thread::Current();
+      DCHECK(debug_info_thread_pool_ != nullptr);
+      debug_info_thread_pool_->Wait(self, true, false);
+      builder_->WriteSection(".gnu_debugdata", debug_info_task_->GetResult());
+    }
   }
 }
 
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index a5a77966..4920f9b 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -23,7 +23,7 @@
 #include "base/unix_file/fd_file.h"
 #include "class_linker-inl.h"
 #include "common_compiler_test.h"
-#include "dwarf/method_debug_info.h"
+#include "debug/method_debug_info.h"
 #include "elf_writer.h"
 #include "elf_writer_quick.h"
 #include "gc/space/image_space.h"
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 6774758..d2bf6c0 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -16,19 +16,19 @@
 
 #include "jit_compiler.h"
 
-#include "art_method-inl.h"
 #include "arch/instruction_set.h"
 #include "arch/instruction_set_features.h"
+#include "art_method-inl.h"
 #include "base/stringpiece.h"
 #include "base/time_utils.h"
 #include "base/timing_logger.h"
 #include "base/unix_file/fd_file.h"
 #include "compiler_callbacks.h"
+#include "debug/elf_debug_writer.h"
 #include "dex/pass_manager.h"
 #include "dex/quick_compiler_callbacks.h"
 #include "driver/compiler_driver.h"
 #include "driver/compiler_options.h"
-#include "elf_writer_debug.h"
 #include "jit/debugger_interface.h"
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
@@ -60,11 +60,12 @@
   delete reinterpret_cast<JitCompiler*>(handle);
 }
 
-extern "C" bool jit_compile_method(void* handle, ArtMethod* method, Thread* self)
+extern "C" bool jit_compile_method(
+    void* handle, ArtMethod* method, Thread* self, bool osr)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
   DCHECK(jit_compiler != nullptr);
-  return jit_compiler->CompileMethod(self, method);
+  return jit_compiler->CompileMethod(self, method, osr);
 }
 
 extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
@@ -73,7 +74,7 @@
   DCHECK(jit_compiler != nullptr);
   if (jit_compiler->GetCompilerOptions()->GetGenerateDebugInfo()) {
     const ArrayRef<mirror::Class*> types_array(types, count);
-    ArrayRef<const uint8_t> elf_file = dwarf::WriteDebugElfFileForClasses(kRuntimeISA, types_array);
+    ArrayRef<const uint8_t> elf_file = debug::WriteDebugElfFileForClasses(kRuntimeISA, types_array);
     CreateJITCodeEntry(std::unique_ptr<const uint8_t[]>(elf_file.data()), elf_file.size());
   }
 }
@@ -201,7 +202,7 @@
   }
 }
 
-bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method) {
+bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) {
   TimingLogger logger("JIT compiler timing logger", true, VLOG_IS_ON(jit));
   const uint64_t start_time = NanoTime();
   StackHandleScope<2> hs(self);
@@ -223,8 +224,8 @@
     // of that proxy method, as the compiler does not expect a proxy method.
     ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(sizeof(void*));
     JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
-    success = compiler_driver_->GetCompiler()->JitCompile(self, code_cache, method_to_compile);
-    if (success && perf_file_ != nullptr) {
+    success = compiler_driver_->GetCompiler()->JitCompile(self, code_cache, method_to_compile, osr);
+    if (success && (perf_file_ != nullptr)) {
       const void* ptr = method_to_compile->GetEntryPointFromQuickCompiledCode();
       std::ostringstream stream;
       stream << std::hex
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 037a18a..5294d0e 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -37,7 +37,7 @@
  public:
   static JitCompiler* Create();
   virtual ~JitCompiler();
-  bool CompileMethod(Thread* self, ArtMethod* method)
+  bool CompileMethod(Thread* self, ArtMethod* method, bool osr)
       SHARED_REQUIRES(Locks::mutator_lock_);
   CompilerCallbacks* GetCompilerCallbacks() const;
   size_t GetTotalCompileTime() const {
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index cff2f47..894d29e 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -21,20 +21,20 @@
 #include "common_compiler_test.h"
 #include "compiled_method.h"
 #include "compiler.h"
+#include "debug/method_debug_info.h"
 #include "dex/pass_manager.h"
 #include "dex/quick/dex_file_to_method_inliner_map.h"
 #include "dex/quick_compiler_callbacks.h"
 #include "dex/verification_results.h"
 #include "driver/compiler_driver.h"
 #include "driver/compiler_options.h"
-#include "dwarf/method_debug_info.h"
 #include "elf_writer.h"
 #include "elf_writer_quick.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "linker/vector_output_stream.h"
 #include "mirror/class-inl.h"
-#include "mirror/object_array-inl.h"
 #include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
 #include "oat_file-inl.h"
 #include "oat_writer.h"
 #include "scoped_thread_state_change.h"
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 90ac499..47dcfd5 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -29,11 +29,11 @@
 #include "class_linker.h"
 #include "compiled_class.h"
 #include "compiled_method.h"
-#include "dex_file-inl.h"
+#include "debug/method_debug_info.h"
 #include "dex/verification_results.h"
+#include "dex_file-inl.h"
 #include "driver/compiler_driver.h"
 #include "driver/compiler_options.h"
-#include "dwarf/method_debug_info.h"
 #include "gc/space/image_space.h"
 #include "gc/space/space.h"
 #include "handle_scope-inl.h"
@@ -811,7 +811,7 @@
         // Record debug information for this function if we are doing that.
         const uint32_t quick_code_start = quick_code_offset -
             writer_->oat_header_->GetExecutableOffset() - thumb_offset;
-        writer_->method_info_.push_back(dwarf::MethodDebugInfo {
+        writer_->method_info_.push_back(debug::MethodDebugInfo {
             dex_file_,
             class_def_index_,
             it.GetMemberIndex(),
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 14c6d50..5a55fc6 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -43,9 +43,9 @@
 class TypeLookupTable;
 class ZipEntry;
 
-namespace dwarf {
+namespace debug {
 struct MethodDebugInfo;
-}  // namespace dwarf
+}  // namespace debug
 
 // OatHeader         variable length with count of D OatDexFiles
 //
@@ -193,8 +193,8 @@
 
   ~OatWriter();
 
-  ArrayRef<const dwarf::MethodDebugInfo> GetMethodDebugInfo() const {
-    return ArrayRef<const dwarf::MethodDebugInfo>(method_info_);
+  ArrayRef<const debug::MethodDebugInfo> GetMethodDebugInfo() const {
+    return ArrayRef<const debug::MethodDebugInfo>(method_info_);
   }
 
   const CompilerDriver* GetCompilerDriver() {
@@ -289,7 +289,7 @@
   // We need this because we keep plain pointers to the strings' c_str().
   std::list<std::string> zipped_dex_file_locations_;
 
-  dchecked_vector<dwarf::MethodDebugInfo> method_info_;
+  dchecked_vector<debug::MethodDebugInfo> method_info_;
 
   const CompilerDriver* compiler_driver_;
   ImageWriter* image_writer_;
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index c7430e7..8d77daf 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -72,74 +72,6 @@
   size_t index_;
 };
 
-class SwitchTable : public ValueObject {
- public:
-  SwitchTable(const Instruction& instruction, uint32_t dex_pc, bool sparse)
-      : instruction_(instruction), dex_pc_(dex_pc), sparse_(sparse) {
-    int32_t table_offset = instruction.VRegB_31t();
-    const uint16_t* table = reinterpret_cast<const uint16_t*>(&instruction) + table_offset;
-    if (sparse) {
-      CHECK_EQ(table[0], static_cast<uint16_t>(Instruction::kSparseSwitchSignature));
-    } else {
-      CHECK_EQ(table[0], static_cast<uint16_t>(Instruction::kPackedSwitchSignature));
-    }
-    num_entries_ = table[1];
-    values_ = reinterpret_cast<const int32_t*>(&table[2]);
-  }
-
-  uint16_t GetNumEntries() const {
-    return num_entries_;
-  }
-
-  void CheckIndex(size_t index) const {
-    if (sparse_) {
-      // In a sparse table, we have num_entries_ keys and num_entries_ values, in that order.
-      DCHECK_LT(index, 2 * static_cast<size_t>(num_entries_));
-    } else {
-      // In a packed table, we have the starting key and num_entries_ values.
-      DCHECK_LT(index, 1 + static_cast<size_t>(num_entries_));
-    }
-  }
-
-  int32_t GetEntryAt(size_t index) const {
-    CheckIndex(index);
-    return values_[index];
-  }
-
-  uint32_t GetDexPcForIndex(size_t index) const {
-    CheckIndex(index);
-    return dex_pc_ +
-        (reinterpret_cast<const int16_t*>(values_ + index) -
-         reinterpret_cast<const int16_t*>(&instruction_));
-  }
-
-  // Index of the first value in the table.
-  size_t GetFirstValueIndex() const {
-    if (sparse_) {
-      // In a sparse table, we have num_entries_ keys and num_entries_ values, in that order.
-      return num_entries_;
-    } else {
-      // In a packed table, we have the starting key and num_entries_ values.
-      return 1;
-    }
-  }
-
- private:
-  const Instruction& instruction_;
-  const uint32_t dex_pc_;
-
-  // Whether this is a sparse-switch table (or a packed-switch one).
-  const bool sparse_;
-
-  // This can't be const as it needs to be computed off of the given instruction, and complicated
-  // expressions in the initializer list seemed very ugly.
-  uint16_t num_entries_;
-
-  const int32_t* values_;
-
-  DISALLOW_COPY_AND_ASSIGN(SwitchTable);
-};
-
 void HGraphBuilder::InitializeLocals(uint16_t count) {
   graph_->SetNumberOfVRegs(count);
   locals_.resize(count);
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 1d604e7..93e17d6 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -30,7 +30,6 @@
 namespace art {
 
 class Instruction;
-class SwitchTable;
 
 class HGraphBuilder : public ValueObject {
  public:
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index a3bbfdb..e1b83f0 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -629,8 +629,76 @@
   return stack_map_stream_.PrepareForFillIn();
 }
 
-void CodeGenerator::BuildStackMaps(MemoryRegion region) {
+static void CheckCovers(uint32_t dex_pc,
+                        const HGraph& graph,
+                        const CodeInfo& code_info,
+                        const ArenaVector<HSuspendCheck*>& loop_headers,
+                        ArenaVector<size_t>* covered) {
+  StackMapEncoding encoding = code_info.ExtractEncoding();
+  for (size_t i = 0; i < loop_headers.size(); ++i) {
+    if (loop_headers[i]->GetDexPc() == dex_pc) {
+      if (graph.IsCompilingOsr()) {
+        DCHECK(code_info.GetOsrStackMapForDexPc(dex_pc, encoding).IsValid());
+      }
+      ++(*covered)[i];
+    }
+  }
+}
+
+// Debug helper to ensure loop entries in compiled code are matched by
+// dex branch instructions.
+static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
+                                            const CodeInfo& code_info,
+                                            const DexFile::CodeItem& code_item) {
+  if (graph.HasTryCatch()) {
+    // One can write loops through try/catch, which we do not support for OSR anyway.
+    return;
+  }
+  ArenaVector<HSuspendCheck*> loop_headers(graph.GetArena()->Adapter(kArenaAllocMisc));
+  for (HReversePostOrderIterator it(graph); !it.Done(); it.Advance()) {
+    if (it.Current()->IsLoopHeader()) {
+      HSuspendCheck* suspend_check = it.Current()->GetLoopInformation()->GetSuspendCheck();
+      if (!suspend_check->GetEnvironment()->IsFromInlinedInvoke()) {
+        loop_headers.push_back(suspend_check);
+      }
+    }
+  }
+  ArenaVector<size_t> covered(loop_headers.size(), 0, graph.GetArena()->Adapter(kArenaAllocMisc));
+  const uint16_t* code_ptr = code_item.insns_;
+  const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
+
+  size_t dex_pc = 0;
+  while (code_ptr < code_end) {
+    const Instruction& instruction = *Instruction::At(code_ptr);
+    if (instruction.IsBranch()) {
+      uint32_t target = dex_pc + instruction.GetTargetOffset();
+      CheckCovers(target, graph, code_info, loop_headers, &covered);
+    } else if (instruction.IsSwitch()) {
+      SwitchTable table(instruction, dex_pc, instruction.Opcode() == Instruction::SPARSE_SWITCH);
+      uint16_t num_entries = table.GetNumEntries();
+      size_t offset = table.GetFirstValueIndex();
+
+      // Use a larger loop counter type to avoid overflow issues.
+      for (size_t i = 0; i < num_entries; ++i) {
+        // The target of the case.
+        uint32_t target = dex_pc + table.GetEntryAt(i + offset);
+        CheckCovers(target, graph, code_info, loop_headers, &covered);
+      }
+    }
+    dex_pc += instruction.SizeInCodeUnits();
+    code_ptr += instruction.SizeInCodeUnits();
+  }
+
+  for (size_t i = 0; i < covered.size(); ++i) {
+    DCHECK_NE(covered[i], 0u) << "Loop in compiled code has no dex branch equivalent";
+  }
+}
+
+void CodeGenerator::BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item) {
   stack_map_stream_.FillIn(region);
+  if (kIsDebugBuild) {
+    CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(region), code_item);
+  }
 }
 
 void CodeGenerator::RecordPcInfo(HInstruction* instruction,
@@ -705,6 +773,46 @@
 
   EmitEnvironment(instruction->GetEnvironment(), slow_path);
   stack_map_stream_.EndStackMapEntry();
+
+  HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
+  if (instruction->IsSuspendCheck() &&
+      (info != nullptr) &&
+      graph_->IsCompilingOsr() &&
+      (inlining_depth == 0)) {
+    DCHECK_EQ(info->GetSuspendCheck(), instruction);
+    // We duplicate the stack map as a marker that this stack map can be an OSR entry.
+    // Duplicating it avoids having the runtime recognize and skip an OSR stack map.
+    DCHECK(info->IsIrreducible());
+    stack_map_stream_.BeginStackMapEntry(
+        dex_pc, native_pc, register_mask, locations->GetStackMask(), outer_environment_size, 0);
+    EmitEnvironment(instruction->GetEnvironment(), slow_path);
+    stack_map_stream_.EndStackMapEntry();
+    if (kIsDebugBuild) {
+      HEnvironment* environment = instruction->GetEnvironment();
+      for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
+        HInstruction* in_environment = environment->GetInstructionAt(i);
+        if (in_environment != nullptr) {
+          DCHECK(in_environment->IsPhi() || in_environment->IsConstant());
+          Location location = environment->GetLocationAt(i);
+          DCHECK(location.IsStackSlot() ||
+                 location.IsDoubleStackSlot() ||
+                 location.IsConstant() ||
+                 location.IsInvalid());
+          if (location.IsStackSlot() || location.IsDoubleStackSlot()) {
+            DCHECK_LT(location.GetStackIndex(), static_cast<int32_t>(GetFrameSize()));
+          }
+        }
+      }
+    }
+  } else if (kIsDebugBuild) {
+    // Ensure stack maps are unique, by checking that the native pc in the stack map
+    // last emitted is different than the native pc of the stack map just emitted.
+    size_t number_of_stack_maps = stack_map_stream_.GetNumberOfStackMaps();
+    if (number_of_stack_maps > 1) {
+      DCHECK_NE(stack_map_stream_.GetStackMap(number_of_stack_maps - 1).native_pc_offset,
+                stack_map_stream_.GetStackMap(number_of_stack_maps - 2).native_pc_offset);
+    }
+  }
 }
 
 bool CodeGenerator::HasStackMapAtCurrentPc() {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 4f8f146..0a688cf 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -288,7 +288,7 @@
     slow_paths_.push_back(slow_path);
   }
 
-  void BuildStackMaps(MemoryRegion region);
+  void BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item);
   size_t ComputeStackMapsSize();
 
   bool IsLeafMethod() const {
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index c2d9edd..e434932 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -3750,6 +3750,7 @@
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
   switch (compare->InputAt(0)->GetType()) {
+    case Primitive::kPrimInt:
     case Primitive::kPrimLong: {
       locations->SetInAt(0, Location::RequiresRegister());
       locations->SetInAt(1, Location::RequiresRegister());
@@ -3779,6 +3780,13 @@
   Primitive::Type type = compare->InputAt(0)->GetType();
   Condition less_cond;
   switch (type) {
+    case Primitive::kPrimInt: {
+      __ LoadImmediate(out, 0);
+      __ cmp(left.AsRegister<Register>(),
+             ShifterOperand(right.AsRegister<Register>()));  // Signed compare.
+      less_cond = LT;
+      break;
+    }
     case Primitive::kPrimLong: {
       __ cmp(left.AsRegisterPairHigh<Register>(),
              ShifterOperand(right.AsRegisterPairHigh<Register>()));  // Signed compare.
@@ -3808,6 +3816,7 @@
       LOG(FATAL) << "Unexpected compare type " << type;
       UNREACHABLE();
   }
+
   __ b(&done, EQ);
   __ b(&less, less_cond);
 
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 4179fab..cfdf6b1 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2408,6 +2408,7 @@
       new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
   Primitive::Type in_type = compare->InputAt(0)->GetType();
   switch (in_type) {
+    case Primitive::kPrimInt:
     case Primitive::kPrimLong: {
       locations->SetInAt(0, Location::RequiresRegister());
       locations->SetInAt(1, ARM64EncodableConstantOrRegister(compare->InputAt(1), compare));
@@ -2436,14 +2437,14 @@
   //  1 if: left  > right
   // -1 if: left  < right
   switch (in_type) {
+    case Primitive::kPrimInt:
     case Primitive::kPrimLong: {
       Register result = OutputRegister(compare);
       Register left = InputRegisterAt(compare, 0);
       Operand right = InputOperandAt(compare, 1);
-
       __ Cmp(left, right);
-      __ Cset(result, ne);
-      __ Cneg(result, result, lt);
+      __ Cset(result, ne);          // result == +1 if NE or 0 otherwise
+      __ Cneg(result, result, lt);  // result == -1 if LT or unchanged otherwise
       break;
     }
     case Primitive::kPrimFloat:
@@ -4879,20 +4880,18 @@
     static_assert(
         sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
         "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
-    temp2 = temps.AcquireW();
     // /* HeapReference<Object> */ ref =
     //     *(obj + offset + index * sizeof(HeapReference<Object>))
-    MemOperand source = HeapOperand(obj);
+    const size_t shift_amount = Primitive::ComponentSizeShift(type);
     if (index.IsConstant()) {
-      uint32_t computed_offset =
-          offset + (Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type));
-      source = HeapOperand(obj, computed_offset);
+      uint32_t computed_offset = offset + (Int64ConstantFrom(index) << shift_amount);
+      Load(type, ref_reg, HeapOperand(obj, computed_offset));
     } else {
+      temp2 = temps.AcquireW();
       __ Add(temp2, obj, offset);
-      source = HeapOperand(temp2, XRegisterFrom(index), LSL, Primitive::ComponentSizeShift(type));
+      Load(type, ref_reg, HeapOperand(temp2, XRegisterFrom(index), LSL, shift_amount));
+      temps.Release(temp2);
     }
-    Load(type, ref_reg, source);
-    temps.Release(temp2);
   } else {
     // /* HeapReference<Object> */ ref = *(obj + offset)
     MemOperand field = HeapOperand(obj, offset);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 961fe62..e9c0b6a 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -2123,6 +2123,7 @@
       new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
 
   switch (in_type) {
+    case Primitive::kPrimInt:
     case Primitive::kPrimLong:
       locations->SetInAt(0, Location::RequiresRegister());
       locations->SetInAt(1, Location::RequiresRegister());
@@ -2153,6 +2154,14 @@
   //  1 if: left  > right
   // -1 if: left  < right
   switch (in_type) {
+    case Primitive::kPrimInt: {
+      Register lhs = locations->InAt(0).AsRegister<Register>();
+      Register rhs = locations->InAt(1).AsRegister<Register>();
+      __ Slt(TMP, lhs, rhs);
+      __ Slt(res, rhs, lhs);
+      __ Subu(res, res, TMP);
+      break;
+    }
     case Primitive::kPrimLong: {
       MipsLabel done;
       Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 3e1563c..da98a89 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1763,6 +1763,7 @@
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare);
 
   switch (in_type) {
+    case Primitive::kPrimInt:
     case Primitive::kPrimLong:
       locations->SetInAt(0, Location::RequiresRegister());
       locations->SetInAt(1, Location::RegisterOrConstant(compare->InputAt(1)));
@@ -1791,16 +1792,25 @@
   //  1 if: left  > right
   // -1 if: left  < right
   switch (in_type) {
+    case Primitive::kPrimInt:
     case Primitive::kPrimLong: {
       GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
       Location rhs_location = locations->InAt(1);
       bool use_imm = rhs_location.IsConstant();
       GpuRegister rhs = ZERO;
       if (use_imm) {
-        int64_t value = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()->AsConstant());
-        if (value != 0) {
-          rhs = AT;
-          __ LoadConst64(rhs, value);
+        if (in_type == Primitive::kPrimInt) {
+          int32_t value = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant()->AsConstant());
+          if (value != 0) {
+            rhs = AT;
+            __ LoadConst32(rhs, value);
+          }
+        } else {
+          int64_t value = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()->AsConstant());
+          if (value != 0) {
+            rhs = AT;
+            __ LoadConst64(rhs, value);
+          }
         }
       } else {
         rhs = rhs_location.AsRegister<GpuRegister>();
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index da054ba..de62010 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1350,11 +1350,7 @@
     int32_t val_high = High32Bits(value);
     int32_t val_low = Low32Bits(value);
 
-    if (val_high == 0) {
-      __ testl(left_high, left_high);
-    } else {
-      __ cmpl(left_high, Immediate(val_high));
-    }
+    codegen_->Compare32BitValue(left_high, val_high);
     if (if_cond == kCondNE) {
       __ j(X86Condition(true_high_cond), true_label);
     } else if (if_cond == kCondEQ) {
@@ -1364,11 +1360,7 @@
       __ j(X86Condition(false_high_cond), false_label);
     }
     // Must be equal high, so compare the lows.
-    if (val_low == 0) {
-      __ testl(left_low, left_low);
-    } else {
-      __ cmpl(left_low, Immediate(val_low));
-    }
+    codegen_->Compare32BitValue(left_low, val_low);
   } else {
     Register right_high = right.AsRegisterPairHigh<Register>();
     Register right_low = right.AsRegisterPairLow<Register>();
@@ -1389,6 +1381,40 @@
   __ j(final_condition, true_label);
 }
 
+void InstructionCodeGeneratorX86::GenerateFPCompare(Location lhs,
+                                                    Location rhs,
+                                                    HInstruction* insn,
+                                                    bool is_double) {
+  HX86LoadFromConstantTable* const_area = insn->InputAt(1)->AsX86LoadFromConstantTable();
+  if (is_double) {
+    if (rhs.IsFpuRegister()) {
+      __ ucomisd(lhs.AsFpuRegister<XmmRegister>(), rhs.AsFpuRegister<XmmRegister>());
+    } else if (const_area != nullptr) {
+      DCHECK(const_area->IsEmittedAtUseSite());
+      __ ucomisd(lhs.AsFpuRegister<XmmRegister>(),
+                 codegen_->LiteralDoubleAddress(
+                   const_area->GetConstant()->AsDoubleConstant()->GetValue(),
+                   const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+    } else {
+      DCHECK(rhs.IsDoubleStackSlot());
+      __ ucomisd(lhs.AsFpuRegister<XmmRegister>(), Address(ESP, rhs.GetStackIndex()));
+    }
+  } else {
+    if (rhs.IsFpuRegister()) {
+      __ ucomiss(lhs.AsFpuRegister<XmmRegister>(), rhs.AsFpuRegister<XmmRegister>());
+    } else if (const_area != nullptr) {
+      DCHECK(const_area->IsEmittedAtUseSite());
+      __ ucomiss(lhs.AsFpuRegister<XmmRegister>(),
+                 codegen_->LiteralFloatAddress(
+                   const_area->GetConstant()->AsFloatConstant()->GetValue(),
+                   const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+    } else {
+      DCHECK(rhs.IsStackSlot());
+      __ ucomiss(lhs.AsFpuRegister<XmmRegister>(), Address(ESP, rhs.GetStackIndex()));
+    }
+  }
+}
+
 template<class LabelType>
 void InstructionCodeGeneratorX86::GenerateCompareTestAndBranch(HCondition* condition,
                                                                LabelType* true_target_in,
@@ -1409,11 +1435,11 @@
       GenerateLongComparesAndJumps(condition, true_target, false_target);
       break;
     case Primitive::kPrimFloat:
-      __ ucomiss(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
+      GenerateFPCompare(left, right, condition, false);
       GenerateFPJumps(condition, true_target, false_target);
       break;
     case Primitive::kPrimDouble:
-      __ ucomisd(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
+      GenerateFPCompare(left, right, condition, true);
       GenerateFPJumps(condition, true_target, false_target);
       break;
     default:
@@ -1513,11 +1539,7 @@
       __ cmpl(lhs.AsRegister<Register>(), rhs.AsRegister<Register>());
     } else if (rhs.IsConstant()) {
       int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
-      if (constant == 0) {
-        __ testl(lhs.AsRegister<Register>(), lhs.AsRegister<Register>());
-      } else {
-        __ cmpl(lhs.AsRegister<Register>(), Immediate(constant));
-      }
+      codegen_->Compare32BitValue(lhs.AsRegister<Register>(), constant);
     } else {
       __ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
     }
@@ -1665,7 +1687,13 @@
     case Primitive::kPrimFloat:
     case Primitive::kPrimDouble: {
       locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
+      if (cond->InputAt(1)->IsX86LoadFromConstantTable()) {
+        DCHECK(cond->InputAt(1)->IsEmittedAtUseSite());
+      } else if (cond->InputAt(1)->IsConstant()) {
+        locations->SetInAt(1, Location::RequiresFpuRegister());
+      } else {
+        locations->SetInAt(1, Location::Any());
+      }
       if (!cond->IsEmittedAtUseSite()) {
         locations->SetOut(Location::RequiresRegister());
       }
@@ -1704,11 +1732,7 @@
         __ cmpl(lhs.AsRegister<Register>(), rhs.AsRegister<Register>());
       } else if (rhs.IsConstant()) {
         int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
-        if (constant == 0) {
-          __ testl(lhs.AsRegister<Register>(), lhs.AsRegister<Register>());
-        } else {
-          __ cmpl(lhs.AsRegister<Register>(), Immediate(constant));
-        }
+        codegen_->Compare32BitValue(lhs.AsRegister<Register>(), constant);
       } else {
         __ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
       }
@@ -1719,11 +1743,11 @@
       GenerateLongComparesAndJumps(cond, &true_label, &false_label);
       break;
     case Primitive::kPrimFloat:
-      __ ucomiss(lhs.AsFpuRegister<XmmRegister>(), rhs.AsFpuRegister<XmmRegister>());
+      GenerateFPCompare(lhs, rhs, cond, false);
       GenerateFPJumps(cond, &true_label, &false_label);
       break;
     case Primitive::kPrimDouble:
-      __ ucomisd(lhs.AsFpuRegister<XmmRegister>(), rhs.AsFpuRegister<XmmRegister>());
+      GenerateFPCompare(lhs, rhs, cond, true);
       GenerateFPJumps(cond, &true_label, &false_label);
       break;
   }
@@ -2159,6 +2183,32 @@
   }
 }
 
+void LocationsBuilderX86::VisitX86FPNeg(HX86FPNeg* neg) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+  DCHECK(Primitive::IsFloatingPointType(neg->GetType()));
+  locations->SetInAt(0, Location::RequiresFpuRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetOut(Location::SameAsFirstInput());
+  locations->AddTemp(Location::RequiresFpuRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitX86FPNeg(HX86FPNeg* neg) {
+  LocationSummary* locations = neg->GetLocations();
+  Location out = locations->Out();
+  DCHECK(locations->InAt(0).Equals(out));
+
+  Register constant_area = locations->InAt(1).AsRegister<Register>();
+  XmmRegister mask = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+  if (neg->GetType() == Primitive::kPrimFloat) {
+    __ movss(mask, codegen_->LiteralInt32Address(INT32_C(0x80000000), constant_area));
+    __ xorps(out.AsFpuRegister<XmmRegister>(), mask);
+  } else {
+     __ movsd(mask, codegen_->LiteralInt64Address(INT64_C(0x8000000000000000), constant_area));
+     __ xorpd(out.AsFpuRegister<XmmRegister>(), mask);
+  }
+}
+
 void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
   Primitive::Type result_type = conversion->GetResultType();
   Primitive::Type input_type = conversion->GetInputType();
@@ -4077,6 +4127,7 @@
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
   switch (compare->InputAt(0)->GetType()) {
+    case Primitive::kPrimInt:
     case Primitive::kPrimLong: {
       locations->SetInAt(0, Location::RequiresRegister());
       locations->SetInAt(1, Location::Any());
@@ -4086,7 +4137,13 @@
     case Primitive::kPrimFloat:
     case Primitive::kPrimDouble: {
       locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
+      if (compare->InputAt(1)->IsX86LoadFromConstantTable()) {
+        DCHECK(compare->InputAt(1)->IsEmittedAtUseSite());
+      } else if (compare->InputAt(1)->IsConstant()) {
+        locations->SetInAt(1, Location::RequiresFpuRegister());
+      } else {
+        locations->SetInAt(1, Location::Any());
+      }
       locations->SetOut(Location::RequiresRegister());
       break;
     }
@@ -4102,7 +4159,21 @@
   Location right = locations->InAt(1);
 
   NearLabel less, greater, done;
+  Condition less_cond = kLess;
+
   switch (compare->InputAt(0)->GetType()) {
+    case Primitive::kPrimInt: {
+      Register left_reg = left.AsRegister<Register>();
+      if (right.IsConstant()) {
+        int32_t value = right.GetConstant()->AsIntConstant()->GetValue();
+        codegen_->Compare32BitValue(left_reg, value);
+      } else if (right.IsStackSlot()) {
+        __ cmpl(left_reg, Address(ESP, right.GetStackIndex()));
+      } else {
+        __ cmpl(left_reg, right.AsRegister<Register>());
+      }
+      break;
+    }
     case Primitive::kPrimLong: {
       Register left_low = left.AsRegisterPairLow<Register>();
       Register left_high = left.AsRegisterPairHigh<Register>();
@@ -4124,11 +4195,7 @@
         __ cmpl(left_high, Address(ESP, right.GetHighStackIndex(kX86WordSize)));
       } else {
         DCHECK(right_is_const) << right;
-        if (val_high == 0) {
-          __ testl(left_high, left_high);
-        } else {
-          __ cmpl(left_high, Immediate(val_high));
-        }
+        codegen_->Compare32BitValue(left_high, val_high);
       }
       __ j(kLess, &less);  // Signed compare.
       __ j(kGreater, &greater);  // Signed compare.
@@ -4138,30 +4205,30 @@
         __ cmpl(left_low, Address(ESP, right.GetStackIndex()));
       } else {
         DCHECK(right_is_const) << right;
-        if (val_low == 0) {
-          __ testl(left_low, left_low);
-        } else {
-          __ cmpl(left_low, Immediate(val_low));
-        }
+        codegen_->Compare32BitValue(left_low, val_low);
       }
+      less_cond = kBelow;  // for CF (unsigned).
       break;
     }
     case Primitive::kPrimFloat: {
-      __ ucomiss(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
+      GenerateFPCompare(left, right, compare, false);
       __ j(kUnordered, compare->IsGtBias() ? &greater : &less);
+      less_cond = kBelow;  // for CF (floats).
       break;
     }
     case Primitive::kPrimDouble: {
-      __ ucomisd(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
+      GenerateFPCompare(left, right, compare, true);
       __ j(kUnordered, compare->IsGtBias() ? &greater : &less);
+      less_cond = kBelow;  // for CF (floats).
       break;
     }
     default:
       LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
   }
+
   __ movl(out, Immediate(0));
   __ j(kEqual, &done);
-  __ j(kBelow, &less);  // kBelow is for CF (unsigned & floats).
+  __ j(less_cond, &less);
 
   __ Bind(&greater);
   __ movl(out, Immediate(1));
@@ -7121,6 +7188,22 @@
   return Address(reg, kDummy32BitOffset, fixup);
 }
 
+void CodeGeneratorX86::Load32BitValue(Register dest, int32_t value) {
+  if (value == 0) {
+    __ xorl(dest, dest);
+  } else {
+    __ movl(dest, Immediate(value));
+  }
+}
+
+void CodeGeneratorX86::Compare32BitValue(Register dest, int32_t value) {
+  if (value == 0) {
+    __ testl(dest, dest);
+  } else {
+    __ cmpl(dest, Immediate(value));
+  }
+}
+
 Address CodeGeneratorX86::LiteralCaseTable(HX86PackedSwitch* switch_instr,
                                            Register reg,
                                            Register value) {
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 0aef478..45e8ffa 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -296,6 +296,8 @@
                                    HBasicBlock* switch_block,
                                    HBasicBlock* default_block);
 
+  void GenerateFPCompare(Location lhs, Location rhs, HInstruction* insn, bool is_double);
+
   X86Assembler* const assembler_;
   CodeGeneratorX86* const codegen_;
 
@@ -450,6 +452,12 @@
   Address LiteralInt32Address(int32_t v, Register reg);
   Address LiteralInt64Address(int64_t v, Register reg);
 
+  // Load a 32-bit value into a register in the most efficient manner.
+  void Load32BitValue(Register dest, int32_t value);
+
+  // Compare a register with a 32-bit value in the most efficient manner.
+  void Compare32BitValue(Register dest, int32_t value);
+
   Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value);
 
   void Finalize(CodeAllocator* allocator) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 6795488..99396cd 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1126,30 +1126,43 @@
     return;
   }
   if (destination.IsRegister()) {
+    CpuRegister dest = destination.AsRegister<CpuRegister>();
     if (source.IsRegister()) {
-      __ movq(destination.AsRegister<CpuRegister>(), source.AsRegister<CpuRegister>());
+      __ movq(dest, source.AsRegister<CpuRegister>());
     } else if (source.IsFpuRegister()) {
-      __ movd(destination.AsRegister<CpuRegister>(), source.AsFpuRegister<XmmRegister>());
+      __ movd(dest, source.AsFpuRegister<XmmRegister>());
     } else if (source.IsStackSlot()) {
-      __ movl(destination.AsRegister<CpuRegister>(),
-              Address(CpuRegister(RSP), source.GetStackIndex()));
+      __ movl(dest, Address(CpuRegister(RSP), source.GetStackIndex()));
+    } else if (source.IsConstant()) {
+      HConstant* constant = source.GetConstant();
+      if (constant->IsLongConstant()) {
+        Load64BitValue(dest, constant->AsLongConstant()->GetValue());
+      } else {
+        Load32BitValue(dest, GetInt32ValueOf(constant));
+      }
     } else {
       DCHECK(source.IsDoubleStackSlot());
-      __ movq(destination.AsRegister<CpuRegister>(),
-              Address(CpuRegister(RSP), source.GetStackIndex()));
+      __ movq(dest, Address(CpuRegister(RSP), source.GetStackIndex()));
     }
   } else if (destination.IsFpuRegister()) {
+    XmmRegister dest = destination.AsFpuRegister<XmmRegister>();
     if (source.IsRegister()) {
-      __ movd(destination.AsFpuRegister<XmmRegister>(), source.AsRegister<CpuRegister>());
+      __ movd(dest, source.AsRegister<CpuRegister>());
     } else if (source.IsFpuRegister()) {
-      __ movaps(destination.AsFpuRegister<XmmRegister>(), source.AsFpuRegister<XmmRegister>());
+      __ movaps(dest, source.AsFpuRegister<XmmRegister>());
+    } else if (source.IsConstant()) {
+      HConstant* constant = source.GetConstant();
+      int64_t value = CodeGenerator::GetInt64ValueOf(constant);
+      if (constant->IsFloatConstant()) {
+        Load32BitValue(dest, static_cast<int32_t>(value));
+      } else {
+        Load64BitValue(dest, value);
+      }
     } else if (source.IsStackSlot()) {
-      __ movss(destination.AsFpuRegister<XmmRegister>(),
-              Address(CpuRegister(RSP), source.GetStackIndex()));
+      __ movss(dest, Address(CpuRegister(RSP), source.GetStackIndex()));
     } else {
       DCHECK(source.IsDoubleStackSlot());
-      __ movsd(destination.AsFpuRegister<XmmRegister>(),
-               Address(CpuRegister(RSP), source.GetStackIndex()));
+      __ movsd(dest, Address(CpuRegister(RSP), source.GetStackIndex()));
     }
   } else if (destination.IsStackSlot()) {
     if (source.IsRegister()) {
@@ -1345,42 +1358,44 @@
   __ j(X86_64FPCondition(cond->GetCondition()), true_label);
 }
 
-template<class LabelType>
-void InstructionCodeGeneratorX86_64::GenerateCompareTestAndBranch(HCondition* condition,
-                                                                  LabelType* true_target_in,
-                                                                  LabelType* false_target_in) {
-  // Generated branching requires both targets to be explicit. If either of the
-  // targets is nullptr (fallthrough) use and bind `fallthrough_target` instead.
-  LabelType fallthrough_target;
-  LabelType* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in;
-  LabelType* false_target = false_target_in == nullptr ? &fallthrough_target : false_target_in;
-
+void InstructionCodeGeneratorX86_64::GenerateCompareTest(HCondition* condition) {
   LocationSummary* locations = condition->GetLocations();
+
   Location left = locations->InAt(0);
   Location right = locations->InAt(1);
-
   Primitive::Type type = condition->InputAt(0)->GetType();
   switch (type) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimNot: {
+      CpuRegister left_reg = left.AsRegister<CpuRegister>();
+      if (right.IsConstant()) {
+        int32_t value = CodeGenerator::GetInt32ValueOf(right.GetConstant());
+        if (value == 0) {
+          __ testl(left_reg, left_reg);
+        } else {
+          __ cmpl(left_reg, Immediate(value));
+        }
+      } else if (right.IsStackSlot()) {
+        __ cmpl(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
+      } else {
+        __ cmpl(left_reg, right.AsRegister<CpuRegister>());
+      }
+      break;
+    }
     case Primitive::kPrimLong: {
       CpuRegister left_reg = left.AsRegister<CpuRegister>();
       if (right.IsConstant()) {
         int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
-        if (IsInt<32>(value)) {
-          if (value == 0) {
-            __ testq(left_reg, left_reg);
-          } else {
-            __ cmpq(left_reg, Immediate(static_cast<int32_t>(value)));
-          }
-        } else {
-          // Value won't fit in a 32-bit integer.
-          __ cmpq(left_reg, codegen_->LiteralInt64Address(value));
-        }
+        codegen_->Compare64BitValue(left_reg, value);
       } else if (right.IsDoubleStackSlot()) {
         __ cmpq(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
       } else {
         __ cmpq(left_reg, right.AsRegister<CpuRegister>());
       }
-      __ j(X86_64IntegerCondition(condition->GetCondition()), true_target);
       break;
     }
     case Primitive::kPrimFloat: {
@@ -1395,7 +1410,6 @@
         __ ucomiss(left.AsFpuRegister<XmmRegister>(),
                    Address(CpuRegister(RSP), right.GetStackIndex()));
       }
-      GenerateFPJumps(condition, true_target, false_target);
       break;
     }
     case Primitive::kPrimDouble: {
@@ -1410,6 +1424,38 @@
         __ ucomisd(left.AsFpuRegister<XmmRegister>(),
                    Address(CpuRegister(RSP), right.GetStackIndex()));
       }
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected condition type " << type;
+  }
+}
+
+template<class LabelType>
+void InstructionCodeGeneratorX86_64::GenerateCompareTestAndBranch(HCondition* condition,
+                                                                  LabelType* true_target_in,
+                                                                  LabelType* false_target_in) {
+  // Generated branching requires both targets to be explicit. If either of the
+  // targets is nullptr (fallthrough) use and bind `fallthrough_target` instead.
+  LabelType fallthrough_target;
+  LabelType* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in;
+  LabelType* false_target = false_target_in == nullptr ? &fallthrough_target : false_target_in;
+
+  // Generate the comparison to set the CC.
+  GenerateCompareTest(condition);
+
+  // Now generate the correct jump(s).
+  Primitive::Type type = condition->InputAt(0)->GetType();
+  switch (type) {
+    case Primitive::kPrimLong: {
+      __ j(X86_64IntegerCondition(condition->GetCondition()), true_target);
+      break;
+    }
+    case Primitive::kPrimFloat: {
+      GenerateFPJumps(condition, true_target, false_target);
+      break;
+    }
+    case Primitive::kPrimDouble: {
       GenerateFPJumps(condition, true_target, false_target);
       break;
     }
@@ -1508,11 +1554,7 @@
       __ cmpl(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>());
     } else if (rhs.IsConstant()) {
       int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
-      if (constant == 0) {
-        __ testl(lhs.AsRegister<CpuRegister>(), lhs.AsRegister<CpuRegister>());
-      } else {
-        __ cmpl(lhs.AsRegister<CpuRegister>(), Immediate(constant));
-      }
+      codegen_->Compare32BitValue(lhs.AsRegister<CpuRegister>(), constant);
     } else {
       __ cmpl(lhs.AsRegister<CpuRegister>(),
               Address(CpuRegister(RSP), rhs.GetStackIndex()));
@@ -1564,14 +1606,37 @@
                                /* false_target */ nullptr);
 }
 
+static bool SelectCanUseCMOV(HSelect* select) {
+  // There are no conditional move instructions for XMMs.
+  if (Primitive::IsFloatingPointType(select->GetType())) {
+    return false;
+  }
+
+  // A FP condition doesn't generate the single CC that we need.
+  HInstruction* condition = select->GetCondition();
+  if (condition->IsCondition() &&
+      Primitive::IsFloatingPointType(condition->InputAt(0)->GetType())) {
+    return false;
+  }
+
+  // We can generate a CMOV for this Select.
+  return true;
+}
+
 void LocationsBuilderX86_64::VisitSelect(HSelect* select) {
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
   if (Primitive::IsFloatingPointType(select->GetType())) {
     locations->SetInAt(0, Location::RequiresFpuRegister());
-    locations->SetInAt(1, Location::RequiresFpuRegister());
+    // Since we can't use CMOV, there is no need to force 'true' into a register.
+    locations->SetInAt(1, Location::Any());
   } else {
     locations->SetInAt(0, Location::RequiresRegister());
-    locations->SetInAt(1, Location::RequiresRegister());
+    if (SelectCanUseCMOV(select)) {
+      locations->SetInAt(1, Location::RequiresRegister());
+    } else {
+      // Since we can't use CMOV, there is no need to force 'true' into a register.
+      locations->SetInAt(1, Location::Any());
+    }
   }
   if (IsBooleanValueOrMaterializedCondition(select->GetCondition())) {
     locations->SetInAt(2, Location::RequiresRegister());
@@ -1581,13 +1646,52 @@
 
 void InstructionCodeGeneratorX86_64::VisitSelect(HSelect* select) {
   LocationSummary* locations = select->GetLocations();
-  NearLabel false_target;
-  GenerateTestAndBranch<NearLabel>(select,
-                                   /* condition_input_index */ 2,
-                                   /* true_target */ nullptr,
-                                   &false_target);
-  codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
-  __ Bind(&false_target);
+  if (SelectCanUseCMOV(select)) {
+    // If both the condition and the source types are integer, we can generate
+    // a CMOV to implement Select.
+    CpuRegister value_false = locations->InAt(0).AsRegister<CpuRegister>();
+    CpuRegister value_true = locations->InAt(1).AsRegister<CpuRegister>();
+    DCHECK(locations->InAt(0).Equals(locations->Out()));
+
+    HInstruction* select_condition = select->GetCondition();
+    Condition cond = kNotEqual;
+
+    // Figure out how to test the 'condition'.
+    if (select_condition->IsCondition()) {
+      HCondition* condition = select_condition->AsCondition();
+      if (!condition->IsEmittedAtUseSite()) {
+        // This was a previously materialized condition.
+        // Can we use the existing condition code?
+        if (AreEflagsSetFrom(condition, select)) {
+          // Materialization was the previous instruction.  Condition codes are right.
+          cond = X86_64IntegerCondition(condition->GetCondition());
+        } else {
+          // No, we have to recreate the condition code.
+          CpuRegister cond_reg = locations->InAt(2).AsRegister<CpuRegister>();
+          __ testl(cond_reg, cond_reg);
+        }
+      } else {
+        GenerateCompareTest(condition);
+        cond = X86_64IntegerCondition(condition->GetCondition());
+      }
+    } else {
+      // Must be a boolean condition, which needs to be compared to 0.
+      CpuRegister cond_reg = locations->InAt(2).AsRegister<CpuRegister>();
+      __ testl(cond_reg, cond_reg);
+    }
+
+    // If the condition is true, overwrite the output, which already contains false.
+    // Generate the correct sized CMOV.
+    __ cmov(cond, value_false, value_true, select->GetType() == Primitive::kPrimLong);
+  } else {
+    NearLabel false_target;
+    GenerateTestAndBranch<NearLabel>(select,
+                                     /* condition_input_index */ 2,
+                                     /* true_target */ nullptr,
+                                     &false_target);
+    codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
+    __ Bind(&false_target);
+  }
 }
 
 void LocationsBuilderX86_64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
@@ -1691,11 +1795,7 @@
         __ cmpl(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>());
       } else if (rhs.IsConstant()) {
         int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
-        if (constant == 0) {
-          __ testl(lhs.AsRegister<CpuRegister>(), lhs.AsRegister<CpuRegister>());
-        } else {
-          __ cmpl(lhs.AsRegister<CpuRegister>(), Immediate(constant));
-        }
+        codegen_->Compare32BitValue(lhs.AsRegister<CpuRegister>(), constant);
       } else {
         __ cmpl(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
       }
@@ -1709,16 +1809,7 @@
         __ cmpq(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>());
       } else if (rhs.IsConstant()) {
         int64_t value = rhs.GetConstant()->AsLongConstant()->GetValue();
-        if (IsInt<32>(value)) {
-          if (value == 0) {
-            __ testq(lhs.AsRegister<CpuRegister>(), lhs.AsRegister<CpuRegister>());
-          } else {
-            __ cmpq(lhs.AsRegister<CpuRegister>(), Immediate(static_cast<int32_t>(value)));
-          }
-        } else {
-          // Value won't fit in an int.
-          __ cmpq(lhs.AsRegister<CpuRegister>(), codegen_->LiteralInt64Address(value));
-        }
+        codegen_->Compare64BitValue(lhs.AsRegister<CpuRegister>(), value);
       } else {
         __ cmpq(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
       }
@@ -1850,6 +1941,7 @@
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
   switch (compare->InputAt(0)->GetType()) {
+    case Primitive::kPrimInt:
     case Primitive::kPrimLong: {
       locations->SetInAt(0, Location::RequiresRegister());
       locations->SetInAt(1, Location::Any());
@@ -1876,21 +1968,26 @@
 
   NearLabel less, greater, done;
   Primitive::Type type = compare->InputAt(0)->GetType();
+  Condition less_cond = kLess;
+
   switch (type) {
+    case Primitive::kPrimInt: {
+      CpuRegister left_reg = left.AsRegister<CpuRegister>();
+      if (right.IsConstant()) {
+        int32_t value = right.GetConstant()->AsIntConstant()->GetValue();
+        codegen_->Compare32BitValue(left_reg, value);
+      } else if (right.IsStackSlot()) {
+        __ cmpl(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
+      } else {
+        __ cmpl(left_reg, right.AsRegister<CpuRegister>());
+      }
+      break;
+    }
     case Primitive::kPrimLong: {
       CpuRegister left_reg = left.AsRegister<CpuRegister>();
       if (right.IsConstant()) {
         int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
-        if (IsInt<32>(value)) {
-          if (value == 0) {
-            __ testq(left_reg, left_reg);
-          } else {
-            __ cmpq(left_reg, Immediate(static_cast<int32_t>(value)));
-          }
-        } else {
-          // Value won't fit in an int.
-          __ cmpq(left_reg, codegen_->LiteralInt64Address(value));
-        }
+        codegen_->Compare64BitValue(left_reg, value);
       } else if (right.IsDoubleStackSlot()) {
         __ cmpq(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
       } else {
@@ -1909,6 +2006,7 @@
         __ ucomiss(left_reg, right.AsFpuRegister<XmmRegister>());
       }
       __ j(kUnordered, compare->IsGtBias() ? &greater : &less);
+      less_cond = kBelow;  //  ucomis{s,d} sets CF
       break;
     }
     case Primitive::kPrimDouble: {
@@ -1922,14 +2020,16 @@
         __ ucomisd(left_reg, right.AsFpuRegister<XmmRegister>());
       }
       __ j(kUnordered, compare->IsGtBias() ? &greater : &less);
+      less_cond = kBelow;  //  ucomis{s,d} sets CF
       break;
     }
     default:
       LOG(FATAL) << "Unexpected compare type " << type;
   }
+
   __ movl(out, Immediate(0));
   __ j(kEqual, &done);
-  __ j(type == Primitive::kPrimLong ? kLess : kBelow, &less);  //  ucomis{s,d} sets CF (kBelow)
+  __ j(less_cond, &less);
 
   __ Bind(&greater);
   __ movl(out, Immediate(1));
@@ -2750,11 +2850,7 @@
           } else if (in.IsConstant()) {
             int32_t v = in.GetConstant()->AsIntConstant()->GetValue();
             XmmRegister dest = out.AsFpuRegister<XmmRegister>();
-            if (v == 0) {
-              __ xorps(dest, dest);
-            } else {
-              __ movss(dest, codegen_->LiteralFloatAddress(static_cast<float>(v)));
-            }
+            codegen_->Load32BitValue(dest, static_cast<float>(v));
           } else {
             __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(),
                         Address(CpuRegister(RSP), in.GetStackIndex()), false);
@@ -2768,11 +2864,7 @@
           } else if (in.IsConstant()) {
             int64_t v = in.GetConstant()->AsLongConstant()->GetValue();
             XmmRegister dest = out.AsFpuRegister<XmmRegister>();
-            if (v == 0) {
-              __ xorps(dest, dest);
-            } else {
-              __ movss(dest, codegen_->LiteralFloatAddress(static_cast<float>(v)));
-            }
+            codegen_->Load64BitValue(dest, static_cast<double>(v));
           } else {
             __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(),
                         Address(CpuRegister(RSP), in.GetStackIndex()), true);
@@ -2786,11 +2878,7 @@
           } else if (in.IsConstant()) {
             double v = in.GetConstant()->AsDoubleConstant()->GetValue();
             XmmRegister dest = out.AsFpuRegister<XmmRegister>();
-            if (bit_cast<int64_t, double>(v) == 0) {
-              __ xorps(dest, dest);
-            } else {
-              __ movss(dest, codegen_->LiteralFloatAddress(static_cast<float>(v)));
-            }
+            codegen_->Load32BitValue(dest, static_cast<float>(v));
           } else {
             __ cvtsd2ss(out.AsFpuRegister<XmmRegister>(),
                         Address(CpuRegister(RSP), in.GetStackIndex()));
@@ -2817,11 +2905,7 @@
           } else if (in.IsConstant()) {
             int32_t v = in.GetConstant()->AsIntConstant()->GetValue();
             XmmRegister dest = out.AsFpuRegister<XmmRegister>();
-            if (v == 0) {
-              __ xorpd(dest, dest);
-            } else {
-              __ movsd(dest, codegen_->LiteralDoubleAddress(static_cast<double>(v)));
-            }
+            codegen_->Load64BitValue(dest, static_cast<double>(v));
           } else {
             __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(),
                         Address(CpuRegister(RSP), in.GetStackIndex()), false);
@@ -2835,11 +2919,7 @@
           } else if (in.IsConstant()) {
             int64_t v = in.GetConstant()->AsLongConstant()->GetValue();
             XmmRegister dest = out.AsFpuRegister<XmmRegister>();
-            if (v == 0) {
-              __ xorpd(dest, dest);
-            } else {
-              __ movsd(dest, codegen_->LiteralDoubleAddress(static_cast<double>(v)));
-            }
+            codegen_->Load64BitValue(dest, static_cast<double>(v));
           } else {
             __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(),
                         Address(CpuRegister(RSP), in.GetStackIndex()), true);
@@ -2853,11 +2933,7 @@
           } else if (in.IsConstant()) {
             float v = in.GetConstant()->AsFloatConstant()->GetValue();
             XmmRegister dest = out.AsFpuRegister<XmmRegister>();
-            if (bit_cast<int32_t, float>(v) == 0) {
-              __ xorpd(dest, dest);
-            } else {
-              __ movsd(dest, codegen_->LiteralDoubleAddress(static_cast<double>(v)));
-            }
+            codegen_->Load64BitValue(dest, static_cast<double>(v));
           } else {
             __ cvtss2sd(out.AsFpuRegister<XmmRegister>(),
                         Address(CpuRegister(RSP), in.GetStackIndex()));
@@ -5196,18 +5272,12 @@
       }
     } else if (constant->IsFloatConstant()) {
       float fp_value = constant->AsFloatConstant()->GetValue();
-      int32_t value = bit_cast<int32_t, float>(fp_value);
       if (destination.IsFpuRegister()) {
         XmmRegister dest = destination.AsFpuRegister<XmmRegister>();
-        if (value == 0) {
-          // easy FP 0.0.
-          __ xorps(dest, dest);
-        } else {
-          __ movss(dest, codegen_->LiteralFloatAddress(fp_value));
-        }
+        codegen_->Load32BitValue(dest, fp_value);
       } else {
         DCHECK(destination.IsStackSlot()) << destination;
-        Immediate imm(value);
+        Immediate imm(bit_cast<int32_t, float>(fp_value));
         __ movl(Address(CpuRegister(RSP), destination.GetStackIndex()), imm);
       }
     } else {
@@ -5216,11 +5286,7 @@
       int64_t value = bit_cast<int64_t, double>(fp_value);
       if (destination.IsFpuRegister()) {
         XmmRegister dest = destination.AsFpuRegister<XmmRegister>();
-        if (value == 0) {
-          __ xorpd(dest, dest);
-        } else {
-          __ movsd(dest, codegen_->LiteralDoubleAddress(fp_value));
-        }
+        codegen_->Load64BitValue(dest, fp_value);
       } else {
         DCHECK(destination.IsDoubleStackSlot()) << destination;
         codegen_->Store64BitValueToStack(destination, value);
@@ -6467,6 +6533,51 @@
   }
 }
 
+void CodeGeneratorX86_64::Load32BitValue(XmmRegister dest, int32_t value) {
+  if (value == 0) {
+    __ xorps(dest, dest);
+  } else {
+    __ movss(dest, LiteralInt32Address(value));
+  }
+}
+
+void CodeGeneratorX86_64::Load64BitValue(XmmRegister dest, int64_t value) {
+  if (value == 0) {
+    __ xorpd(dest, dest);
+  } else {
+    __ movsd(dest, LiteralInt64Address(value));
+  }
+}
+
+void CodeGeneratorX86_64::Load32BitValue(XmmRegister dest, float value) {
+  Load32BitValue(dest, bit_cast<int32_t, float>(value));
+}
+
+void CodeGeneratorX86_64::Load64BitValue(XmmRegister dest, double value) {
+  Load64BitValue(dest, bit_cast<int64_t, double>(value));
+}
+
+void CodeGeneratorX86_64::Compare32BitValue(CpuRegister dest, int32_t value) {
+  if (value == 0) {
+    __ testl(dest, dest);
+  } else {
+    __ cmpl(dest, Immediate(value));
+  }
+}
+
+void CodeGeneratorX86_64::Compare64BitValue(CpuRegister dest, int64_t value) {
+  if (IsInt<32>(value)) {
+    if (value == 0) {
+      __ testq(dest, dest);
+    } else {
+      __ cmpq(dest, Immediate(static_cast<int32_t>(value)));
+    }
+  } else {
+    // Value won't fit in an int.
+    __ cmpq(dest, LiteralInt64Address(value));
+  }
+}
+
 void CodeGeneratorX86_64::Store64BitValueToStack(Location dest, int64_t value) {
   DCHECK(dest.IsDoubleStackSlot());
   if (IsInt<32>(value)) {
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 318087e..72dddfd 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -264,6 +264,7 @@
   void GenerateExplicitNullCheck(HNullCheck* instruction);
   void PushOntoFPStack(Location source, uint32_t temp_offset,
                        uint32_t stack_adjustment, bool is_float);
+  void GenerateCompareTest(HCondition* condition);
   template<class LabelType>
   void GenerateTestAndBranch(HInstruction* instruction,
                              size_t condition_input_index,
@@ -478,9 +479,17 @@
   Address LiteralInt32Address(int32_t v);
   Address LiteralInt64Address(int64_t v);
 
-  // Load a 32/64 bit value into a register in the most efficient manner.
+  // Load a 32/64-bit value into a register in the most efficient manner.
   void Load32BitValue(CpuRegister dest, int32_t value);
   void Load64BitValue(CpuRegister dest, int64_t value);
+  void Load32BitValue(XmmRegister dest, int32_t value);
+  void Load64BitValue(XmmRegister dest, int64_t value);
+  void Load32BitValue(XmmRegister dest, float value);
+  void Load64BitValue(XmmRegister dest, double value);
+
+  // Compare a register with a 32/64-bit value in the most efficient manner.
+  void Compare32BitValue(CpuRegister dest, int32_t value);
+  void Compare64BitValue(CpuRegister dest, int64_t value);
 
   Address LiteralCaseTable(HPackedSwitch* switch_instr);
 
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 9b91b53..fa6aae8 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -612,8 +612,9 @@
         // TODO: Needs null check.
         return false;
       }
+      Handle<mirror::DexCache> dex_cache(handles_->NewHandle(resolved_method->GetDexCache()));
       HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, data.object_arg);
-      HInstanceFieldGet* iget = CreateInstanceFieldGet(resolved_method, data.field_idx, obj);
+      HInstanceFieldGet* iget = CreateInstanceFieldGet(dex_cache, data.field_idx, obj);
       DCHECK_EQ(iget->GetFieldOffset().Uint32Value(), data.field_offset);
       DCHECK_EQ(iget->IsVolatile() ? 1u : 0u, data.is_volatile);
       invoke_instruction->GetBlock()->InsertInstructionBefore(iget, invoke_instruction);
@@ -626,9 +627,10 @@
         // TODO: Needs null check.
         return false;
       }
+      Handle<mirror::DexCache> dex_cache(handles_->NewHandle(resolved_method->GetDexCache()));
       HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, data.object_arg);
       HInstruction* value = GetInvokeInputForArgVRegIndex(invoke_instruction, data.src_arg);
-      HInstanceFieldSet* iput = CreateInstanceFieldSet(resolved_method, data.field_idx, obj, value);
+      HInstanceFieldSet* iput = CreateInstanceFieldSet(dex_cache, data.field_idx, obj, value);
       DCHECK_EQ(iput->GetFieldOffset().Uint32Value(), data.field_offset);
       DCHECK_EQ(iput->IsVolatile() ? 1u : 0u, data.is_volatile);
       invoke_instruction->GetBlock()->InsertInstructionBefore(iput, invoke_instruction);
@@ -638,6 +640,59 @@
       }
       break;
     }
+    case kInlineOpConstructor: {
+      const InlineConstructorData& data = inline_method.d.constructor_data;
+      // Get the indexes to arrays for easier processing.
+      uint16_t iput_field_indexes[] = {
+          data.iput0_field_index, data.iput1_field_index, data.iput2_field_index
+      };
+      uint16_t iput_args[] = { data.iput0_arg, data.iput1_arg, data.iput2_arg };
+      static_assert(arraysize(iput_args) == arraysize(iput_field_indexes), "Size mismatch");
+      // Count valid field indexes.
+      size_t number_of_iputs = 0u;
+      while (number_of_iputs != arraysize(iput_field_indexes) &&
+          iput_field_indexes[number_of_iputs] != DexFile::kDexNoIndex16) {
+        // Check that there are no duplicate valid field indexes.
+        DCHECK_EQ(0, std::count(iput_field_indexes + number_of_iputs + 1,
+                                iput_field_indexes + arraysize(iput_field_indexes),
+                                iput_field_indexes[number_of_iputs]));
+        ++number_of_iputs;
+      }
+      // Check that there are no valid field indexes in the rest of the array.
+      DCHECK_EQ(0, std::count_if(iput_field_indexes + number_of_iputs,
+                                 iput_field_indexes + arraysize(iput_field_indexes),
+                                 [](uint16_t index) { return index != DexFile::kDexNoIndex16; }));
+
+      // Create HInstanceFieldSet for each IPUT that stores non-zero data.
+      Handle<mirror::DexCache> dex_cache;
+      HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, /* this */ 0u);
+      bool needs_constructor_barrier = false;
+      for (size_t i = 0; i != number_of_iputs; ++i) {
+        HInstruction* value = GetInvokeInputForArgVRegIndex(invoke_instruction, iput_args[i]);
+        if (!value->IsConstant() ||
+            (!value->AsConstant()->IsZero() && !value->IsNullConstant())) {
+          if (dex_cache.GetReference() == nullptr) {
+            dex_cache = handles_->NewHandle(resolved_method->GetDexCache());
+          }
+          uint16_t field_index = iput_field_indexes[i];
+          HInstanceFieldSet* iput = CreateInstanceFieldSet(dex_cache, field_index, obj, value);
+          invoke_instruction->GetBlock()->InsertInstructionBefore(iput, invoke_instruction);
+
+          // Check whether the field is final. If it is, we need to add a barrier.
+          size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+          ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
+          DCHECK(resolved_field != nullptr);
+          if (resolved_field->IsFinal()) {
+            needs_constructor_barrier = true;
+          }
+        }
+      }
+      if (needs_constructor_barrier) {
+        HMemoryBarrier* barrier = new (graph_->GetArena()) HMemoryBarrier(kStoreStore, kNoDexPc);
+        invoke_instruction->GetBlock()->InsertInstructionBefore(barrier, invoke_instruction);
+      }
+      break;
+    }
     default:
       LOG(FATAL) << "UNREACHABLE";
       UNREACHABLE();
@@ -652,11 +707,10 @@
   return true;
 }
 
-HInstanceFieldGet* HInliner::CreateInstanceFieldGet(ArtMethod* resolved_method,
+HInstanceFieldGet* HInliner::CreateInstanceFieldGet(Handle<mirror::DexCache> dex_cache,
                                                     uint32_t field_index,
                                                     HInstruction* obj)
     SHARED_REQUIRES(Locks::mutator_lock_) {
-  Handle<mirror::DexCache> dex_cache(handles_->NewHandle(resolved_method->GetDexCache()));
   size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
   ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
   DCHECK(resolved_field != nullptr);
@@ -667,7 +721,7 @@
       resolved_field->IsVolatile(),
       field_index,
       resolved_field->GetDeclaringClass()->GetDexClassDefIndex(),
-      *resolved_method->GetDexFile(),
+      *dex_cache->GetDexFile(),
       dex_cache,
       // Read barrier generates a runtime call in slow path and we need a valid
       // dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
@@ -679,12 +733,11 @@
   return iget;
 }
 
-HInstanceFieldSet* HInliner::CreateInstanceFieldSet(ArtMethod* resolved_method,
+HInstanceFieldSet* HInliner::CreateInstanceFieldSet(Handle<mirror::DexCache> dex_cache,
                                                     uint32_t field_index,
                                                     HInstruction* obj,
                                                     HInstruction* value)
     SHARED_REQUIRES(Locks::mutator_lock_) {
-  Handle<mirror::DexCache> dex_cache(handles_->NewHandle(resolved_method->GetDexCache()));
   size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
   ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
   DCHECK(resolved_field != nullptr);
@@ -696,7 +749,7 @@
       resolved_field->IsVolatile(),
       field_index,
       resolved_field->GetDeclaringClass()->GetDexClassDefIndex(),
-      *resolved_method->GetDexFile(),
+      *dex_cache->GetDexFile(),
       dex_cache,
       // Read barrier generates a runtime call in slow path and we need a valid
       // dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
@@ -758,6 +811,7 @@
       compiler_driver_->GetInstructionSet(),
       invoke_type,
       graph_->IsDebuggable(),
+      /* osr */ false,
       graph_->GetCurrentInstructionId());
   callee_graph->SetArtMethod(resolved_method);
 
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 0127d55..7d343c6 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -70,11 +70,11 @@
     SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Create a new HInstanceFieldGet.
-  HInstanceFieldGet* CreateInstanceFieldGet(ArtMethod* resolved_method,
+  HInstanceFieldGet* CreateInstanceFieldGet(Handle<mirror::DexCache> dex_cache,
                                             uint32_t field_index,
                                             HInstruction* obj);
   // Create a new HInstanceFieldSet.
-  HInstanceFieldSet* CreateInstanceFieldSet(ArtMethod* resolved_method,
+  HInstanceFieldSet* CreateInstanceFieldSet(Handle<mirror::DexCache> dex_cache,
                                             uint32_t field_index,
                                             HInstruction* obj,
                                             HInstruction* value);
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index c1e3863..0029cc3 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -91,6 +91,7 @@
   void SimplifyRotate(HInvoke* invoke, bool is_left);
   void SimplifySystemArrayCopy(HInvoke* invoke);
   void SimplifyStringEquals(HInvoke* invoke);
+  void SimplifyCompare(HInvoke* invoke, bool is_signum);
 
   OptimizingCompilerStats* stats_;
   bool simplification_occurred_ = false;
@@ -176,8 +177,8 @@
 
   // We can apply De Morgan's laws if both inputs are Not's and are only used
   // by `op`.
-  if (left->IsNot() &&
-      right->IsNot() &&
+  if (((left->IsNot() && right->IsNot()) ||
+       (left->IsBooleanNot() && right->IsBooleanNot())) &&
       left->HasOnlyOneNonEnvironmentUse() &&
       right->HasOnlyOneNonEnvironmentUse()) {
     // Replace code looking like
@@ -187,8 +188,8 @@
     // with
     //    OR or, a, b         (respectively AND)
     //    NOT dest, or
-    HInstruction* src_left = left->AsNot()->GetInput();
-    HInstruction* src_right = right->AsNot()->GetInput();
+    HInstruction* src_left = left->InputAt(0);
+    HInstruction* src_right = right->InputAt(0);
     uint32_t dex_pc = op->GetDexPc();
 
     // Remove the negations on the inputs.
@@ -204,7 +205,12 @@
     } else {
       hbin = new (GetGraph()->GetArena()) HAnd(type, src_left, src_right, dex_pc);
     }
-    HNot* hnot = new (GetGraph()->GetArena()) HNot(type, hbin, dex_pc);
+    HInstruction* hnot;
+    if (left->IsBooleanNot()) {
+      hnot = new (GetGraph()->GetArena()) HBooleanNot(hbin, dex_pc);
+    } else {
+      hnot = new (GetGraph()->GetArena()) HNot(type, hbin, dex_pc);
+    }
 
     op->GetBlock()->InsertInstructionBefore(hbin, op);
     op->GetBlock()->ReplaceAndRemoveInstructionWith(op, hnot);
@@ -1308,8 +1314,8 @@
 
   HInstruction* left = instruction->GetLeft();
   HInstruction* right = instruction->GetRight();
-  if (left->IsNot() &&
-      right->IsNot() &&
+  if (((left->IsNot() && right->IsNot()) ||
+       (left->IsBooleanNot() && right->IsBooleanNot())) &&
       left->HasOnlyOneNonEnvironmentUse() &&
       right->HasOnlyOneNonEnvironmentUse()) {
     // Replace code looking like
@@ -1318,8 +1324,8 @@
     //    XOR dst, nota, notb
     // with
     //    XOR dst, a, b
-    instruction->ReplaceInput(left->AsNot()->GetInput(), 0);
-    instruction->ReplaceInput(right->AsNot()->GetInput(), 1);
+    instruction->ReplaceInput(left->InputAt(0), 0);
+    instruction->ReplaceInput(right->InputAt(0), 1);
     left->GetBlock()->RemoveInstruction(left);
     right->GetBlock()->RemoveInstruction(right);
     RecordSimplification();
@@ -1441,6 +1447,24 @@
   }
 }
 
+void InstructionSimplifierVisitor::SimplifyCompare(HInvoke* invoke, bool is_signum) {
+  DCHECK(invoke->IsInvokeStaticOrDirect());
+  uint32_t dex_pc = invoke->GetDexPc();
+  HInstruction* left = invoke->InputAt(0);
+  HInstruction* right;
+  Primitive::Type type = left->GetType();
+  if (!is_signum) {
+    right = invoke->InputAt(1);
+  } else if (type == Primitive::kPrimLong) {
+    right = GetGraph()->GetLongConstant(0);
+  } else {
+    right = GetGraph()->GetIntConstant(0);
+  }
+  HCompare* compare = new (GetGraph()->GetArena())
+      HCompare(type, left, right, ComparisonBias::kNoBias, dex_pc);
+  invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, compare);
+}
+
 void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
   if (instruction->GetIntrinsic() == Intrinsics::kStringEquals) {
     SimplifyStringEquals(instruction);
@@ -1452,6 +1476,12 @@
   } else if (instruction->GetIntrinsic() == Intrinsics::kIntegerRotateLeft ||
              instruction->GetIntrinsic() == Intrinsics::kLongRotateLeft) {
     SimplifyRotate(instruction, true);
+  } else if (instruction->GetIntrinsic() == Intrinsics::kIntegerCompare ||
+             instruction->GetIntrinsic() == Intrinsics::kLongCompare) {
+    SimplifyCompare(instruction, /* is_signum */ false);
+  } else if (instruction->GetIntrinsic() == Intrinsics::kIntegerSignum ||
+             instruction->GetIntrinsic() == Intrinsics::kLongSignum) {
+    SimplifyCompare(instruction, /* is_signum */ true);
   }
 }
 
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index a6be324..db39bc8 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -481,6 +481,7 @@
     case kInlineOpNonWideConst:
     case kInlineOpIGet:
     case kInlineOpIPut:
+    case kInlineOpConstructor:
       return Intrinsics::kNone;
 
     // String init cases, not intrinsics.
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index e8912b3..00a158b 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1580,6 +1580,251 @@
   __ Bind(slow_path->GetExitLabel());
 }
 
+static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+  // If the graph is debuggable, all callee-saved floating-point registers are blocked by
+  // the code generator. Furthermore, the register allocator creates fixed live intervals
+  // for all caller-saved registers because we are doing a function call. As a result, if
+  // the input and output locations are unallocated, the register allocator runs out of
+  // registers and fails; however, a debuggable graph is not the common case.
+  if (invoke->GetBlock()->GetGraph()->IsDebuggable()) {
+    return;
+  }
+
+  DCHECK_EQ(invoke->GetNumberOfArguments(), 1U);
+  DCHECK_EQ(invoke->InputAt(0)->GetType(), Primitive::kPrimDouble);
+  DCHECK_EQ(invoke->GetType(), Primitive::kPrimDouble);
+
+  LocationSummary* const locations = new (arena) LocationSummary(invoke,
+                                                                 LocationSummary::kCall,
+                                                                 kIntrinsified);
+  const InvokeRuntimeCallingConvention calling_convention;
+
+  locations->SetInAt(0, Location::RequiresFpuRegister());
+  locations->SetOut(Location::RequiresFpuRegister());
+  // Native code uses the soft float ABI.
+  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+}
+
+static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+  // If the graph is debuggable, all callee-saved floating-point registers are blocked by
+  // the code generator. Furthermore, the register allocator creates fixed live intervals
+  // for all caller-saved registers because we are doing a function call. As a result, if
+  // the input and output locations are unallocated, the register allocator runs out of
+  // registers and fails; however, a debuggable graph is not the common case.
+  if (invoke->GetBlock()->GetGraph()->IsDebuggable()) {
+    return;
+  }
+
+  DCHECK_EQ(invoke->GetNumberOfArguments(), 2U);
+  DCHECK_EQ(invoke->InputAt(0)->GetType(), Primitive::kPrimDouble);
+  DCHECK_EQ(invoke->InputAt(1)->GetType(), Primitive::kPrimDouble);
+  DCHECK_EQ(invoke->GetType(), Primitive::kPrimDouble);
+
+  LocationSummary* const locations = new (arena) LocationSummary(invoke,
+                                                                 LocationSummary::kCall,
+                                                                 kIntrinsified);
+  const InvokeRuntimeCallingConvention calling_convention;
+
+  locations->SetInAt(0, Location::RequiresFpuRegister());
+  locations->SetInAt(1, Location::RequiresFpuRegister());
+  locations->SetOut(Location::RequiresFpuRegister());
+  // Native code uses the soft float ABI.
+  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+}
+
+static void GenFPToFPCall(HInvoke* invoke,
+                          ArmAssembler* assembler,
+                          CodeGeneratorARM* codegen,
+                          QuickEntrypointEnum entry) {
+  LocationSummary* const locations = invoke->GetLocations();
+  const InvokeRuntimeCallingConvention calling_convention;
+
+  DCHECK_EQ(invoke->GetNumberOfArguments(), 1U);
+  DCHECK(locations->WillCall() && locations->Intrinsified());
+  DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(0)));
+  DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(1)));
+
+  __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmWordSize>(entry).Int32Value());
+  // Native code uses the soft float ABI.
+  __ vmovrrd(calling_convention.GetRegisterAt(0),
+             calling_convention.GetRegisterAt(1),
+             FromLowSToD(locations->InAt(0).AsFpuRegisterPairLow<SRegister>()));
+  __ blx(LR);
+  codegen->RecordPcInfo(invoke, invoke->GetDexPc());
+  __ vmovdrr(FromLowSToD(locations->Out().AsFpuRegisterPairLow<SRegister>()),
+             calling_convention.GetRegisterAt(0),
+             calling_convention.GetRegisterAt(1));
+}
+
+static void GenFPFPToFPCall(HInvoke* invoke,
+                          ArmAssembler* assembler,
+                          CodeGeneratorARM* codegen,
+                          QuickEntrypointEnum entry) {
+  LocationSummary* const locations = invoke->GetLocations();
+  const InvokeRuntimeCallingConvention calling_convention;
+
+  DCHECK_EQ(invoke->GetNumberOfArguments(), 2U);
+  DCHECK(locations->WillCall() && locations->Intrinsified());
+  DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(0)));
+  DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(1)));
+  DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(2)));
+  DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(3)));
+
+  __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmWordSize>(entry).Int32Value());
+  // Native code uses the soft float ABI.
+  __ vmovrrd(calling_convention.GetRegisterAt(0),
+             calling_convention.GetRegisterAt(1),
+             FromLowSToD(locations->InAt(0).AsFpuRegisterPairLow<SRegister>()));
+  __ vmovrrd(calling_convention.GetRegisterAt(2),
+             calling_convention.GetRegisterAt(3),
+             FromLowSToD(locations->InAt(1).AsFpuRegisterPairLow<SRegister>()));
+  __ blx(LR);
+  codegen->RecordPcInfo(invoke, invoke->GetDexPc());
+  __ vmovdrr(FromLowSToD(locations->Out().AsFpuRegisterPairLow<SRegister>()),
+             calling_convention.GetRegisterAt(0),
+             calling_convention.GetRegisterAt(1));
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathCos(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathCos(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickCos);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathSin(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathSin(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickSin);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathAcos(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathAcos(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAcos);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathAsin(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathAsin(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAsin);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathAtan(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathAtan(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAtan);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathCbrt(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathCbrt(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickCbrt);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathCosh(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathCosh(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickCosh);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathExp(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathExp(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickExp);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathExpm1(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathExpm1(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickExpm1);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathLog(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathLog(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickLog);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathLog10(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathLog10(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickLog10);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathSinh(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathSinh(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickSinh);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathTan(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathTan(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickTan);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathTanh(HInvoke* invoke) {
+  CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathTanh(HInvoke* invoke) {
+  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickTanh);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathAtan2(HInvoke* invoke) {
+  CreateFPFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathAtan2(HInvoke* invoke) {
+  GenFPFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAtan2);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathHypot(HInvoke* invoke) {
+  CreateFPFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathHypot(HInvoke* invoke) {
+  GenFPFPToFPCall(invoke, GetAssembler(), codegen_, kQuickHypot);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathNextAfter(HInvoke* invoke) {
+  CreateFPFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathNextAfter(HInvoke* invoke) {
+  GenFPFPToFPCall(invoke, GetAssembler(), codegen_, kQuickNextAfter);
+}
+
 // Unimplemented intrinsics.
 
 #define UNIMPLEMENTED_INTRINSIC(Name)                                                  \
@@ -1610,44 +1855,27 @@
 UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
 UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
-UNIMPLEMENTED_INTRINSIC(MathCos)
-UNIMPLEMENTED_INTRINSIC(MathSin)
-UNIMPLEMENTED_INTRINSIC(MathAcos)
-UNIMPLEMENTED_INTRINSIC(MathAsin)
-UNIMPLEMENTED_INTRINSIC(MathAtan)
-UNIMPLEMENTED_INTRINSIC(MathAtan2)
-UNIMPLEMENTED_INTRINSIC(MathCbrt)
-UNIMPLEMENTED_INTRINSIC(MathCosh)
-UNIMPLEMENTED_INTRINSIC(MathExp)
-UNIMPLEMENTED_INTRINSIC(MathExpm1)
-UNIMPLEMENTED_INTRINSIC(MathHypot)
-UNIMPLEMENTED_INTRINSIC(MathLog)
-UNIMPLEMENTED_INTRINSIC(MathLog10)
-UNIMPLEMENTED_INTRINSIC(MathNextAfter)
-UNIMPLEMENTED_INTRINSIC(MathSinh)
-UNIMPLEMENTED_INTRINSIC(MathTan)
-UNIMPLEMENTED_INTRINSIC(MathTanh)
 
 UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
 UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
 UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
 UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
 
-UNIMPLEMENTED_INTRINSIC(IntegerCompare)
-UNIMPLEMENTED_INTRINSIC(LongCompare)
 UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
 UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
 UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit)
 UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
+
+// Handled as HIR instructions.
+UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
+UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
+UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
+UNIMPLEMENTED_INTRINSIC(LongRotateRight)
+UNIMPLEMENTED_INTRINSIC(IntegerCompare)
+UNIMPLEMENTED_INTRINSIC(LongCompare)
 UNIMPLEMENTED_INTRINSIC(IntegerSignum)
 UNIMPLEMENTED_INTRINSIC(LongSignum)
 
-// Rotate operations are handled as HRor instructions.
-UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
-UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
-UNIMPLEMENTED_INTRINSIC(LongRotateRight)
-
 #undef UNIMPLEMENTED_INTRINSIC
 
 #undef __
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 5dce83a..4140d94 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -284,36 +284,6 @@
   locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
 }
 
-static void GenCompare(LocationSummary* locations, bool is_long, vixl::MacroAssembler* masm) {
-  Location op1 = locations->InAt(0);
-  Location op2 = locations->InAt(1);
-  Location out = locations->Out();
-
-  Register op1_reg = is_long ? XRegisterFrom(op1) : WRegisterFrom(op1);
-  Register op2_reg = is_long ? XRegisterFrom(op2) : WRegisterFrom(op2);
-  Register out_reg = WRegisterFrom(out);
-
-  __ Cmp(op1_reg, op2_reg);
-  __ Cset(out_reg, gt);           // out == +1 if GT or 0 otherwise
-  __ Cinv(out_reg, out_reg, lt);  // out == -1 if LT or unchanged otherwise
-}
-
-void IntrinsicLocationsBuilderARM64::VisitIntegerCompare(HInvoke* invoke) {
-  CreateIntIntToIntLocations(arena_, invoke);
-}
-
-void IntrinsicCodeGeneratorARM64::VisitIntegerCompare(HInvoke* invoke) {
-  GenCompare(invoke->GetLocations(), /* is_long */ false, GetVIXLAssembler());
-}
-
-void IntrinsicLocationsBuilderARM64::VisitLongCompare(HInvoke* invoke) {
-  CreateIntIntToIntLocations(arena_, invoke);
-}
-
-void IntrinsicCodeGeneratorARM64::VisitLongCompare(HInvoke* invoke) {
-  GenCompare(invoke->GetLocations(), /* is_long */ true,  GetVIXLAssembler());
-}
-
 static void GenNumberOfLeadingZeros(LocationSummary* locations,
                                     Primitive::Type type,
                                     vixl::MacroAssembler* masm) {
@@ -1456,34 +1426,6 @@
   __ Bind(slow_path->GetExitLabel());
 }
 
-static void GenSignum(LocationSummary* locations, bool is_long, vixl::MacroAssembler* masm) {
-  Location op1 = locations->InAt(0);
-  Location out = locations->Out();
-
-  Register op1_reg = is_long ? XRegisterFrom(op1) : WRegisterFrom(op1);
-  Register out_reg = WRegisterFrom(out);
-
-  __ Cmp(op1_reg, 0);
-  __ Cset(out_reg, gt);           // out == +1 if GT or 0 otherwise
-  __ Cinv(out_reg, out_reg, lt);  // out == -1 if LT or unchanged otherwise
-}
-
-void IntrinsicLocationsBuilderARM64::VisitIntegerSignum(HInvoke* invoke) {
-  CreateIntToIntLocations(arena_, invoke);
-}
-
-void IntrinsicCodeGeneratorARM64::VisitIntegerSignum(HInvoke* invoke) {
-  GenSignum(invoke->GetLocations(), /* is_long */ false, GetVIXLAssembler());
-}
-
-void IntrinsicLocationsBuilderARM64::VisitLongSignum(HInvoke* invoke) {
-  CreateIntToIntLocations(arena_, invoke);
-}
-
-void IntrinsicCodeGeneratorARM64::VisitLongSignum(HInvoke* invoke) {
-  GenSignum(invoke->GetLocations(), /* is_long */ true,  GetVIXLAssembler());
-}
-
 static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
   DCHECK_EQ(invoke->GetNumberOfArguments(), 1U);
   DCHECK(Primitive::IsFloatingPointType(invoke->InputAt(0)->GetType()));
@@ -1684,11 +1626,15 @@
 UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit)
 UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
 
-// Rotate operations are handled as HRor instructions.
+// Handled as HIR instructions.
 UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
 UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
+UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
 UNIMPLEMENTED_INTRINSIC(LongRotateRight)
+UNIMPLEMENTED_INTRINSIC(IntegerCompare)
+UNIMPLEMENTED_INTRINSIC(LongCompare)
+UNIMPLEMENTED_INTRINSIC(IntegerSignum)
+UNIMPLEMENTED_INTRINSIC(LongSignum)
 
 #undef UNIMPLEMENTED_INTRINSIC
 
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 0d9cf09..2294713 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -1019,12 +1019,14 @@
 UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
 UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
 
-UNIMPLEMENTED_INTRINSIC(IntegerCompare)
-UNIMPLEMENTED_INTRINSIC(LongCompare)
 UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
 UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
 UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit)
 UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
+
+// Handled as HIR instructions.
+UNIMPLEMENTED_INTRINSIC(IntegerCompare)
+UNIMPLEMENTED_INTRINSIC(LongCompare)
 UNIMPLEMENTED_INTRINSIC(IntegerSignum)
 UNIMPLEMENTED_INTRINSIC(LongSignum)
 
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index f681d1f..ac28503 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1767,12 +1767,14 @@
 UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
 UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
 
-UNIMPLEMENTED_INTRINSIC(IntegerCompare)
-UNIMPLEMENTED_INTRINSIC(LongCompare)
 UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
 UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
 UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit)
 UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
+
+// Handled as HIR instructions.
+UNIMPLEMENTED_INTRINSIC(IntegerCompare)
+UNIMPLEMENTED_INTRINSIC(LongCompare)
 UNIMPLEMENTED_INTRINSIC(IntegerSignum)
 UNIMPLEMENTED_INTRINSIC(LongSignum)
 
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index acc40bc..ab4f6f9 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -37,10 +37,12 @@
 
 static constexpr int kDoubleNaNHigh = 0x7FF80000;
 static constexpr int kDoubleNaNLow = 0x00000000;
-static constexpr int kFloatNaN = 0x7FC00000;
+static constexpr int64_t kDoubleNaN = INT64_C(0x7FF8000000000000);
+static constexpr int32_t kFloatNaN = INT32_C(0x7FC00000);
 
 IntrinsicLocationsBuilderX86::IntrinsicLocationsBuilderX86(CodeGeneratorX86* codegen)
-  : arena_(codegen->GetGraph()->GetArena()), codegen_(codegen) {
+  : arena_(codegen->GetGraph()->GetArena()),
+    codegen_(codegen) {
 }
 
 
@@ -256,15 +258,37 @@
                                                            LocationSummary::kNoCall,
                                                            kIntrinsified);
   locations->SetInAt(0, Location::RequiresFpuRegister());
-  // TODO: Allow x86 to work with memory. This requires assembler support, see below.
-  // locations->SetInAt(0, Location::Any());               // X86 can work on memory directly.
   locations->SetOut(Location::SameAsFirstInput());
+  HInvokeStaticOrDirect* static_or_direct = invoke->AsInvokeStaticOrDirect();
+  DCHECK(static_or_direct != nullptr);
+  if (invoke->InputAt(static_or_direct->GetSpecialInputIndex())->IsX86ComputeBaseMethodAddress()) {
+    // We need addressibility for the constant area.
+    locations->SetInAt(1, Location::RequiresRegister());
+    // We need a temporary to hold the constant.
+    locations->AddTemp(Location::RequiresFpuRegister());
+  }
 }
 
-static void MathAbsFP(LocationSummary* locations, bool is64bit, X86Assembler* assembler) {
+static void MathAbsFP(LocationSummary* locations,
+                      bool is64bit,
+                      X86Assembler* assembler,
+                      CodeGeneratorX86* codegen) {
   Location output = locations->Out();
 
-  if (output.IsFpuRegister()) {
+  DCHECK(output.IsFpuRegister());
+  if (locations->InAt(1).IsValid()) {
+    DCHECK(locations->InAt(1).IsRegister());
+    // We also have a constant area pointer.
+    Register constant_area = locations->InAt(1).AsRegister<Register>();
+    XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+    if (is64bit) {
+      __ movsd(temp, codegen->LiteralInt64Address(INT64_C(0x7FFFFFFFFFFFFFFF), constant_area));
+      __ andpd(output.AsFpuRegister<XmmRegister>(), temp);
+    } else {
+      __ movss(temp, codegen->LiteralInt32Address(INT32_C(0x7FFFFFFF), constant_area));
+      __ andps(output.AsFpuRegister<XmmRegister>(), temp);
+    }
+  } else {
     // Create the right constant on an aligned stack.
     if (is64bit) {
       __ subl(ESP, Immediate(8));
@@ -277,19 +301,6 @@
       __ andps(output.AsFpuRegister<XmmRegister>(), Address(ESP, 0));
     }
     __ addl(ESP, Immediate(16));
-  } else {
-    // TODO: update when assember support is available.
-    UNIMPLEMENTED(FATAL) << "Needs assembler support.";
-//  Once assembler support is available, in-memory operations look like this:
-//    if (is64bit) {
-//      DCHECK(output.IsDoubleStackSlot());
-//      __ andl(Address(Register(RSP), output.GetHighStackIndex(kX86WordSize)),
-//              Immediate(0x7FFFFFFF));
-//    } else {
-//      DCHECK(output.IsStackSlot());
-//      // Can use and with a literal directly.
-//      __ andl(Address(Register(RSP), output.GetStackIndex()), Immediate(0x7FFFFFFF));
-//    }
   }
 }
 
@@ -298,7 +309,7 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitMathAbsDouble(HInvoke* invoke) {
-  MathAbsFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+  MathAbsFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler(), codegen_);
 }
 
 void IntrinsicLocationsBuilderX86::VisitMathAbsFloat(HInvoke* invoke) {
@@ -306,7 +317,7 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitMathAbsFloat(HInvoke* invoke) {
-  MathAbsFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+  MathAbsFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler(), codegen_);
 }
 
 static void CreateAbsIntLocation(ArenaAllocator* arena, HInvoke* invoke) {
@@ -388,8 +399,11 @@
   GenAbsLong(invoke->GetLocations(), GetAssembler());
 }
 
-static void GenMinMaxFP(LocationSummary* locations, bool is_min, bool is_double,
-                        X86Assembler* assembler) {
+static void GenMinMaxFP(LocationSummary* locations,
+                        bool is_min,
+                        bool is_double,
+                        X86Assembler* assembler,
+                        CodeGeneratorX86* codegen) {
   Location op1_loc = locations->InAt(0);
   Location op2_loc = locations->InAt(1);
   Location out_loc = locations->Out();
@@ -450,15 +464,26 @@
 
   // NaN handling.
   __ Bind(&nan);
-  if (is_double) {
-    __ pushl(Immediate(kDoubleNaNHigh));
-    __ pushl(Immediate(kDoubleNaNLow));
-    __ movsd(out, Address(ESP, 0));
-    __ addl(ESP, Immediate(8));
+  // Do we have a constant area pointer?
+  if (locations->InAt(2).IsValid()) {
+    DCHECK(locations->InAt(2).IsRegister());
+    Register constant_area = locations->InAt(2).AsRegister<Register>();
+    if (is_double) {
+      __ movsd(out, codegen->LiteralInt64Address(kDoubleNaN, constant_area));
+    } else {
+      __ movss(out, codegen->LiteralInt32Address(kFloatNaN, constant_area));
+    }
   } else {
-    __ pushl(Immediate(kFloatNaN));
-    __ movss(out, Address(ESP, 0));
-    __ addl(ESP, Immediate(4));
+    if (is_double) {
+      __ pushl(Immediate(kDoubleNaNHigh));
+      __ pushl(Immediate(kDoubleNaNLow));
+      __ movsd(out, Address(ESP, 0));
+      __ addl(ESP, Immediate(8));
+    } else {
+      __ pushl(Immediate(kFloatNaN));
+      __ movss(out, Address(ESP, 0));
+      __ addl(ESP, Immediate(4));
+    }
   }
   __ jmp(&done);
 
@@ -483,6 +508,11 @@
   // The following is sub-optimal, but all we can do for now. It would be fine to also accept
   // the second input to be the output (we can simply swap inputs).
   locations->SetOut(Location::SameAsFirstInput());
+  HInvokeStaticOrDirect* static_or_direct = invoke->AsInvokeStaticOrDirect();
+  DCHECK(static_or_direct != nullptr);
+  if (invoke->InputAt(static_or_direct->GetSpecialInputIndex())->IsX86ComputeBaseMethodAddress()) {
+    locations->SetInAt(2, Location::RequiresRegister());
+  }
 }
 
 void IntrinsicLocationsBuilderX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -490,7 +520,11 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, /* is_double */ true, GetAssembler());
+  GenMinMaxFP(invoke->GetLocations(),
+              /* is_min */ true,
+              /* is_double */ true,
+              GetAssembler(),
+              codegen_);
 }
 
 void IntrinsicLocationsBuilderX86::VisitMathMinFloatFloat(HInvoke* invoke) {
@@ -498,7 +532,11 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitMathMinFloatFloat(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, /* is_double */ false, GetAssembler());
+  GenMinMaxFP(invoke->GetLocations(),
+              /* is_min */ true,
+              /* is_double */ false,
+              GetAssembler(),
+              codegen_);
 }
 
 void IntrinsicLocationsBuilderX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -506,7 +544,11 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, /* is_double */ true, GetAssembler());
+  GenMinMaxFP(invoke->GetLocations(),
+              /* is_min */ false,
+              /* is_double */ true,
+              GetAssembler(),
+              codegen_);
 }
 
 void IntrinsicLocationsBuilderX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -514,7 +556,11 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, /* is_double */ false, GetAssembler());
+  GenMinMaxFP(invoke->GetLocations(),
+              /* is_min */ false,
+              /* is_double */ false,
+              GetAssembler(),
+              codegen_);
 }
 
 static void GenMinMax(LocationSummary* locations, bool is_min, bool is_long,
@@ -2245,7 +2291,7 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitIntegerReverse(HInvoke* invoke) {
-  X86Assembler* assembler = down_cast<X86Assembler*>(codegen_->GetAssembler());
+  X86Assembler* assembler = GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
 
   Register reg = locations->InAt(0).AsRegister<Register>();
@@ -2276,7 +2322,7 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitLongReverse(HInvoke* invoke) {
-  X86Assembler* assembler = down_cast<X86Assembler*>(codegen_->GetAssembler());
+  X86Assembler* assembler = GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
 
   Register reg_low = locations->InAt(0).AsRegisterPairLow<Register>();
@@ -2320,7 +2366,9 @@
   locations->SetOut(Location::RequiresRegister());
 }
 
-static void GenBitCount(X86Assembler* assembler, HInvoke* invoke, bool is_long) {
+static void GenBitCount(X86Assembler* assembler,
+                        CodeGeneratorX86* codegen,
+                        HInvoke* invoke, bool is_long) {
   LocationSummary* locations = invoke->GetLocations();
   Location src = locations->InAt(0);
   Register out = locations->Out().AsRegister<Register>();
@@ -2331,11 +2379,7 @@
     value = is_long
         ? POPCOUNT(static_cast<uint64_t>(value))
         : POPCOUNT(static_cast<uint32_t>(value));
-    if (value == 0) {
-      __ xorl(out, out);
-    } else {
-      __ movl(out, Immediate(value));
-    }
+    codegen->Load32BitValue(out, value);
     return;
   }
 
@@ -2367,7 +2411,7 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitIntegerBitCount(HInvoke* invoke) {
-  GenBitCount(GetAssembler(), invoke, /* is_long */ false);
+  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ false);
 }
 
 void IntrinsicLocationsBuilderX86::VisitLongBitCount(HInvoke* invoke) {
@@ -2375,7 +2419,7 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitLongBitCount(HInvoke* invoke) {
-  GenBitCount(GetAssembler(), invoke, /* is_long */ true);
+  GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
 }
 
 static void CreateLeadingZeroLocations(ArenaAllocator* arena, HInvoke* invoke, bool is_long) {
@@ -2390,7 +2434,9 @@
   locations->SetOut(Location::RequiresRegister());
 }
 
-static void GenLeadingZeros(X86Assembler* assembler, HInvoke* invoke, bool is_long) {
+static void GenLeadingZeros(X86Assembler* assembler,
+                            CodeGeneratorX86* codegen,
+                            HInvoke* invoke, bool is_long) {
   LocationSummary* locations = invoke->GetLocations();
   Location src = locations->InAt(0);
   Register out = locations->Out().AsRegister<Register>();
@@ -2403,11 +2449,7 @@
     } else {
       value = is_long ? CLZ(static_cast<uint64_t>(value)) : CLZ(static_cast<uint32_t>(value));
     }
-    if (value == 0) {
-      __ xorl(out, out);
-    } else {
-      __ movl(out, Immediate(value));
-    }
+    codegen->Load32BitValue(out, value);
     return;
   }
 
@@ -2474,8 +2516,7 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
-  X86Assembler* assembler = down_cast<X86Assembler*>(codegen_->GetAssembler());
-  GenLeadingZeros(assembler, invoke, /* is_long */ false);
+  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
 }
 
 void IntrinsicLocationsBuilderX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -2483,8 +2524,7 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
-  X86Assembler* assembler = down_cast<X86Assembler*>(codegen_->GetAssembler());
-  GenLeadingZeros(assembler, invoke, /* is_long */ true);
+  GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
 }
 
 static void CreateTrailingZeroLocations(ArenaAllocator* arena, HInvoke* invoke, bool is_long) {
@@ -2499,7 +2539,9 @@
   locations->SetOut(Location::RequiresRegister());
 }
 
-static void GenTrailingZeros(X86Assembler* assembler, HInvoke* invoke, bool is_long) {
+static void GenTrailingZeros(X86Assembler* assembler,
+                             CodeGeneratorX86* codegen,
+                             HInvoke* invoke, bool is_long) {
   LocationSummary* locations = invoke->GetLocations();
   Location src = locations->InAt(0);
   Register out = locations->Out().AsRegister<Register>();
@@ -2512,11 +2554,7 @@
     } else {
       value = is_long ? CTZ(static_cast<uint64_t>(value)) : CTZ(static_cast<uint32_t>(value));
     }
-    if (value == 0) {
-      __ xorl(out, out);
-    } else {
-      __ movl(out, Immediate(value));
-    }
+    codegen->Load32BitValue(out, value);
     return;
   }
 
@@ -2570,8 +2608,7 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
-  X86Assembler* assembler = down_cast<X86Assembler*>(codegen_->GetAssembler());
-  GenTrailingZeros(assembler, invoke, /* is_long */ false);
+  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
 }
 
 void IntrinsicLocationsBuilderX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -2579,8 +2616,7 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
-  X86Assembler* assembler = down_cast<X86Assembler*>(codegen_->GetAssembler());
-  GenTrailingZeros(assembler, invoke, /* is_long */ true);
+  GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
 }
 
 // Unimplemented intrinsics.
@@ -2600,20 +2636,20 @@
 UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
 UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
 
-UNIMPLEMENTED_INTRINSIC(IntegerCompare)
-UNIMPLEMENTED_INTRINSIC(LongCompare)
 UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
 UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
 UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit)
 UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
-UNIMPLEMENTED_INTRINSIC(IntegerSignum)
-UNIMPLEMENTED_INTRINSIC(LongSignum)
 
-// Rotate operations are handled as HRor instructions.
+// Handled as HIR instructions.
 UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
+UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
 UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
 UNIMPLEMENTED_INTRINSIC(LongRotateRight)
-UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
+UNIMPLEMENTED_INTRINSIC(IntegerCompare)
+UNIMPLEMENTED_INTRINSIC(LongCompare)
+UNIMPLEMENTED_INTRINSIC(IntegerSignum)
+UNIMPLEMENTED_INTRINSIC(LongSignum)
 
 #undef UNIMPLEMENTED_INTRINSIC
 
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 51fa514..c9a4344 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2431,58 +2431,6 @@
   GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
 }
 
-static void CreateCompareLocations(ArenaAllocator* arena, HInvoke* invoke) {
-  LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           LocationSummary::kNoCall,
-                                                           kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister());
-}
-
-static void GenCompare(X86_64Assembler* assembler, HInvoke* invoke, bool is_long) {
-  LocationSummary* locations = invoke->GetLocations();
-  CpuRegister src1 = locations->InAt(0).AsRegister<CpuRegister>();
-  CpuRegister src2 = locations->InAt(1).AsRegister<CpuRegister>();
-  CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-
-  NearLabel is_lt, done;
-
-  __ xorl(out, out);
-
-  if (is_long) {
-    __ cmpq(src1, src2);
-  } else {
-    __ cmpl(src1, src2);
-  }
-  __ j(kEqual, &done);
-  __ j(kLess, &is_lt);
-
-  __ movl(out, Immediate(1));
-  __ jmp(&done);
-
-  __ Bind(&is_lt);
-  __ movl(out, Immediate(-1));
-
-  __ Bind(&done);
-}
-
-void IntrinsicLocationsBuilderX86_64::VisitIntegerCompare(HInvoke* invoke) {
-  CreateCompareLocations(arena_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86_64::VisitIntegerCompare(HInvoke* invoke) {
-  GenCompare(GetAssembler(), invoke, /* is_long */ false);
-}
-
-void IntrinsicLocationsBuilderX86_64::VisitLongCompare(HInvoke* invoke) {
-  CreateCompareLocations(arena_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86_64::VisitLongCompare(HInvoke* invoke) {
-  GenCompare(GetAssembler(), invoke, /* is_long */ true);
-}
-
 static void CreateOneBitLocations(ArenaAllocator* arena, HInvoke* invoke, bool is_high) {
   LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
@@ -2757,74 +2705,6 @@
   GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
 }
 
-static void CreateSignLocations(ArenaAllocator* arena, HInvoke* invoke) {
-  LocationSummary* locations = new (arena) LocationSummary(invoke,
-                                                           LocationSummary::kNoCall,
-                                                           kIntrinsified);
-  locations->SetInAt(0, Location::Any());
-  locations->SetOut(Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());  // Need a writeable register.
-}
-
-static void GenSign(X86_64Assembler* assembler,
-                    CodeGeneratorX86_64* codegen,
-                    HInvoke* invoke, bool is_long) {
-  LocationSummary* locations = invoke->GetLocations();
-  Location src = locations->InAt(0);
-  CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-
-  if (invoke->InputAt(0)->IsConstant()) {
-    // Evaluate this at compile time.
-    int64_t value = Int64FromConstant(invoke->InputAt(0)->AsConstant());
-    codegen->Load32BitValue(out, value == 0 ? 0 : (value > 0 ? 1 : -1));
-    return;
-  }
-
-  // Copy input into temporary.
-  CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>();
-  if (src.IsRegister()) {
-    if (is_long) {
-      __ movq(tmp, src.AsRegister<CpuRegister>());
-    } else {
-      __ movl(tmp, src.AsRegister<CpuRegister>());
-    }
-  } else if (is_long) {
-    DCHECK(src.IsDoubleStackSlot());
-    __ movq(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
-  } else {
-    DCHECK(src.IsStackSlot());
-    __ movl(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
-  }
-
-  // Do the bit twiddling: basically tmp >> 63/31 | -tmp >>> 63/31 for long/int.
-  if (is_long) {
-    __ movq(out, tmp);
-    __ sarq(out, Immediate(63));
-    __ negq(tmp);
-    __ shrq(tmp, Immediate(63));
-    __ orq(out, tmp);
-  } else {
-    __ movl(out, tmp);
-    __ sarl(out, Immediate(31));
-    __ negl(tmp);
-    __ shrl(tmp, Immediate(31));
-    __ orl(out, tmp);
-  }
-}
-
-void IntrinsicLocationsBuilderX86_64::VisitIntegerSignum(HInvoke* invoke) {
-  CreateSignLocations(arena_, invoke);
-}
-void IntrinsicCodeGeneratorX86_64::VisitIntegerSignum(HInvoke* invoke) {
-  GenSign(GetAssembler(), codegen_, invoke, /* is_long */ false);
-}
-void IntrinsicLocationsBuilderX86_64::VisitLongSignum(HInvoke* invoke) {
-  CreateSignLocations(arena_, invoke);
-}
-void IntrinsicCodeGeneratorX86_64::VisitLongSignum(HInvoke* invoke) {
-  GenSign(GetAssembler(), codegen_, invoke, /* is_long */ true);
-}
-
 // Unimplemented intrinsics.
 
 #define UNIMPLEMENTED_INTRINSIC(Name)                                                   \
@@ -2840,11 +2720,15 @@
 UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
 UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
 
-// Rotate operations are handled as HRor instructions.
+// Handled as HIR instructions.
 UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
 UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
+UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
 UNIMPLEMENTED_INTRINSIC(LongRotateRight)
+UNIMPLEMENTED_INTRINSIC(IntegerCompare)
+UNIMPLEMENTED_INTRINSIC(LongCompare)
+UNIMPLEMENTED_INTRINSIC(IntegerSignum)
+UNIMPLEMENTED_INTRINSIC(LongSignum)
 
 #undef UNIMPLEMENTED_INTRINSIC
 
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index c057eca..f269885 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -647,6 +647,10 @@
       header_->GetGraph()->SetHasIrreducibleLoops(true);
       PopulateIrreducibleRecursive(back_edge);
     } else {
+      if (header_->GetGraph()->IsCompilingOsr()) {
+        irreducible_ = true;
+        header_->GetGraph()->SetHasIrreducibleLoops(true);
+      }
       PopulateRecursive(back_edge);
     }
   }
@@ -858,7 +862,6 @@
       // At the end of the loop pre-header, the corresponding value for instruction
       // is the first input of the phi.
       HInstruction* initial = instruction->AsPhi()->InputAt(0);
-      DCHECK(initial->GetBlock()->Dominates(loop_header));
       SetRawEnvAt(i, initial);
       initial->AddEnvUseAt(this, i);
     } else {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index b808347..daec096 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -274,6 +274,7 @@
          InstructionSet instruction_set,
          InvokeType invoke_type = kInvalidInvokeType,
          bool debuggable = false,
+         bool osr = false,
          int start_instruction_id = 0)
       : arena_(arena),
         blocks_(arena->Adapter(kArenaAllocBlockList)),
@@ -302,7 +303,8 @@
         cached_long_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
         cached_double_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
         cached_current_method_(nullptr),
-        inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()) {
+        inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()),
+        osr_(osr) {
     blocks_.reserve(kDefaultNumberOfBlocks);
   }
 
@@ -478,6 +480,8 @@
     return instruction_set_;
   }
 
+  bool IsCompilingOsr() const { return osr_; }
+
   bool HasTryCatch() const { return has_try_catch_; }
   void SetHasTryCatch(bool value) { has_try_catch_ = value; }
 
@@ -606,6 +610,11 @@
   // collection pointer to passes which may create NullConstant.
   ReferenceTypeInfo inexact_object_rti_;
 
+  // Whether we are compiling this graph for on stack replacement: this will
+  // make all loops seen as irreducible and emit special stack maps to mark
+  // compiled code entries which the interpreter can directly jump to.
+  const bool osr_;
+
   friend class SsaBuilder;           // For caching constants.
   friend class SsaLivenessAnalysis;  // For the linear order.
   ART_FRIEND_TEST(GraphTest, IfSuccessorSimpleJoinBlock1);
@@ -1259,6 +1268,7 @@
 #define FOR_EACH_CONCRETE_INSTRUCTION_X86(M)                            \
   M(X86ComputeBaseMethodAddress, Instruction)                           \
   M(X86LoadFromConstantTable, Instruction)                              \
+  M(X86FPNeg, Instruction)                                              \
   M(X86PackedSwitch, Instruction)
 #endif
 
@@ -6040,6 +6050,74 @@
   FOR_EACH_CONCRETE_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
 #undef INSTRUCTION_TYPE_CHECK
 
+class SwitchTable : public ValueObject {
+ public:
+  SwitchTable(const Instruction& instruction, uint32_t dex_pc, bool sparse)
+      : instruction_(instruction), dex_pc_(dex_pc), sparse_(sparse) {
+    int32_t table_offset = instruction.VRegB_31t();
+    const uint16_t* table = reinterpret_cast<const uint16_t*>(&instruction) + table_offset;
+    if (sparse) {
+      CHECK_EQ(table[0], static_cast<uint16_t>(Instruction::kSparseSwitchSignature));
+    } else {
+      CHECK_EQ(table[0], static_cast<uint16_t>(Instruction::kPackedSwitchSignature));
+    }
+    num_entries_ = table[1];
+    values_ = reinterpret_cast<const int32_t*>(&table[2]);
+  }
+
+  uint16_t GetNumEntries() const {
+    return num_entries_;
+  }
+
+  void CheckIndex(size_t index) const {
+    if (sparse_) {
+      // In a sparse table, we have num_entries_ keys and num_entries_ values, in that order.
+      DCHECK_LT(index, 2 * static_cast<size_t>(num_entries_));
+    } else {
+      // In a packed table, we have the starting key and num_entries_ values.
+      DCHECK_LT(index, 1 + static_cast<size_t>(num_entries_));
+    }
+  }
+
+  int32_t GetEntryAt(size_t index) const {
+    CheckIndex(index);
+    return values_[index];
+  }
+
+  uint32_t GetDexPcForIndex(size_t index) const {
+    CheckIndex(index);
+    return dex_pc_ +
+        (reinterpret_cast<const int16_t*>(values_ + index) -
+         reinterpret_cast<const int16_t*>(&instruction_));
+  }
+
+  // Index of the first value in the table.
+  size_t GetFirstValueIndex() const {
+    if (sparse_) {
+      // In a sparse table, we have num_entries_ keys and num_entries_ values, in that order.
+      return num_entries_;
+    } else {
+      // In a packed table, we have the starting key and num_entries_ values.
+      return 1;
+    }
+  }
+
+ private:
+  const Instruction& instruction_;
+  const uint32_t dex_pc_;
+
+  // Whether this is a sparse-switch table (or a packed-switch one).
+  const bool sparse_;
+
+  // This can't be const as it needs to be computed off of the given instruction, and complicated
+  // expressions in the initializer list seemed very ugly.
+  uint16_t num_entries_;
+
+  const int32_t* values_;
+
+  DISALLOW_COPY_AND_ASSIGN(SwitchTable);
+};
+
 }  // namespace art
 
 #endif  // ART_COMPILER_OPTIMIZING_NODES_H_
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
index b1bf939..0b3a84d 100644
--- a/compiler/optimizing/nodes_x86.h
+++ b/compiler/optimizing/nodes_x86.h
@@ -56,6 +56,25 @@
   DISALLOW_COPY_AND_ASSIGN(HX86LoadFromConstantTable);
 };
 
+// Version of HNeg with access to the constant table for FP types.
+class HX86FPNeg : public HExpression<2> {
+ public:
+  HX86FPNeg(Primitive::Type result_type,
+            HInstruction* input,
+            HX86ComputeBaseMethodAddress* method_base,
+            uint32_t dex_pc)
+      : HExpression(result_type, SideEffects::None(), dex_pc) {
+    DCHECK(Primitive::IsFloatingPointType(result_type));
+    SetRawInputAt(0, input);
+    SetRawInputAt(1, method_base);
+  }
+
+  DECLARE_INSTRUCTION(X86FPNeg);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HX86FPNeg);
+};
+
 // X86 version of HPackedSwitch that holds a pointer to the base method address.
 class HX86PackedSwitch : public HTemplateInstruction<2> {
  public:
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index bdc664b..dcd8e7d 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -45,15 +45,14 @@
 #include "compiler.h"
 #include "constant_folding.h"
 #include "dead_code_elimination.h"
+#include "debug/elf_debug_writer.h"
+#include "debug/method_debug_info.h"
 #include "dex/quick/dex_file_to_method_inliner_map.h"
-#include "dex/verified_method.h"
 #include "dex/verification_results.h"
-#include "driver/compiler_driver.h"
+#include "dex/verified_method.h"
 #include "driver/compiler_driver-inl.h"
 #include "driver/compiler_options.h"
 #include "driver/dex_compilation_unit.h"
-#include "dwarf/method_debug_info.h"
-#include "elf_writer_debug.h"
 #include "elf_writer_quick.h"
 #include "graph_checker.h"
 #include "graph_visualizer.h"
@@ -64,20 +63,20 @@
 #include "intrinsics.h"
 #include "jit/debugger_interface.h"
 #include "jit/jit_code_cache.h"
-#include "licm.h"
 #include "jni/quick/jni_compiler.h"
+#include "licm.h"
 #include "load_store_elimination.h"
 #include "nodes.h"
+#include "oat_quick_method_header.h"
 #include "prepare_for_register_allocation.h"
 #include "reference_type_propagation.h"
 #include "register_allocator.h"
-#include "oat_quick_method_header.h"
 #include "select_generator.h"
 #include "sharpening.h"
 #include "side_effects_analysis.h"
 #include "ssa_builder.h"
-#include "ssa_phi_elimination.h"
 #include "ssa_liveness_analysis.h"
+#include "ssa_phi_elimination.h"
 #include "utils/assembler.h"
 #include "verifier/method_verifier.h"
 
@@ -300,7 +299,7 @@
     }
   }
 
-  bool JitCompile(Thread* self, jit::JitCodeCache* code_cache, ArtMethod* method)
+  bool JitCompile(Thread* self, jit::JitCodeCache* code_cache, ArtMethod* method, bool osr)
       OVERRIDE
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -309,7 +308,8 @@
   CompiledMethod* Emit(ArenaAllocator* arena,
                        CodeVectorAllocator* code_allocator,
                        CodeGenerator* codegen,
-                       CompilerDriver* driver) const;
+                       CompilerDriver* driver,
+                       const DexFile::CodeItem* item) const;
 
   // Try compiling a method and return the code generator used for
   // compiling it.
@@ -327,7 +327,8 @@
                             uint32_t method_idx,
                             jobject class_loader,
                             const DexFile& dex_file,
-                            Handle<mirror::DexCache> dex_cache) const;
+                            Handle<mirror::DexCache> dex_cache,
+                            bool osr) const;
 
   std::unique_ptr<OptimizingCompilerStats> compilation_stats_;
 
@@ -580,11 +581,12 @@
 CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena,
                                          CodeVectorAllocator* code_allocator,
                                          CodeGenerator* codegen,
-                                         CompilerDriver* compiler_driver) const {
+                                         CompilerDriver* compiler_driver,
+                                         const DexFile::CodeItem* code_item) const {
   ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
   ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps));
   stack_map.resize(codegen->ComputeStackMapsSize());
-  codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()));
+  codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()), *code_item);
 
   CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
       compiler_driver,
@@ -615,7 +617,8 @@
                                               uint32_t method_idx,
                                               jobject class_loader,
                                               const DexFile& dex_file,
-                                              Handle<mirror::DexCache> dex_cache) const {
+                                              Handle<mirror::DexCache> dex_cache,
+                                              bool osr) const {
   MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
   CompilerDriver* compiler_driver = GetCompilerDriver();
   InstructionSet instruction_set = compiler_driver->GetInstructionSet();
@@ -663,8 +666,14 @@
                                                      dex_compilation_unit.GetDexFile(),
                                                      dex_compilation_unit.GetClassDefIndex());
   HGraph* graph = new (arena) HGraph(
-      arena, dex_file, method_idx, requires_barrier, compiler_driver->GetInstructionSet(),
-      kInvalidInvokeType, compiler_driver->GetCompilerOptions().GetDebuggable());
+      arena,
+      dex_file,
+      method_idx,
+      requires_barrier,
+      compiler_driver->GetInstructionSet(),
+      kInvalidInvokeType,
+      compiler_driver->GetCompilerOptions().GetDebuggable(),
+      osr);
 
   std::unique_ptr<CodeGenerator> codegen(
       CodeGenerator::Create(graph,
@@ -797,10 +806,11 @@
                    method_idx,
                    jclass_loader,
                    dex_file,
-                   dex_cache));
+                   dex_cache,
+                   /* osr */ false));
     if (codegen.get() != nullptr) {
       MaybeRecordStat(MethodCompilationStat::kCompiled);
-      method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver);
+      method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver, code_item);
     }
   } else {
     if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
@@ -843,7 +853,8 @@
 
 bool OptimizingCompiler::JitCompile(Thread* self,
                                     jit::JitCodeCache* code_cache,
-                                    ArtMethod* method) {
+                                    ArtMethod* method,
+                                    bool osr) {
   StackHandleScope<2> hs(self);
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
       method->GetDeclaringClass()->GetClassLoader()));
@@ -873,7 +884,8 @@
                    method_idx,
                    jclass_loader,
                    *dex_file,
-                   dex_cache));
+                   dex_cache,
+                   osr));
     if (codegen.get() == nullptr) {
       return false;
     }
@@ -885,7 +897,7 @@
     return false;
   }
   MaybeRecordStat(MethodCompilationStat::kCompiled);
-  codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size));
+  codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size), *code_item);
   const void* code = code_cache->CommitCode(
       self,
       method,
@@ -896,7 +908,8 @@
       codegen->GetCoreSpillMask(),
       codegen->GetFpuSpillMask(),
       code_allocator.GetMemory().data(),
-      code_allocator.GetSize());
+      code_allocator.GetSize(),
+      osr);
 
   if (code == nullptr) {
     code_cache->ClearData(self, stack_map_data);
@@ -919,7 +932,7 @@
         ArrayRef<const uint8_t>(),  // native_gc_map.
         ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
         ArrayRef<const LinkerPatch>());
-    dwarf::MethodDebugInfo method_debug_info {
+    debug::MethodDebugInfo method_debug_info {
         dex_file,
         class_def_idx,
         method_idx,
@@ -930,7 +943,7 @@
         code_address + code_allocator.GetSize(),
         &compiled_method
     };
-    ArrayRef<const uint8_t> elf_file = dwarf::WriteDebugElfFileForMethod(method_debug_info);
+    ArrayRef<const uint8_t> elf_file = debug::WriteDebugElfFileForMethod(method_debug_info);
     CreateJITCodeEntryForAddress(code_address,
                                  std::unique_ptr<const uint8_t[]>(elf_file.data()),
                                  elf_file.size());
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index a2180bc..a6f1461 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -53,6 +53,10 @@
     BinaryFP(div);
   }
 
+  void VisitCompare(HCompare* compare) OVERRIDE {
+    BinaryFP(compare);
+  }
+
   void VisitReturn(HReturn* ret) OVERRIDE {
     HConstant* value = ret->InputAt(0)->AsConstant();
     if ((value != nullptr && Primitive::IsFloatingPointType(value->GetType()))) {
@@ -74,11 +78,50 @@
 
   void BinaryFP(HBinaryOperation* bin) {
     HConstant* rhs = bin->InputAt(1)->AsConstant();
-    if (rhs != nullptr && Primitive::IsFloatingPointType(bin->GetResultType())) {
+    if (rhs != nullptr && Primitive::IsFloatingPointType(rhs->GetType())) {
       ReplaceInput(bin, rhs, 1, false);
     }
   }
 
+  void VisitEqual(HEqual* cond) OVERRIDE {
+    BinaryFP(cond);
+  }
+
+  void VisitNotEqual(HNotEqual* cond) OVERRIDE {
+    BinaryFP(cond);
+  }
+
+  void VisitLessThan(HLessThan* cond) OVERRIDE {
+    BinaryFP(cond);
+  }
+
+  void VisitLessThanOrEqual(HLessThanOrEqual* cond) OVERRIDE {
+    BinaryFP(cond);
+  }
+
+  void VisitGreaterThan(HGreaterThan* cond) OVERRIDE {
+    BinaryFP(cond);
+  }
+
+  void VisitGreaterThanOrEqual(HGreaterThanOrEqual* cond) OVERRIDE {
+    BinaryFP(cond);
+  }
+
+  void VisitNeg(HNeg* neg) OVERRIDE {
+    if (Primitive::IsFloatingPointType(neg->GetType())) {
+      // We need to replace the HNeg with a HX86FPNeg in order to address the constant area.
+      InitializePCRelativeBasePointer();
+      HGraph* graph = GetGraph();
+      HBasicBlock* block = neg->GetBlock();
+      HX86FPNeg* x86_fp_neg = new (graph->GetArena()) HX86FPNeg(
+          neg->GetType(),
+          neg->InputAt(0),
+          base_,
+          neg->GetDexPc());
+      block->ReplaceAndRemoveInstructionWith(neg, x86_fp_neg);
+    }
+  }
+
   void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
     if (switch_insn->GetNumEntries() <=
         InstructionCodeGeneratorX86::kPackedSwitchJumpTableThreshold) {
@@ -127,12 +170,23 @@
     // If this is an invoke-static/-direct with PC-relative dex cache array
     // addressing, we need the PC-relative address base.
     HInvokeStaticOrDirect* invoke_static_or_direct = invoke->AsInvokeStaticOrDirect();
+    // We can't add a pointer to the constant area if we already have a current
+    // method pointer. This may arise when sharpening doesn't remove the current
+    // method pointer from the invoke.
+    if (invoke_static_or_direct != nullptr &&
+        invoke_static_or_direct->HasCurrentMethodInput()) {
+      DCHECK(!invoke_static_or_direct->HasPcRelativeDexCache());
+      return;
+    }
+
+    bool base_added = false;
     if (invoke_static_or_direct != nullptr && invoke_static_or_direct->HasPcRelativeDexCache()) {
       InitializePCRelativeBasePointer();
       // Add the extra parameter base_.
-      DCHECK(!invoke_static_or_direct->HasCurrentMethodInput());
       invoke_static_or_direct->AddSpecialInput(base_);
+      base_added = true;
     }
+
     // Ensure that we can load FP arguments from the constant area.
     for (size_t i = 0, e = invoke->InputCount(); i < e; i++) {
       HConstant* input = invoke->InputAt(i)->AsConstant();
@@ -140,6 +194,25 @@
         ReplaceInput(invoke, input, i, true);
       }
     }
+
+    // These intrinsics need the constant area.
+    switch (invoke->GetIntrinsic()) {
+      case Intrinsics::kMathAbsDouble:
+      case Intrinsics::kMathAbsFloat:
+      case Intrinsics::kMathMaxDoubleDouble:
+      case Intrinsics::kMathMaxFloatFloat:
+      case Intrinsics::kMathMinDoubleDouble:
+      case Intrinsics::kMathMinFloatFloat:
+        if (!base_added) {
+          DCHECK(invoke_static_or_direct != nullptr);
+          DCHECK(!invoke_static_or_direct->HasCurrentMethodInput());
+          InitializePCRelativeBasePointer();
+          invoke_static_or_direct->AddSpecialInput(base_);
+        }
+        break;
+      default:
+        break;
+    }
   }
 
   // The generated HX86ComputeBaseMethodAddress in the entry block needed as an
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 5cd30ad..b8d76b9 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -994,10 +994,6 @@
     return false;
   }
 
-  // We use the first use to compare with other intervals. If this interval
-  // is used after any active intervals, we will spill this interval.
-  size_t first_use = current->FirstUseAfter(current->GetStart());
-
   // First set all registers as not being used.
   size_t* next_use = registers_array_;
   for (size_t i = 0; i < number_of_registers_; ++i) {
@@ -1011,7 +1007,7 @@
     if (active->IsFixed()) {
       next_use[active->GetRegister()] = current->GetStart();
     } else {
-      size_t use = active->FirstUseAfter(current->GetStart());
+      size_t use = active->FirstRegisterUseAfter(current->GetStart());
       if (use != kNoLifetime) {
         next_use[active->GetRegister()] = use;
       }
@@ -1052,16 +1048,16 @@
     DCHECK(current->IsHighInterval());
     reg = current->GetRegister();
     // When allocating the low part, we made sure the high register was available.
-    DCHECK_LT(first_use, next_use[reg]);
+    DCHECK_LT(first_register_use, next_use[reg]);
   } else if (current->IsLowInterval()) {
-    reg = FindAvailableRegisterPair(next_use, first_use);
+    reg = FindAvailableRegisterPair(next_use, first_register_use);
     // We should spill if both registers are not available.
-    should_spill = (first_use >= next_use[reg])
-      || (first_use >= next_use[GetHighForLowRegister(reg)]);
+    should_spill = (first_register_use >= next_use[reg])
+      || (first_register_use >= next_use[GetHighForLowRegister(reg)]);
   } else {
     DCHECK(!current->IsHighInterval());
     reg = FindAvailableRegister(next_use, current);
-    should_spill = (first_use >= next_use[reg]);
+    should_spill = (first_register_use >= next_use[reg]);
   }
 
   DCHECK_NE(reg, kNoRegister);
diff --git a/compiler/utils/arm/managed_register_arm.h b/compiler/utils/arm/managed_register_arm.h
index 5fde9e8..5b84058 100644
--- a/compiler/utils/arm/managed_register_arm.h
+++ b/compiler/utils/arm/managed_register_arm.h
@@ -19,7 +19,7 @@
 
 #include "base/logging.h"
 #include "constants_arm.h"
-#include "dwarf/register.h"
+#include "debug/dwarf/register.h"
 #include "utils/managed_register.h"
 
 namespace art {
diff --git a/compiler/utils/arm64/managed_register_arm64.h b/compiler/utils/arm64/managed_register_arm64.h
index dbcd8c5..46be1c5 100644
--- a/compiler/utils/arm64/managed_register_arm64.h
+++ b/compiler/utils/arm64/managed_register_arm64.h
@@ -19,7 +19,7 @@
 
 #include "base/logging.h"
 #include "constants_arm64.h"
-#include "dwarf/register.h"
+#include "debug/dwarf/register.h"
 #include "utils/managed_register.h"
 
 namespace art {
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 1dbc142..414ea7e 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -21,9 +21,10 @@
 
 #include "arch/instruction_set.h"
 #include "arch/instruction_set_features.h"
+#include "arm/constants_arm.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "arm/constants_arm.h"
+#include "debug/dwarf/debug_frame_opcode_writer.h"
 #include "label.h"
 #include "managed_register.h"
 #include "memory_region.h"
@@ -31,7 +32,6 @@
 #include "offsets.h"
 #include "x86/constants_x86.h"
 #include "x86_64/constants_x86_64.h"
-#include "dwarf/debug_frame_opcode_writer.h"
 
 namespace art {
 
diff --git a/compiler/utils/mips/managed_register_mips.h b/compiler/utils/mips/managed_register_mips.h
index 40d39e3..5e7ed11 100644
--- a/compiler/utils/mips/managed_register_mips.h
+++ b/compiler/utils/mips/managed_register_mips.h
@@ -18,7 +18,7 @@
 #define ART_COMPILER_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
 
 #include "constants_mips.h"
-#include "dwarf/register.h"
+#include "debug/dwarf/register.h"
 #include "utils/managed_register.h"
 
 namespace art {
diff --git a/compiler/utils/mips64/managed_register_mips64.h b/compiler/utils/mips64/managed_register_mips64.h
index 4c4705b..1d36128 100644
--- a/compiler/utils/mips64/managed_register_mips64.h
+++ b/compiler/utils/mips64/managed_register_mips64.h
@@ -18,7 +18,7 @@
 #define ART_COMPILER_UTILS_MIPS64_MANAGED_REGISTER_MIPS64_H_
 
 #include "constants_mips64.h"
-#include "dwarf/register.h"
+#include "debug/dwarf/register.h"
 #include "utils/managed_register.h"
 
 namespace art {
diff --git a/compiler/utils/x86/managed_register_x86.h b/compiler/utils/x86/managed_register_x86.h
index 4e8c41e..fc20d7e 100644
--- a/compiler/utils/x86/managed_register_x86.h
+++ b/compiler/utils/x86/managed_register_x86.h
@@ -18,7 +18,7 @@
 #define ART_COMPILER_UTILS_X86_MANAGED_REGISTER_X86_H_
 
 #include "constants_x86.h"
-#include "dwarf/register.h"
+#include "debug/dwarf/register.h"
 #include "utils/managed_register.h"
 
 namespace art {
diff --git a/compiler/utils/x86_64/managed_register_x86_64.h b/compiler/utils/x86_64/managed_register_x86_64.h
index 47bbb44..c4228c1 100644
--- a/compiler/utils/x86_64/managed_register_x86_64.h
+++ b/compiler/utils/x86_64/managed_register_x86_64.h
@@ -18,7 +18,7 @@
 #define ART_COMPILER_UTILS_X86_64_MANAGED_REGISTER_X86_64_H_
 
 #include "constants_x86_64.h"
-#include "dwarf/register.h"
+#include "debug/dwarf/register.h"
 #include "utils/managed_register.h"
 
 namespace art {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index b3e3ba6..541fb5a 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -35,9 +35,9 @@
 #define ATRACE_TAG ATRACE_TAG_DALVIK
 #include <cutils/trace.h>
 
-#include "art_method-inl.h"
 #include "arch/instruction_set_features.h"
 #include "arch/mips/instruction_set_features_mips.h"
+#include "art_method-inl.h"
 #include "base/dumpable.h"
 #include "base/macros.h"
 #include "base/stl_util.h"
@@ -48,14 +48,14 @@
 #include "class_linker.h"
 #include "compiler.h"
 #include "compiler_callbacks.h"
-#include "dex_file-inl.h"
+#include "debug/method_debug_info.h"
 #include "dex/pass_manager.h"
-#include "dex/verification_results.h"
-#include "dex/quick_compiler_callbacks.h"
 #include "dex/quick/dex_file_to_method_inliner_map.h"
+#include "dex/quick_compiler_callbacks.h"
+#include "dex/verification_results.h"
+#include "dex_file-inl.h"
 #include "driver/compiler_driver.h"
 #include "driver/compiler_options.h"
-#include "dwarf/method_debug_info.h"
 #include "elf_file.h"
 #include "elf_writer.h"
 #include "elf_writer_quick.h"
@@ -126,6 +126,11 @@
       continue;
     }
 
+    // The image format is dropped.
+    if (StartsWith(original_argv[i], "--image-format=")) {
+      continue;
+    }
+
     // This should leave any dex-file and oat-file options, describing what we compiled.
 
     // However, we prefer to drop this when we saw --zip-fd.
@@ -1682,6 +1687,12 @@
         std::vector<const DexFile*>& dex_files = dex_files_per_oat_file_[i];
         oat_writer->PrepareLayout(driver_.get(), image_writer_.get(), dex_files);
 
+        // We need to mirror the layout of the ELF file in the compressed debug-info.
+        // Therefore we need to propagate the sizes of at least those sections.
+        size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset();
+        size_t text_size = oat_writer->GetSize() - rodata_size;
+        elf_writer->PrepareDebugInfo(rodata_size, text_size, oat_writer->GetMethodDebugInfo());
+
         OutputStream*& rodata = rodata_[i];
         DCHECK(rodata != nullptr);
         if (!oat_writer->WriteRodata(rodata)) {
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 5e2cf6b..0e709eb 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -1152,8 +1152,10 @@
             args << Rd << ", #" << imm16;
             break;
           }
-          case 0x16: {
+          case 0x16: case 0x14: case 0x1C: {
             // BFI Rd, Rn, #lsb, #width - 111 10 0 11 011 0 nnnn 0 iii dddd ii 0 iiiii
+            // SBFX Rd, Rn, #lsb, #width - 111 10 0 11 010 0 nnnn 0 iii dddd ii 0 iiiii
+            // UBFX Rd, Rn, #lsb, #width - 111 10 0 11 110 0 nnnn 0 iii dddd ii 0 iiiii
             ArmRegister Rd(instr, 8);
             ArmRegister Rn(instr, 16);
             uint32_t msb = instr & 0x1F;
@@ -1161,12 +1163,21 @@
             uint32_t imm3 = (instr >> 12) & 0x7;
             uint32_t lsb = (imm3 << 2) | imm2;
             uint32_t width = msb - lsb + 1;
-            if (Rn.r != 0xF) {
-              opcode << "bfi";
-              args << Rd << ", " << Rn << ", #" << lsb << ", #" << width;
+            if (op3 == 0x16) {
+              if (Rn.r != 0xF) {
+                opcode << "bfi";
+                args << Rd << ", " << Rn << ", #" << lsb << ", #" << width;
+              } else {
+                opcode << "bfc";
+                args << Rd << ", #" << lsb << ", #" << width;
+              }
             } else {
-              opcode << "bfc";
-              args << Rd << ", #" << lsb << ", #" << width;
+              opcode << ((op3 & 0x8) != 0u ? "ubfx" : "sbfx");
+              args << Rd << ", " << Rn << ", #" << lsb << ", #" << width;
+              if (Rd.r == 13 || Rd.r == 15 || Rn.r == 13 || Rn.r == 15 ||
+                  (instr & 0x04000020) != 0u) {
+                args << " (UNPREDICTABLE)";
+              }
             }
             break;
           }
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 288f95e..e9f7add 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -275,6 +275,8 @@
   arch/arm64/fault_handler_arm64.cc
 
 LIBART_SRC_FILES_x86 := \
+  interpreter/mterp/mterp.cc \
+  interpreter/mterp/out/mterp_x86.S \
   arch/x86/context_x86.cc \
   arch/x86/entrypoints_init_x86.cc \
   arch/x86/jni_entrypoints_x86.S \
@@ -286,20 +288,6 @@
 LIBART_TARGET_SRC_FILES_x86 := \
   $(LIBART_SRC_FILES_x86)
 
-# Darwin uses non-standard x86 assembly syntax.  Don't build x86 Darwin host mterp there.
-ifeq ($(HOST_OS),darwin)
-  LIBART_SRC_FILES_x86 += \
-    interpreter/mterp/mterp_stub.cc
-else
-  LIBART_SRC_FILES_x86 += \
-    interpreter/mterp/mterp.cc \
-    interpreter/mterp/out/mterp_x86.S
-endif
-# But do support x86 mterp for target build regardless of host platform.
-LIBART_TARGET_SRC_FILES_x86 += \
-  interpreter/mterp/mterp.cc \
-  interpreter/mterp/out/mterp_x86.S
-
 # Note that the fault_handler_x86.cc is not a mistake.  This file is
 # shared between the x86 and x86_64 architectures.
 LIBART_SRC_FILES_x86_64 := \
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 7141181..e358ff8 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -130,6 +130,25 @@
     qpoints->pL2f = art_quick_l2f;
   }
 
+  // More math.
+  qpoints->pCos = cos;
+  qpoints->pSin = sin;
+  qpoints->pAcos = acos;
+  qpoints->pAsin = asin;
+  qpoints->pAtan = atan;
+  qpoints->pAtan2 = atan2;
+  qpoints->pCbrt = cbrt;
+  qpoints->pCosh = cosh;
+  qpoints->pExp = exp;
+  qpoints->pExpm1 = expm1;
+  qpoints->pHypot = hypot;
+  qpoints->pLog = log;
+  qpoints->pLog10 = log10;
+  qpoints->pNextAfter = nextafter;
+  qpoints->pSinh = sinh;
+  qpoints->pTan = tan;
+  qpoints->pTanh = tanh;
+
   // Intrinsics
   qpoints->pIndexOf = art_quick_indexof;
   qpoints->pStringCompareTo = art_quick_string_compareto;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 631b784..949ad99 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -429,6 +429,56 @@
 END art_quick_invoke_stub_internal
 
     /*
+     * On stack replacement stub.
+     * On entry:
+     *   r0 = stack to copy
+     *   r1 = size of stack
+     *   r2 = pc to call
+     *   r3 = JValue* result
+     *   [sp] = shorty
+     *   [sp + 4] = thread
+     */
+ENTRY art_quick_osr_stub
+    SPILL_ALL_CALLEE_SAVE_GPRS             @ Spill regs (9)
+    mov    r11, sp                         @ Save the stack pointer
+    mov    r10, r1                         @ Save size of stack
+    ldr    r9, [r11, #40]                  @ Move managed thread pointer into r9
+    mov    r8, r2                          @ Save the pc to call
+    sub    r7, sp, #12                     @ Reserve space for stack pointer, JValue result, and ArtMethod* slot
+    and    r7, #0xFFFFFFF0                 @ Align stack pointer
+    mov    sp, r7                          @ Update stack pointer
+    str    r11, [sp, #4]                   @ Save old stack pointer
+    str    r3, [sp, #8]                    @ Save JValue result
+    mov    ip, #0
+    str    ip, [sp]                        @ Store null for ArtMethod* at bottom of frame
+    sub    sp, sp, r1                      @ Reserve space for callee stack
+    mov    r2, r1
+    mov    r1, r0
+    mov    r0, sp
+    bl     memcpy                          @ memcpy (dest r0, src r1, bytes r2)
+    bl     .Losr_entry                     @ Call the method
+    ldr    r11, [sp, #4]                   @ Restore saved stack pointer
+    ldr    r10, [sp, #8]                   @ Restore JValue result
+    mov    sp, r11                         @ Restore stack pointer.
+    ldr    r4, [sp, #36]                   @ load shorty
+    ldrb   r4, [r4, #0]                    @ load return type
+    cmp    r4, #68                         @ Test if result type char == 'D'.
+    beq    .Losr_fp_result
+    cmp    r4, #70                         @ Test if result type char == 'F'.
+    beq    .Losr_fp_result
+    strd r0, [r10]                         @ Store r0/r1 into result pointer
+    b    .Losr_exit
+.Losr_fp_result:
+    vstr d0, [r10]                         @ Store s0-s1/d0 into result pointer
+.Losr_exit:
+    pop    {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+.Losr_entry:
+    sub r10, r10, #4
+    str lr, [sp, r10]                     @ Store link register per the compiler ABI
+    bx r8
+END art_quick_osr_stub
+
+    /*
      * On entry r0 is uint32_t* gprs_ and r1 is uint32_t* fprs_
      */
 ARM_ENTRY art_quick_do_long_jump
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 9ccabad..e848008 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -915,6 +915,105 @@
 
 
 
+/*  extern"C" void art_quick_osr_stub(void** stack,                x0
+ *                                    size_t stack_size_in_bytes,  x1
+ *                                    const uint8_t* native_pc,    x2
+ *                                    JValue *result,              x3
+ *                                    char   *shorty,              x4
+ *                                    Thread *self)                x5
+ */
+ENTRY art_quick_osr_stub
+SAVE_SIZE=15*8   // x3, x4, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved.
+    mov x9, sp                             // Save stack pointer.
+    .cfi_register sp,x9
+
+    sub x10, sp, # SAVE_SIZE
+    and x10, x10, # ~0xf                   // Enforce 16 byte stack alignment.
+    mov sp, x10                            // Set new SP.
+
+    str x28, [sp, #112]
+    stp x26, x27, [sp, #96]
+    stp x24, x25, [sp, #80]
+    stp x22, x23, [sp, #64]
+    stp x20, x21, [sp, #48]
+    stp x9, x19, [sp, #32]                // Save old stack pointer and x19.
+    stp x3, x4, [sp, #16]                 // Save result and shorty addresses.
+    stp xFP, xLR, [sp]                    // Store LR & FP.
+    mov xSELF, x5                         // Move thread pointer into SELF register.
+
+    sub sp, sp, #16
+    str xzr, [sp]                         // Store null for ArtMethod* slot
+    // Branch to stub.
+    bl .Losr_entry
+    add sp, sp, #16
+
+    // Restore return value address and shorty address.
+    ldp x3,x4, [sp, #16]
+    ldr x28, [sp, #112]
+    ldp x26, x27, [sp, #96]
+    ldp x24, x25, [sp, #80]
+    ldp x22, x23, [sp, #64]
+    ldp x20, x21, [sp, #48]
+
+    // Store result (w0/x0/s0/d0) appropriately, depending on resultType.
+    ldrb w10, [x4]
+
+    // Check the return type and store the correct register into the jvalue in memory.
+
+    // Don't set anything for a void type.
+    cmp w10, #'V'
+    beq .Losr_exit
+
+    // Is it a double?
+    cmp w10, #'D'
+    bne .Lno_double
+    str d0, [x3]
+    b .Losr_exit
+
+.Lno_double:  // Is it a float?
+    cmp w10, #'F'
+    bne .Lno_float
+    str s0, [x3]
+    b .Losr_exit
+
+.Lno_float:  // Just store x0. Doesn't matter if it is 64 or 32 bits.
+    str x0, [x3]
+
+.Losr_exit:  // Finish up.
+    ldp x2, x19, [sp, #32]   // Restore stack pointer and x19.
+    ldp xFP, xLR, [sp]    // Restore old frame pointer and link register.
+    mov sp, x2
+    ret
+
+.Losr_entry:
+    // Update stack pointer for the callee
+    sub sp, sp, x1
+
+    // Update link register slot expected by the callee.
+    sub w1, w1, #8
+    str lr, [sp, x1]
+
+    // Copy arguments into stack frame.
+    // Use simple copy routine for now.
+    // 4 bytes per slot.
+    // X0 - source address
+    // W1 - args length
+    // SP - destination address.
+    // W10 - temporary
+.Losr_loop_entry:
+    cmp w1, #0
+    beq .Losr_loop_exit
+    sub w1, w1, #4
+    ldr w10, [x0, x1]
+    str w10, [sp, x1]
+    b .Losr_loop_entry
+
+.Losr_loop_exit:
+    // Branch to the OSR entry point.
+    br x2
+
+END art_quick_osr_stub
+
     /*
      * On entry x0 is uintptr_t* gprs_ and x1 is uint64_t* fprs_
      */
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index da30331..fbee5d7 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1712,5 +1712,65 @@
     ret
 END_FUNCTION art_quick_read_barrier_for_root_slow
 
+  /*
+     * On stack replacement stub.
+     * On entry:
+     *   [sp] = return address
+     *   [sp + 4] = stack to copy
+     *   [sp + 8] = size of stack
+     *   [sp + 12] = pc to call
+     *   [sp + 16] = JValue* result
+     *   [sp + 20] = shorty
+     *   [sp + 24] = thread
+     */
+DEFINE_FUNCTION art_quick_osr_stub
+    // Save native callee saves.
+    PUSH ebp
+    PUSH ebx
+    PUSH esi
+    PUSH edi
+    mov 4+16(%esp), %esi           // ESI = argument array
+    mov 8+16(%esp), %ecx           // ECX = size of args
+    mov 12+16(%esp), %ebx          // EBX = pc to call
+    mov %esp, %ebp                 // Save stack pointer
+    andl LITERAL(0xFFFFFFF0), %esp // Align stack
+    PUSH ebp                       // Save old stack pointer
+    subl LITERAL(12), %esp         // Align stack
+    movl LITERAL(0), (%esp)        // Store null for ArtMethod* slot
+    call .Losr_entry
+
+    // Restore stack pointer.
+    addl LITERAL(12), %esp
+    POP ebp
+    mov %ebp, %esp
+
+    // Restore callee saves.
+    POP edi
+    POP esi
+    POP ebx
+    POP ebp
+    mov 16(%esp), %ecx            // Get JValue result
+    mov %eax, (%ecx)              // Store the result assuming it is a long, int or Object*
+    mov %edx, 4(%ecx)             // Store the other half of the result
+    mov 20(%esp), %edx            // Get the shorty
+    cmpb LITERAL(68), (%edx)      // Test if result type char == 'D'
+    je .Losr_return_double_quick
+    cmpb LITERAL(70), (%edx)      // Test if result type char == 'F'
+    je .Losr_return_float_quick
+    ret
+.Losr_return_double_quick:
+    movsd %xmm0, (%ecx)           // Store the floating point result
+    ret
+.Losr_return_float_quick:
+    movss %xmm0, (%ecx)           // Store the floating point result
+    ret
+.Losr_entry:
+    subl LITERAL(4), %ecx         // Given stack size contains pushed frame pointer, subtract it.
+    subl %ecx, %esp
+    mov %esp, %edi                // EDI = beginning of stack
+    rep movsb                     // while (ecx--) { *edi++ = *esi++ }
+    jmp *%ebx
+END_FUNCTION art_quick_osr_stub
+
     // TODO: implement these!
 UNIMPLEMENTED art_quick_memcmp16
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 883da96..d6e0f1c 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1744,3 +1744,62 @@
     RESTORE_FP_CALLEE_SAVE_FRAME
     ret
 END_FUNCTION art_quick_read_barrier_for_root_slow
+
+    /*
+     * On stack replacement stub.
+     * On entry:
+     *   [sp] = return address
+     *   rdi = stack to copy
+     *   rsi = size of stack
+     *   rdx = pc to call
+     *   rcx = JValue* result
+     *   r8 = shorty
+     *   r9 = thread
+     */
+DEFINE_FUNCTION art_quick_osr_stub
+    // Save the non-volatiles.
+    PUSH rbp                      // Save rbp.
+    PUSH rcx                      // Save rcx/result*.
+    PUSH r8                       // Save r8/shorty*.
+
+    // Save callee saves.
+    PUSH rbx
+    PUSH r12
+    PUSH r13
+    PUSH r14
+    PUSH r15
+
+    pushq LITERAL(0)              // Push null for ArtMethod*.
+    movl %esi, %ecx               // rcx := size of stack
+    movq %rdi, %rsi               // rsi := stack to copy
+    call .Losr_entry
+
+    // Restore stack and callee-saves.
+    addq LITERAL(8), %rsp
+    POP r15
+    POP r14
+    POP r13
+    POP r12
+    POP rbx
+    POP r8
+    POP rcx
+    POP rbp
+    cmpb LITERAL(68), (%r8)        // Test if result type char == 'D'.
+    je .Losr_return_double_quick
+    cmpb LITERAL(70), (%r8)        // Test if result type char == 'F'.
+    je .Losr_return_float_quick
+    movq %rax, (%rcx)              // Store the result assuming it's a long, int or Object*
+    ret
+.Losr_return_double_quick:
+    movsd %xmm0, (%rcx)            // Store the double floating point result.
+    ret
+.Losr_return_float_quick:
+    movss %xmm0, (%rcx)            // Store the floating point result.
+    ret
+.Losr_entry:
+    subl LITERAL(8), %ecx         // Given stack size contains pushed frame pointer, subtract it.
+    subq %rcx, %rsp
+    movq %rsp, %rdi               // rdi := beginning of stack
+    rep movsb                     // while (rcx--) { *rdi++ = *rsi++ }
+    jmp *%rdx
+END_FUNCTION art_quick_osr_stub
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 28540c8..cc45c38 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -463,9 +463,11 @@
   }
 
   visitor.VisitRootIfNonNull(declaring_class_.AddressWithoutBarrier());
-  ProfilingInfo* profiling_info = GetProfilingInfo(pointer_size);
-  if (hotness_count_ != 0 && !IsNative() && profiling_info != nullptr) {
-    profiling_info->VisitRoots(visitor);
+  if (!IsNative()) {
+    ProfilingInfo* profiling_info = GetProfilingInfo(pointer_size);
+    if (profiling_info != nullptr) {
+      profiling_info->VisitRoots(visitor);
+    }
   }
 }
 
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 6f36016..cd38e16 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -292,22 +292,7 @@
         // Unusual case where we were running generated code and an
         // exception was thrown to force the activations to be removed from the
         // stack. Continue execution in the interpreter.
-        self->ClearException();
-        ShadowFrame* shadow_frame =
-            self->PopStackedShadowFrame(StackedShadowFrameType::kDeoptimizationShadowFrame);
-        mirror::Throwable* pending_exception = nullptr;
-        bool from_code = false;
-        self->PopDeoptimizationContext(result, &pending_exception, &from_code);
-        CHECK(!from_code);
-        self->SetTopOfStack(nullptr);
-        self->SetTopOfShadowStack(shadow_frame);
-
-        // Restore the exception that was pending before deoptimization then interpret the
-        // deoptimized frames.
-        if (pending_exception != nullptr) {
-          self->SetException(pending_exception);
-        }
-        interpreter::EnterInterpreterFromDeoptimize(self, shadow_frame, from_code, result);
+        self->DeoptimizeWithDeoptimizationException(result);
       }
       if (kLogInvocationStartAndReturn) {
         LOG(INFO) << StringPrintf("Returned '%s' quick code=%p", PrettyMethod(this).c_str(),
diff --git a/runtime/art_method.h b/runtime/art_method.h
index ce23c2a..078a978 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -174,13 +174,13 @@
 
   bool IsProxyMethod() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool IsPreverified() {
-    return (GetAccessFlags() & kAccPreverified) != 0;
+  bool SkipAccessChecks() {
+    return (GetAccessFlags() & kAccSkipAccessChecks) != 0;
   }
 
-  void SetPreverified() {
-    DCHECK(!IsPreverified());
-    SetAccessFlags(GetAccessFlags() | kAccPreverified);
+  void SetSkipAccessChecks() {
+    DCHECK(!SkipAccessChecks());
+    SetAccessFlags(GetAccessFlags() | kAccSkipAccessChecks);
   }
 
   // Returns true if this method could be overridden by a default method.
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 31610a3..eb3b7f3 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -150,11 +150,11 @@
 ADD_TEST_EQ(THREAD_ROSALLOC_RUNS_OFFSET,
             art::Thread::RosAllocRunsOffset<__SIZEOF_POINTER__>().Int32Value())
 // Offset of field Thread::tlsPtr_.thread_local_alloc_stack_top.
-#define THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 34 * __SIZEOF_POINTER__)
+#define THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 16 * __SIZEOF_POINTER__)
 ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET,
             art::Thread::ThreadLocalAllocStackTopOffset<__SIZEOF_POINTER__>().Int32Value())
 // Offset of field Thread::tlsPtr_.thread_local_alloc_stack_end.
-#define THREAD_LOCAL_ALLOC_STACK_END_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 35 * __SIZEOF_POINTER__)
+#define THREAD_LOCAL_ALLOC_STACK_END_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 17 * __SIZEOF_POINTER__)
 ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_END_OFFSET,
             art::Thread::ThreadLocalAllocStackEndOffset<__SIZEOF_POINTER__>().Int32Value())
 
@@ -331,21 +331,23 @@
 ADD_TEST_EQ(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE,
             static_cast<int32_t>(art::gc::allocator::RosAlloc::kMaxThreadLocalBracketSize))
 
-#define ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT 4
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT 3
 ADD_TEST_EQ(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT,
-            static_cast<int32_t>(art::gc::allocator::RosAlloc::kBracketQuantumSizeShift))
+            static_cast<int32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSizeShift))
 
-#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK 15
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK 7
 ADD_TEST_EQ(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK,
-            static_cast<int32_t>(art::gc::allocator::RosAlloc::kBracketQuantumSize - 1))
+            static_cast<int32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
 
-#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32 0xfffffff0
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32 0xfffffff8
 ADD_TEST_EQ(static_cast<uint32_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32),
-            ~static_cast<uint32_t>(art::gc::allocator::RosAlloc::kBracketQuantumSize - 1))
+            ~static_cast<uint32_t>(
+                art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
 
-#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64 0xfffffffffffffff0
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64 0xfffffffffffffff8
 ADD_TEST_EQ(static_cast<uint64_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64),
-            ~static_cast<uint64_t>(art::gc::allocator::RosAlloc::kBracketQuantumSize - 1))
+            ~static_cast<uint64_t>(
+                art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
 
 #define ROSALLOC_RUN_FREE_LIST_OFFSET 8
 ADD_TEST_EQ(ROSALLOC_RUN_FREE_LIST_OFFSET,
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index e4f492b..0631ebe 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -326,6 +326,24 @@
   std::fill_n(find_array_class_cache_, kFindArrayCacheSize, GcRoot<mirror::Class>(nullptr));
 }
 
+void ClassLinker::CheckSystemClass(Thread* self, Handle<mirror::Class> c1, const char* descriptor) {
+  mirror::Class* c2 = FindSystemClass(self, descriptor);
+  if (c2 == nullptr) {
+    LOG(FATAL) << "Could not find class " << descriptor;
+    UNREACHABLE();
+  }
+  if (c1.Get() != c2) {
+    std::ostringstream os1, os2;
+    c1->DumpClass(os1, mirror::Class::kDumpClassFullDetail);
+    c2->DumpClass(os2, mirror::Class::kDumpClassFullDetail);
+    LOG(FATAL) << "InitWithoutImage: Class mismatch for " << descriptor
+               << ". This is most likely the result of a broken build. Make sure that "
+               << "libcore and art projects match.\n\n"
+               << os1.str() << "\n\n" << os2.str();
+    UNREACHABLE();
+  }
+}
+
 bool ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> boot_class_path,
                                    std::string* error_msg) {
   VLOG(startup) << "ClassLinker::Init";
@@ -517,18 +535,12 @@
 
   // Object, String and DexCache need to be rerun through FindSystemClass to finish init
   mirror::Class::SetStatus(java_lang_Object, mirror::Class::kStatusNotReady, self);
-  CHECK_EQ(java_lang_Object.Get(), FindSystemClass(self, "Ljava/lang/Object;"));
+  CheckSystemClass(self, java_lang_Object, "Ljava/lang/Object;");
   CHECK_EQ(java_lang_Object->GetObjectSize(), mirror::Object::InstanceSize());
   mirror::Class::SetStatus(java_lang_String, mirror::Class::kStatusNotReady, self);
-  mirror::Class* String_class = FindSystemClass(self, "Ljava/lang/String;");
-  if (java_lang_String.Get() != String_class) {
-    std::ostringstream os1, os2;
-    java_lang_String->DumpClass(os1, mirror::Class::kDumpClassFullDetail);
-    String_class->DumpClass(os2, mirror::Class::kDumpClassFullDetail);
-    LOG(FATAL) << os1.str() << "\n\n" << os2.str();
-  }
+  CheckSystemClass(self, java_lang_String, "Ljava/lang/String;");
   mirror::Class::SetStatus(java_lang_DexCache, mirror::Class::kStatusNotReady, self);
-  CHECK_EQ(java_lang_DexCache.Get(), FindSystemClass(self, "Ljava/lang/DexCache;"));
+  CheckSystemClass(self, java_lang_DexCache, "Ljava/lang/DexCache;");
   CHECK_EQ(java_lang_DexCache->GetObjectSize(), mirror::DexCache::InstanceSize());
 
   // Setup the primitive array type classes - can't be done until Object has a vtable.
@@ -538,14 +550,13 @@
   SetClassRoot(kByteArrayClass, FindSystemClass(self, "[B"));
   mirror::ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass));
 
-  CHECK_EQ(char_array_class.Get(), FindSystemClass(self, "[C"));
+  CheckSystemClass(self, char_array_class, "[C");
 
   SetClassRoot(kShortArrayClass, FindSystemClass(self, "[S"));
   mirror::ShortArray::SetArrayClass(GetClassRoot(kShortArrayClass));
 
-  CHECK_EQ(int_array_class.Get(), FindSystemClass(self, "[I"));
-
-  CHECK_EQ(long_array_class.Get(), FindSystemClass(self, "[J"));
+  CheckSystemClass(self, int_array_class, "[I");
+  CheckSystemClass(self, long_array_class, "[J");
 
   SetClassRoot(kFloatArrayClass, FindSystemClass(self, "[F"));
   mirror::FloatArray::SetArrayClass(GetClassRoot(kFloatArrayClass));
@@ -553,9 +564,12 @@
   SetClassRoot(kDoubleArrayClass, FindSystemClass(self, "[D"));
   mirror::DoubleArray::SetArrayClass(GetClassRoot(kDoubleArrayClass));
 
-  CHECK_EQ(class_array_class.Get(), FindSystemClass(self, "[Ljava/lang/Class;"));
+  // Run Class through FindSystemClass. This initializes the dex_cache_ fields and register it
+  // in class_table_.
+  CheckSystemClass(self, java_lang_Class, "Ljava/lang/Class;");
 
-  CHECK_EQ(object_array_class.Get(), FindSystemClass(self, "[Ljava/lang/Object;"));
+  CheckSystemClass(self, class_array_class, "[Ljava/lang/Class;");
+  CheckSystemClass(self, object_array_class, "[Ljava/lang/Object;");
 
   // Setup the single, global copy of "iftable".
   auto java_lang_Cloneable = hs.NewHandle(FindSystemClass(self, "Ljava/lang/Cloneable;"));
@@ -577,14 +591,11 @@
            mirror::Class::GetDirectInterface(self, object_array_class, 0));
   CHECK_EQ(java_io_Serializable.Get(),
            mirror::Class::GetDirectInterface(self, object_array_class, 1));
-  // Run Class, ArtField, and ArtMethod through FindSystemClass. This initializes their
-  // dex_cache_ fields and register them in class_table_.
-  CHECK_EQ(java_lang_Class.Get(), FindSystemClass(self, "Ljava/lang/Class;"));
 
   CHECK_EQ(object_array_string.Get(),
            FindSystemClass(self, GetClassRootDescriptor(kJavaLangStringArrayClass)));
 
-  // End of special init trickery, subsequent classes may be loaded via FindSystemClass.
+  // End of special init trickery, all subsequent classes may be loaded via FindSystemClass.
 
   // Create java.lang.reflect.Proxy root.
   SetClassRoot(kJavaLangReflectProxy, FindSystemClass(self, "Ljava/lang/reflect/Proxy;"));
@@ -624,7 +635,7 @@
   // java.lang.ref classes need to be specially flagged, but otherwise are normal classes
   // finish initializing Reference class
   mirror::Class::SetStatus(java_lang_ref_Reference, mirror::Class::kStatusNotReady, self);
-  CHECK_EQ(java_lang_ref_Reference.Get(), FindSystemClass(self, "Ljava/lang/ref/Reference;"));
+  CheckSystemClass(self, java_lang_ref_Reference, "Ljava/lang/ref/Reference;");
   CHECK_EQ(java_lang_ref_Reference->GetObjectSize(), mirror::Reference::InstanceSize());
   CHECK_EQ(java_lang_ref_Reference->GetClassSize(),
            mirror::Reference::ClassSize(image_pointer_size_));
@@ -1669,6 +1680,22 @@
                                                          forward_dex_cache_arrays);
       class_table->Visit(visitor);
     }
+    // forward_dex_cache_arrays is true iff we copied all of the dex cache arrays into the .bss.
+    // In this case, madvise away the dex cache arrays section of the image to reduce RAM usage and
+    // mark as PROT_NONE to catch any invalid accesses.
+    if (forward_dex_cache_arrays) {
+      const ImageSection& dex_cache_section = header.GetImageSection(
+          ImageHeader::kSectionDexCacheArrays);
+      uint8_t* section_begin = AlignUp(space->Begin() + dex_cache_section.Offset(), kPageSize);
+      uint8_t* section_end = AlignDown(space->Begin() + dex_cache_section.End(), kPageSize);
+      if (section_begin < section_end) {
+        madvise(section_begin, section_end - section_begin, MADV_DONTNEED);
+        mprotect(section_begin, section_end - section_begin, PROT_NONE);
+        VLOG(image) << "Released and protected dex cache array image section from "
+                    << reinterpret_cast<const void*>(section_begin) << "-"
+                    << reinterpret_cast<const void*>(section_end);
+      }
+    }
   }
   VLOG(class_linker) << "Adding image space took " << PrettyDuration(NanoTime() - start_time);
   return true;
@@ -3625,7 +3652,7 @@
 
   // Don't attempt to re-verify if already sufficiently verified.
   if (klass->IsVerified()) {
-    EnsurePreverifiedMethods(klass);
+    EnsureSkipAccessChecksMethods(klass);
     return;
   }
   if (klass->IsCompileTimeVerified() && Runtime::Current()->IsAotCompiler()) {
@@ -3648,22 +3675,10 @@
     mirror::Class::SetStatus(klass, mirror::Class::kStatusVerifyingAtRuntime, self);
   }
 
-  // Skip verification if we are forcing a soft fail.
-  // This has to be before the normal verification enabled check,
-  // since technically verification is disabled in this mode.
-  if (UNLIKELY(Runtime::Current()->IsVerificationSoftFail())) {
-    // Force verification to be a 'soft failure'.
-    mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, self);
-    // As this is a fake verified status, make sure the methods are _not_ marked preverified
-    // later.
-    klass->SetPreverified();
-    return;
-  }
-
   // Skip verification if disabled.
   if (!Runtime::Current()->IsVerificationEnabled()) {
     mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, self);
-    EnsurePreverifiedMethods(klass);
+    EnsureSkipAccessChecksMethods(klass);
     return;
   }
 
@@ -3766,9 +3781,9 @@
         mirror::Class::SetStatus(klass, mirror::Class::kStatusRetryVerificationAtRuntime, self);
       } else {
         mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, self);
-        // As this is a fake verified status, make sure the methods are _not_ marked preverified
-        // later.
-        klass->SetPreverified();
+        // As this is a fake verified status, make sure the methods are _not_ marked
+        // kAccSkipAccessChecks later.
+        klass->SetVerificationAttempted();
       }
     }
   } else {
@@ -3781,19 +3796,26 @@
   }
   if (preverified || verifier_failure == verifier::MethodVerifier::kNoFailure) {
     // Class is verified so we don't need to do any access check on its methods.
-    // Let the interpreter know it by setting the kAccPreverified flag onto each
+    // Let the interpreter know it by setting the kAccSkipAccessChecks flag onto each
     // method.
     // Note: we're going here during compilation and at runtime. When we set the
-    // kAccPreverified flag when compiling image classes, the flag is recorded
+    // kAccSkipAccessChecks flag when compiling image classes, the flag is recorded
     // in the image and is set when loading the image.
-    EnsurePreverifiedMethods(klass);
+
+    if (UNLIKELY(Runtime::Current()->IsVerificationSoftFail())) {
+      // Never skip access checks if the verification soft fail is forced.
+      // Mark the class as having a verification attempt to avoid re-running the verifier.
+      klass->SetVerificationAttempted();
+    } else {
+      EnsureSkipAccessChecksMethods(klass);
+    }
   }
 }
 
-void ClassLinker::EnsurePreverifiedMethods(Handle<mirror::Class> klass) {
-  if (!klass->IsPreverified()) {
-    klass->SetPreverifiedFlagOnAllMethods(image_pointer_size_);
-    klass->SetPreverified();
+void ClassLinker::EnsureSkipAccessChecksMethods(Handle<mirror::Class> klass) {
+  if (!klass->WasVerificationAttempted()) {
+    klass->SetSkipAccessChecksFlagOnAllMethods(image_pointer_size_);
+    klass->SetVerificationAttempted();
   }
 }
 
@@ -3824,7 +3846,7 @@
   }
 
   // We may be running with a preopted oat file but without image. In this case,
-  // we don't skip verification of preverified classes to ensure we initialize
+  // we don't skip verification of skip_access_checks classes to ensure we initialize
   // dex caches with all types resolved during verification.
   // We need to trust image classes, as these might be coming out of a pre-opted, quickened boot
   // image (that we just failed loading), and the verifier can't be run on quickened opcodes when
@@ -3932,8 +3954,9 @@
   }
   DCHECK(klass->GetClass() != nullptr);
   klass->SetObjectSize(sizeof(mirror::Proxy));
-  // Set the class access flags incl. preverified, so we do not try to set the flag on the methods.
-  klass->SetAccessFlags(kAccClassIsProxy | kAccPublic | kAccFinal | kAccPreverified);
+  // Set the class access flags incl. VerificationAttempted, so we do not try to set the flag on
+  // the methods.
+  klass->SetAccessFlags(kAccClassIsProxy | kAccPublic | kAccFinal | kAccVerificationAttempted);
   klass->SetClassLoader(soa.Decode<mirror::ClassLoader*>(loader));
   DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot);
   klass->SetName(soa.Decode<mirror::String*>(name));
@@ -4742,7 +4765,7 @@
                                     bool can_init_parents) {
   DCHECK(c.Get() != nullptr);
   if (c->IsInitialized()) {
-    EnsurePreverifiedMethods(c);
+    EnsureSkipAccessChecksMethods(c);
     return true;
   }
   const bool success = InitializeClass(self, c, can_init_fields, can_init_parents);
@@ -6437,11 +6460,11 @@
     for (ArtMethod* def_method : default_methods) {
       ArtMethod& new_method = *out;
       new_method.CopyFrom(def_method, image_pointer_size_);
-      // Clear the preverified flag if it is present. Since this class hasn't been verified yet it
-      // shouldn't have methods that are preverified.
+      // Clear the kAccSkipAccessChecks flag if it is present. Since this class hasn't been verified
+      // yet, it shouldn't have methods that are skipping access checks.
       // TODO This is rather arbitrary. We should maybe support classes where only some of its
-      // methods are preverified.
-      new_method.SetAccessFlags((new_method.GetAccessFlags() | kAccDefault) & ~kAccPreverified);
+      // methods are skip_access_checks.
+      new_method.SetAccessFlags((new_method.GetAccessFlags() | kAccDefault) & ~kAccSkipAccessChecks);
       move_table.emplace(def_method, &new_method);
       ++out;
     }
@@ -6449,11 +6472,11 @@
       ArtMethod& new_method = *out;
       new_method.CopyFrom(conf_method, image_pointer_size_);
       // This is a type of default method (there are default method impls, just a conflict) so mark
-      // this as a default, non-abstract method, since thats what it is. Also clear the preverified
-      // bit since this class hasn't been verified yet it shouldn't have methods that are
-      // preverified.
+      // this as a default, non-abstract method, since that's what it is. Also clear the
+      // kAccSkipAccessChecks bit since this class hasn't been verified yet it shouldn't have
+      // methods that are skipping access checks.
       constexpr uint32_t kSetFlags = kAccDefault | kAccDefaultConflict;
-      constexpr uint32_t kMaskFlags = ~(kAccAbstract | kAccPreverified);
+      constexpr uint32_t kMaskFlags = ~(kAccAbstract | kAccSkipAccessChecks);
       new_method.SetAccessFlags((new_method.GetAccessFlags() | kSetFlags) & kMaskFlags);
       DCHECK(new_method.IsDefaultConflicting());
       // The actual method might or might not be marked abstract since we just copied it from a
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 9217c32..56a868a 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -964,9 +964,10 @@
   void CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, ArtMethod* out)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // Ensures that methods have the kAccPreverified bit set. We use the kAccPreverfied bit on the
-  // class access flags to determine whether this has been done before.
-  void EnsurePreverifiedMethods(Handle<mirror::Class> c)
+  // Ensures that methods have the kAccSkipAccessChecks bit set. We use the
+  // kAccVerificationAttempted bit on the class access flags to determine whether this has been done
+  // before.
+  void EnsureSkipAccessChecksMethods(Handle<mirror::Class> c)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::Class* LookupClassFromBootImage(const char* descriptor)
@@ -1024,6 +1025,11 @@
       REQUIRES(!dex_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  // Check that c1 == FindSystemClass(self, descriptor). Abort with class dumps otherwise.
+  void CheckSystemClass(Thread* self, Handle<mirror::Class> c1, const char* descriptor)
+      REQUIRES(!dex_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   std::vector<const DexFile*> boot_class_path_;
   std::vector<std::unique_ptr<const DexFile>> boot_dex_files_;
 
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index a909cd8..3a0f3e5 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -1125,14 +1125,14 @@
 static void CheckMethod(ArtMethod* method, bool verified)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   if (!method->IsNative() && !method->IsAbstract()) {
-    EXPECT_EQ((method->GetAccessFlags() & kAccPreverified) != 0U, verified)
+    EXPECT_EQ((method->GetAccessFlags() & kAccSkipAccessChecks) != 0U, verified)
         << PrettyMethod(method, true);
   }
 }
 
-static void CheckPreverified(mirror::Class* c, bool preverified)
+static void CheckVerificationAttempted(mirror::Class* c, bool preverified)
     SHARED_REQUIRES(Locks::mutator_lock_) {
-  EXPECT_EQ((c->GetAccessFlags() & kAccPreverified) != 0U, preverified)
+  EXPECT_EQ((c->GetAccessFlags() & kAccVerificationAttempted) != 0U, preverified)
       << "Class " << PrettyClass(c) << " not as expected";
   for (auto& m : c->GetMethods(sizeof(void*))) {
     CheckMethod(&m, preverified);
@@ -1146,7 +1146,7 @@
   ASSERT_TRUE(JavaLangObject != nullptr);
   EXPECT_TRUE(JavaLangObject->IsInitialized()) << "Not testing already initialized class from the "
                                                   "core";
-  CheckPreverified(JavaLangObject, true);
+  CheckVerificationAttempted(JavaLangObject, true);
 }
 
 TEST_F(ClassLinkerTest, Preverified_UninitializedBoot) {
@@ -1159,10 +1159,10 @@
   EXPECT_FALSE(security_manager->IsInitialized()) << "Not testing uninitialized class from the "
                                                      "core";
 
-  CheckPreverified(security_manager.Get(), false);
+  CheckVerificationAttempted(security_manager.Get(), false);
 
   class_linker_->EnsureInitialized(soa.Self(), security_manager, true, true);
-  CheckPreverified(security_manager.Get(), true);
+  CheckVerificationAttempted(security_manager.Get(), true);
 }
 
 TEST_F(ClassLinkerTest, Preverified_App) {
@@ -1174,10 +1174,10 @@
   Handle<mirror::Class> statics(
       hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStatics;", class_loader)));
 
-  CheckPreverified(statics.Get(), false);
+  CheckVerificationAttempted(statics.Get(), false);
 
   class_linker_->EnsureInitialized(soa.Self(), statics, true, true);
-  CheckPreverified(statics.Get(), true);
+  CheckVerificationAttempted(statics.Get(), true);
 }
 
 TEST_F(ClassLinkerTest, IsBootStrapClassLoaded) {
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 7a852e2..ddf2749 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -478,7 +478,7 @@
 
   // Check field access flags.
   std::string error_msg;
-  if (!CheckFieldAccessFlags(access_flags, class_access_flags, &error_msg)) {
+  if (!CheckFieldAccessFlags(idx, access_flags, class_access_flags, &error_msg)) {
     ErrorStringPrintf("%s", error_msg.c_str());
     return false;
   }
@@ -2312,12 +2312,88 @@
   return count <= 1;
 }
 
-bool DexFileVerifier::CheckFieldAccessFlags(uint32_t field_access_flags,
+// Helper functions to retrieve names from the dex file. We do not want to rely on DexFile
+// functionality, as we're still verifying the dex file. begin and header correspond to the
+// underscored variants in the DexFileVerifier.
+
+static std::string GetStringOrError(const uint8_t* const begin,
+                                    const DexFile::Header* const header,
+                                    uint32_t string_idx) {
+  if (string_idx >= header->string_ids_size_) {  // Off-by-one fix: idx == size is out of bounds.
+    return "(error)";
+  }
+
+  const DexFile::StringId* string_id =
+      reinterpret_cast<const DexFile::StringId*>(begin + header->string_ids_off_) + string_idx;
+
+  // Assume that the data is OK at this point. String data has been checked at this point.
+
+  const uint8_t* ptr = begin + string_id->string_data_off_;
+  DecodeUnsignedLeb128(&ptr);
+  return reinterpret_cast<const char*>(ptr);
+}
+
+static std::string GetClassOrError(const uint8_t* const begin,
+                                   const DexFile::Header* const header,
+                                   uint32_t class_idx) {
+  if (class_idx >= header->type_ids_size_) {  // Off-by-one fix: idx == size is out of bounds.
+    return "(error)";
+  }
+
+  const DexFile::TypeId* type_id =
+      reinterpret_cast<const DexFile::TypeId*>(begin + header->type_ids_off_) + class_idx;
+
+  // Assume that the data is OK at this point. Type id offsets have been checked at this point.
+
+  return GetStringOrError(begin, header, type_id->descriptor_idx_);
+}
+
+static std::string GetFieldDescriptionOrError(const uint8_t* const begin,
+                                              const DexFile::Header* const header,
+                                              uint32_t idx) {
+  if (idx >= header->field_ids_size_) {  // Off-by-one fix: idx == size is out of bounds.
+    return "(error)";
+  }
+
+  const DexFile::FieldId* field_id =
+      reinterpret_cast<const DexFile::FieldId*>(begin + header->field_ids_off_) + idx;
+
+  // Assume that the data is OK at this point. Field id offsets have been checked at this point.
+
+  std::string class_name = GetClassOrError(begin, header, field_id->class_idx_);
+  std::string field_name = GetStringOrError(begin, header, field_id->name_idx_);
+
+  return class_name + "." + field_name;
+}
+
+static std::string GetMethodDescriptionOrError(const uint8_t* const begin,
+                                               const DexFile::Header* const header,
+                                               uint32_t idx) {
+  if (idx >= header->method_ids_size_) {  // Off-by-one fix: idx == size is out of bounds.
+    return "(error)";
+  }
+
+  const DexFile::MethodId* method_id =
+      reinterpret_cast<const DexFile::MethodId*>(begin + header->method_ids_off_) + idx;
+
+  // Assume that the data is OK at this point. Method id offsets have been checked at this point.
+
+  std::string class_name = GetClassOrError(begin, header, method_id->class_idx_);
+  std::string method_name = GetStringOrError(begin, header, method_id->name_idx_);
+
+  return class_name + "." + method_name;
+}
+
+bool DexFileVerifier::CheckFieldAccessFlags(uint32_t idx,
+                                            uint32_t field_access_flags,
                                             uint32_t class_access_flags,
                                             std::string* error_msg) {
   // Generally sort out >16-bit flags.
   if ((field_access_flags & ~kAccJavaFlagsMask) != 0) {
-    *error_msg = StringPrintf("Bad class_data_item field access_flags %x", field_access_flags);
+    *error_msg = StringPrintf("Bad field access_flags for %s: %x(%s)",
+                              GetFieldDescriptionOrError(begin_, header_, idx).c_str(),
+                              field_access_flags,
+                              PrettyJavaAccessFlags(field_access_flags).c_str());
     return false;
   }
 
@@ -2334,8 +2410,10 @@
 
   // Fields may have only one of public/protected/final.
   if (!CheckAtMostOneOfPublicProtectedPrivate(field_access_flags)) {
-    *error_msg = StringPrintf("Field may have only one of public/protected/private, %x",
-                              field_access_flags);
+    *error_msg = StringPrintf("Field may have only one of public/protected/private, %s: %x(%s)",
+                              GetFieldDescriptionOrError(begin_, header_, idx).c_str(),
+                              field_access_flags,
+                              PrettyJavaAccessFlags(field_access_flags).c_str());
     return false;
   }
 
@@ -2344,14 +2422,19 @@
     // Interface fields must be public final static.
     constexpr uint32_t kPublicFinalStatic = kAccPublic | kAccFinal | kAccStatic;
     if ((field_access_flags & kPublicFinalStatic) != kPublicFinalStatic) {
-      *error_msg = StringPrintf("Interface field is not public final static: %x",
-                                field_access_flags);
+      *error_msg = StringPrintf("Interface field is not public final static, %s: %x(%s)",
+                                GetFieldDescriptionOrError(begin_, header_, idx).c_str(),
+                                field_access_flags,
+                                PrettyJavaAccessFlags(field_access_flags).c_str());
       return false;
     }
     // Interface fields may be synthetic, but may not have other flags.
     constexpr uint32_t kDisallowed = ~(kPublicFinalStatic | kAccSynthetic);
     if ((field_access_flags & kFieldAccessFlags & kDisallowed) != 0) {
-      *error_msg = StringPrintf("Interface field has disallowed flag: %x", field_access_flags);
+      *error_msg = StringPrintf("Interface field has disallowed flag, %s: %x(%s)",
+                                GetFieldDescriptionOrError(begin_, header_, idx).c_str(),
+                                field_access_flags,
+                                PrettyJavaAccessFlags(field_access_flags).c_str());
       return false;
     }
     return true;
@@ -2360,7 +2443,8 @@
   // Volatile fields may not be final.
   constexpr uint32_t kVolatileFinal = kAccVolatile | kAccFinal;
   if ((field_access_flags & kVolatileFinal) == kVolatileFinal) {
-    *error_msg = "Fields may not be volatile and final";
+    *error_msg = StringPrintf("Fields may not be volatile and final: %s",
+                              GetFieldDescriptionOrError(begin_, header_, idx).c_str());
     return false;
   }
 
@@ -2410,7 +2494,9 @@
   constexpr uint32_t kAllMethodFlags =
       kAccJavaFlagsMask | kAccConstructor | kAccDeclaredSynchronized;
   if ((method_access_flags & ~kAllMethodFlags) != 0) {
-    *error_msg = StringPrintf("Bad class_data_item method access_flags %x", method_access_flags);
+    *error_msg = StringPrintf("Bad method access_flags for %s: %x",
+                              GetMethodDescriptionOrError(begin_, header_, method_index).c_str(),
+                              method_access_flags);
     return false;
   }
 
@@ -2430,7 +2516,8 @@
 
   // Methods may have only one of public/protected/final.
   if (!CheckAtMostOneOfPublicProtectedPrivate(method_access_flags)) {
-    *error_msg = StringPrintf("Method may have only one of public/protected/private, %x",
+    *error_msg = StringPrintf("Method may have only one of public/protected/private, %s: %x",
+                              GetMethodDescriptionOrError(begin_, header_, method_index).c_str(),
                               method_access_flags);
     return false;
   }
@@ -2456,8 +2543,10 @@
   // Only methods named "<clinit>" or "<init>" may be marked constructor. Note: we cannot enforce
   // the reverse for backwards compatibility reasons.
   if (((method_access_flags & kAccConstructor) != 0) && !is_constructor) {
-    *error_msg = StringPrintf("Method %" PRIu32 " is marked constructor, but doesn't match name",
-                              method_index);
+    *error_msg =
+        StringPrintf("Method %" PRIu32 "(%s) is marked constructor, but doesn't match name",
+                     method_index,
+                     GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
     return false;
   }
   // Check that the static constructor (= static initializer) is named "<clinit>" and that the
@@ -2465,8 +2554,9 @@
   if (is_constructor) {
     bool is_static = (method_access_flags & kAccStatic) != 0;
     if (is_static ^ is_clinit_by_name) {
-      *error_msg = StringPrintf("Constructor %" PRIu32 " is not flagged correctly wrt/ static.",
-                                method_index);
+      *error_msg = StringPrintf("Constructor %" PRIu32 "(%s) is not flagged correctly wrt/ static.",
+                                method_index,
+                                GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
       return false;
     }
   }
@@ -2474,8 +2564,9 @@
   // and other methods in the virtual methods list.
   bool is_direct = (method_access_flags & (kAccStatic | kAccPrivate)) != 0 || is_constructor;
   if (is_direct != expect_direct) {
-    *error_msg = StringPrintf("Direct/virtual method %" PRIu32 " not in expected list %d",
+    *error_msg = StringPrintf("Direct/virtual method %" PRIu32 "(%s) not in expected list %d",
                               method_index,
+                              GetMethodDescriptionOrError(begin_, header_, method_index).c_str(),
                               expect_direct);
     return false;
   }
@@ -2488,14 +2579,17 @@
   if (!has_code) {
     // Only native or abstract methods may not have code.
     if ((method_access_flags & (kAccNative | kAccAbstract)) == 0) {
-      *error_msg = StringPrintf("Method %" PRIu32 " has no code, but is not marked native or "
+      *error_msg = StringPrintf("Method %" PRIu32 "(%s) has no code, but is not marked native or "
                                 "abstract",
-                                method_index);
+                                method_index,
+                                GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
       return false;
     }
     // Constructors must always have code.
     if (is_constructor) {
-      *error_msg = StringPrintf("Constructor %u must not be abstract or native", method_index);
+      *error_msg = StringPrintf("Constructor %u(%s) must not be abstract or native",
+                                method_index,
+                                GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
       return false;
     }
     if ((method_access_flags & kAccAbstract) != 0) {
@@ -2503,14 +2597,15 @@
       constexpr uint32_t kForbidden =
           kAccPrivate | kAccStatic | kAccFinal | kAccNative | kAccStrict | kAccSynchronized;
       if ((method_access_flags & kForbidden) != 0) {
-        *error_msg = StringPrintf("Abstract method %" PRIu32 " has disallowed access flags %x",
-                                  method_index,
-                                  method_access_flags);
+        *error_msg = StringPrintf("Abstract method %" PRIu32 "(%s) has disallowed access flags %x",
+            method_index,
+            GetMethodDescriptionOrError(begin_, header_, method_index).c_str(),
+            method_access_flags);
         return false;
       }
       // Abstract methods should be in an abstract class or interface.
       if ((class_access_flags & (kAccInterface | kAccAbstract)) == 0) {
-        LOG(WARNING) << "Method " << PrettyMethod(method_index, *dex_file_)
+        LOG(WARNING) << "Method " << GetMethodDescriptionOrError(begin_, header_, method_index)
                      << " is abstract, but the declaring class is neither abstract nor an "
                      << "interface in dex file "
                      << dex_file_->GetLocation();
@@ -2520,8 +2615,9 @@
     if ((class_access_flags & kAccInterface) != 0) {
       // Interface methods must be public and abstract.
       if ((method_access_flags & (kAccPublic | kAccAbstract)) != (kAccPublic | kAccAbstract)) {
-        *error_msg = StringPrintf("Interface method %" PRIu32 " is not public and abstract",
-                                  method_index);
+        *error_msg = StringPrintf("Interface method %" PRIu32 "(%s) is not public and abstract",
+            method_index,
+            GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
         return false;
       }
       // At this point, we know the method is public and abstract. This means that all the checks
@@ -2533,8 +2629,9 @@
 
   // When there's code, the method must not be native or abstract.
   if ((method_access_flags & (kAccNative | kAccAbstract)) != 0) {
-    *error_msg = StringPrintf("Method %" PRIu32 " has code, but is marked native or abstract",
-                              method_index);
+    *error_msg = StringPrintf("Method %" PRIu32 "(%s) has code, but is marked native or abstract",
+                              method_index,
+                              GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
     return false;
   }
 
@@ -2543,8 +2640,9 @@
     static constexpr uint32_t kInitAllowed =
         kAccPrivate | kAccProtected | kAccPublic | kAccStrict | kAccVarargs | kAccSynthetic;
     if ((method_access_flags & ~kInitAllowed) != 0) {
-      *error_msg = StringPrintf("Constructor %" PRIu32 " flagged inappropriately %x",
+      *error_msg = StringPrintf("Constructor %" PRIu32 "(%s) flagged inappropriately %x",
                                 method_index,
+                                GetMethodDescriptionOrError(begin_, header_, method_index).c_str(),
                                 method_access_flags);
       return false;
     }
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index 6c63749..ddfeea2 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -157,9 +157,10 @@
 
   // Check validity of the given access flags, interpreted for a field in the context of a class
   // with the given second access flags.
-  static bool CheckFieldAccessFlags(uint32_t field_access_flags,
-                                    uint32_t class_access_flags,
-                                    std::string* error_msg);
+  bool CheckFieldAccessFlags(uint32_t idx,
+                             uint32_t field_access_flags,
+                             uint32_t class_access_flags,
+                             std::string* error_msg);
   // Check validity of the given method and access flags, in the context of a class with the given
   // second access flags.
   bool CheckMethodAccessFlags(uint32_t method_index,
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index b67af53..558a6ed 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -527,7 +527,7 @@
         ApplyMaskToMethodFlags(dex_file, "<init>", ~kAccPublic);
         OrMaskToMethodFlags(dex_file, "<init>", kAccStatic);
       },
-      "Constructor 1 is not flagged correctly wrt/ static");
+      "Constructor 1(LMethodFlags;.<init>) is not flagged correctly wrt/ static");
   static constexpr uint32_t kInitNotAllowed[] = {
       kAccFinal,
       kAccSynchronized,
@@ -544,7 +544,7 @@
           ApplyMaskToMethodFlags(dex_file, "<init>", ~kAccPublic);
           OrMaskToMethodFlags(dex_file, "<init>", kInitNotAllowed[i]);
         },
-        "Constructor 1 flagged inappropriately");
+        "Constructor 1(LMethodFlags;.<init>) flagged inappropriately");
   }
 }
 
@@ -742,7 +742,7 @@
 
         ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
       },
-      "Interface method 1 is not public and abstract");
+      "Interface method 1(LInterfaceMethodFlags;.foo) is not public and abstract");
   VerifyModification(
       kMethodFlagsInterface,
       "method_flags_interface_non_abstract",
@@ -751,7 +751,7 @@
 
         ApplyMaskToMethodFlags(dex_file, "foo", ~kAccAbstract);
       },
-      "Method 1 has no code, but is not marked native or abstract");
+      "Method 1(LInterfaceMethodFlags;.foo) has no code, but is not marked native or abstract");
 
   VerifyModification(
       kMethodFlagsInterface,
@@ -761,7 +761,7 @@
 
         OrMaskToMethodFlags(dex_file, "foo", kAccStatic);
       },
-      "Direct/virtual method 1 not in expected list 0");
+      "Direct/virtual method 1(LInterfaceMethodFlags;.foo) not in expected list 0");
   VerifyModification(
       kMethodFlagsInterface,
       "method_flags_interface_private",
@@ -771,7 +771,7 @@
         ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
         OrMaskToMethodFlags(dex_file, "foo", kAccPrivate);
       },
-      "Direct/virtual method 1 not in expected list 0");
+      "Direct/virtual method 1(LInterfaceMethodFlags;.foo) not in expected list 0");
 
   VerifyModification(
       kMethodFlagsInterface,
@@ -781,7 +781,7 @@
 
         ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
       },
-      "Interface method 1 is not public and abstract");
+      "Interface method 1(LInterfaceMethodFlags;.foo) is not public and abstract");
   VerifyModification(
       kMethodFlagsInterface,
       "method_flags_interface_protected",
@@ -791,7 +791,7 @@
         ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
         OrMaskToMethodFlags(dex_file, "foo", kAccProtected);
       },
-      "Interface method 1 is not public and abstract");
+      "Interface method 1(LInterfaceMethodFlags;.foo) is not public and abstract");
 
   constexpr uint32_t kAllMethodFlags =
       kAccPublic |
@@ -831,7 +831,7 @@
           }
           OrMaskToMethodFlags(dex_file, "foo", mask);
         },
-        "Abstract method 1 has disallowed access flags");
+        "Abstract method 1(LInterfaceMethodFlags;.foo) has disallowed access flags");
   }
 }
 
diff --git a/runtime/dex_instruction_utils.h b/runtime/dex_instruction_utils.h
index 1ae2b1b..2849cd8 100644
--- a/runtime/dex_instruction_utils.h
+++ b/runtime/dex_instruction_utils.h
@@ -49,6 +49,16 @@
 
 // NOTE: The following functions disregard quickened instructions.
 
+// By "direct" const we mean to exclude const-string and const-class
+// which load data from somewhere else, i.e. indirectly.
+constexpr bool IsInstructionDirectConst(Instruction::Code opcode) {
+  return Instruction::CONST_4 <= opcode && opcode <= Instruction::CONST_WIDE_HIGH16;  // Relies on CONST_4..CONST_WIDE_HIGH16 being contiguous enum values.
+}
+
+constexpr bool IsInstructionConstWide(Instruction::Code opcode) {  // True for the 64-bit (wide) const variants.
+  return Instruction::CONST_WIDE_16 <= opcode && opcode <= Instruction::CONST_WIDE_HIGH16;  // Relies on CONST_WIDE_16..CONST_WIDE_HIGH16 being contiguous enum values.
+}
+
 constexpr bool IsInstructionReturn(Instruction::Code opcode) {
   return Instruction::RETURN_VOID <= opcode && opcode <= Instruction::RETURN_OBJECT;
 }
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 5161175..7e7d904 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -299,8 +299,10 @@
 }
 
 template<FindFieldType type, bool access_check>
-inline ArtField* FindFieldFromCode(uint32_t field_idx, ArtMethod* referrer,
-                                           Thread* self, size_t expected_size) {
+inline ArtField* FindFieldFromCode(uint32_t field_idx,
+                                   ArtMethod* referrer,
+                                   Thread* self,
+                                   size_t expected_size) REQUIRES(!Roles::uninterruptible_) {
   bool is_primitive;
   bool is_set;
   bool is_static;
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index b5a55bf..3dfad76 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -273,15 +273,15 @@
     if (outer_method != nullptr) {
       const OatQuickMethodHeader* current_code = outer_method->GetOatQuickMethodHeader(caller_pc);
       if (current_code->IsOptimized()) {
-          uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
-          CodeInfo code_info = current_code->GetOptimizedCodeInfo();
-          StackMapEncoding encoding = code_info.ExtractEncoding();
-          StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
-          DCHECK(stack_map.IsValid());
-          if (stack_map.HasInlineInfo(encoding)) {
-            InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
-            caller = GetResolvedMethod(outer_method, inline_info, inline_info.GetDepth() - 1);
-          }
+        uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
+        CodeInfo code_info = current_code->GetOptimizedCodeInfo();
+        StackMapEncoding encoding = code_info.ExtractEncoding();
+        StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+        DCHECK(stack_map.IsValid());
+        if (stack_map.HasInlineInfo(encoding)) {
+          InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
+          caller = GetResolvedMethod(outer_method, inline_info, inline_info.GetDepth() - 1);
+        }
       }
     }
     if (kIsDebugBuild && do_caller_check) {
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 1850254..a245f18 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -27,7 +27,36 @@
 
 namespace art {
 
-extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx, ArtMethod* referrer,
+inline constexpr bool FindFieldTypeIsRead(FindFieldType type) {  // True exactly for the four *Read variants.
+  return type == InstanceObjectRead ||
+         type == InstancePrimitiveRead ||
+         type == StaticObjectRead ||
+         type == StaticPrimitiveRead;
+}
+
+// Helper function to do a null check after trying to resolve the field. Not for statics since obj
+// does not exist there. There is a suspend check, object is a double pointer to update the value
+// in the caller in case it moves.
+template<FindFieldType type, bool kAccessCheck>
+ALWAYS_INLINE static inline ArtField* FindInstanceField(uint32_t field_idx,
+                                                        ArtMethod* referrer,
+                                                        Thread* self,
+                                                        size_t size,
+                                                        mirror::Object** obj)
+    REQUIRES(!Roles::uninterruptible_)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  StackHandleScope<1> hs(self);
+  HandleWrapper<mirror::Object> h(hs.NewHandleWrapper(obj));  // Writes the (possibly moved) object back to *obj on scope exit.
+  ArtField* field = FindFieldFromCode<type, kAccessCheck>(field_idx, referrer, self, size);
+  if (LIKELY(field != nullptr) && UNLIKELY(h.Get() == nullptr)) {  // Field resolved but receiver is null: report NPE.
+    ThrowNullPointerExceptionForFieldAccess(field, /*is_read*/FindFieldTypeIsRead(type));
+    return nullptr;
+  }
+  return field;
+}
+
+extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx,
+                                           ArtMethod* referrer,
                                            Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
@@ -42,7 +71,8 @@
   return 0;  // Will throw exception by checking with Thread::Current.
 }
 
-extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx, ArtMethod* referrer,
+extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx,
+                                               ArtMethod* referrer,
                                                Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
@@ -57,7 +87,8 @@
   return 0;  // Will throw exception by checking with Thread::Current.
 }
 
-extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx, ArtMethod* referrer,
+extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx,
+                                             ArtMethod* referrer,
                                              Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
@@ -125,12 +156,16 @@
                                                    Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
-  ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectRead,
+  ArtField* field = FindFieldFast(field_idx,
+                                  referrer,
+                                  StaticObjectRead,
                                   sizeof(mirror::HeapReference<mirror::Object>));
   if (LIKELY(field != nullptr)) {
     return field->GetObj(field->GetDeclaringClass());
   }
-  field = FindFieldFromCode<StaticObjectRead, true>(field_idx, referrer, self,
+  field = FindFieldFromCode<StaticObjectRead, true>(field_idx,
+                                                    referrer,
+                                                    self,
                                                     sizeof(mirror::HeapReference<mirror::Object>));
   if (LIKELY(field != nullptr)) {
     return field->GetObj(field->GetDeclaringClass());
@@ -138,149 +173,159 @@
   return nullptr;  // Will throw exception by checking with Thread::Current.
 }
 
-extern "C" int8_t artGetByteInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
-                                             ArtMethod* referrer, Thread* self)
+extern "C" int8_t artGetByteInstanceFromCode(uint32_t field_idx,
+                                             mirror::Object* obj,
+                                             ArtMethod* referrer,
+                                             Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
   if (LIKELY(field != nullptr && obj != nullptr)) {
     return field->GetByte(obj);
   }
-  field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
-                                                         sizeof(int8_t));
+  field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
+                                                         referrer,
+                                                         self,
+                                                         sizeof(int8_t),
+                                                         &obj);
   if (LIKELY(field != nullptr)) {
-    if (UNLIKELY(obj == nullptr)) {
-      ThrowNullPointerExceptionForFieldAccess(field, true);
-    } else {
-      return field->GetByte(obj);
-    }
+    return field->GetByte(obj);
   }
   return 0;  // Will throw exception by checking with Thread::Current.
 }
 
-extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
-                                                 ArtMethod* referrer, Thread* self)
+extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx,
+                                                 mirror::Object* obj,
+                                                 ArtMethod* referrer,
+                                                 Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
   if (LIKELY(field != nullptr && obj != nullptr)) {
     return field->GetBoolean(obj);
   }
-  field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
-                                                         sizeof(int8_t));
+  field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
+                                                         referrer,
+                                                         self,
+                                                         sizeof(int8_t),
+                                                         &obj);
   if (LIKELY(field != nullptr)) {
-    if (UNLIKELY(obj == nullptr)) {
-      ThrowNullPointerExceptionForFieldAccess(field, true);
-    } else {
-      return field->GetBoolean(obj);
-    }
+    return field->GetBoolean(obj);
   }
   return 0;  // Will throw exception by checking with Thread::Current.
 }
-extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
-                                               ArtMethod* referrer, Thread* self)
+extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx,
+                                               mirror::Object* obj,
+                                               ArtMethod* referrer,
+                                               Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
   if (LIKELY(field != nullptr && obj != nullptr)) {
     return field->GetShort(obj);
   }
-  field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
-                                                         sizeof(int16_t));
+  field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
+                                                         referrer,
+                                                         self,
+                                                         sizeof(int16_t),
+                                                         &obj);
   if (LIKELY(field != nullptr)) {
-    if (UNLIKELY(obj == nullptr)) {
-      ThrowNullPointerExceptionForFieldAccess(field, true);
-    } else {
-      return field->GetShort(obj);
-    }
+    return field->GetShort(obj);
   }
   return 0;  // Will throw exception by checking with Thread::Current.
 }
 
-extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
-                                               ArtMethod* referrer, Thread* self)
+extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx,
+                                               mirror::Object* obj,
+                                               ArtMethod* referrer,
+                                               Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
   if (LIKELY(field != nullptr && obj != nullptr)) {
     return field->GetChar(obj);
   }
-  field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
-                                                         sizeof(int16_t));
+  field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
+                                                         referrer,
+                                                         self,
+                                                         sizeof(int16_t),
+                                                         &obj);
   if (LIKELY(field != nullptr)) {
-    if (UNLIKELY(obj == nullptr)) {
-      ThrowNullPointerExceptionForFieldAccess(field, true);
-    } else {
-      return field->GetChar(obj);
-    }
+    return field->GetChar(obj);
   }
   return 0;  // Will throw exception by checking with Thread::Current.
 }
 
-extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
-                                             ArtMethod* referrer, Thread* self)
+extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx,
+                                             mirror::Object* obj,
+                                             ArtMethod* referrer,
+                                             Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t));
   if (LIKELY(field != nullptr && obj != nullptr)) {
     return field->Get32(obj);
   }
-  field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
-                                                         sizeof(int32_t));
+  field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
+                                                         referrer,
+                                                         self,
+                                                         sizeof(int32_t),
+                                                         &obj);
   if (LIKELY(field != nullptr)) {
-    if (UNLIKELY(obj == nullptr)) {
-      ThrowNullPointerExceptionForFieldAccess(field, true);
-    } else {
-      return field->Get32(obj);
-    }
+    return field->Get32(obj);
   }
   return 0;  // Will throw exception by checking with Thread::Current.
 }
 
-extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
-                                             ArtMethod* referrer, Thread* self)
+extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx,
+                                             mirror::Object* obj,
+                                             ArtMethod* referrer,
+                                             Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t));
   if (LIKELY(field != nullptr && obj != nullptr)) {
     return field->Get64(obj);
   }
-  field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
-                                                         sizeof(int64_t));
+  field = FindInstanceField<InstancePrimitiveRead, true>(field_idx,
+                                                         referrer,
+                                                         self,
+                                                         sizeof(int64_t),
+                                                         &obj);
   if (LIKELY(field != nullptr)) {
-    if (UNLIKELY(obj == nullptr)) {
-      ThrowNullPointerExceptionForFieldAccess(field, true);
-    } else {
-      return field->Get64(obj);
-    }
+    return field->Get64(obj);
   }
   return 0;  // Will throw exception by checking with Thread::Current.
 }
 
-extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
+extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx,
+                                                     mirror::Object* obj,
                                                      ArtMethod* referrer,
                                                      Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
-  ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectRead,
+  ArtField* field = FindFieldFast(field_idx,
+                                  referrer,
+                                  InstanceObjectRead,
                                   sizeof(mirror::HeapReference<mirror::Object>));
   if (LIKELY(field != nullptr && obj != nullptr)) {
     return field->GetObj(obj);
   }
-  field = FindFieldFromCode<InstanceObjectRead, true>(
-      field_idx, referrer, self, sizeof(mirror::HeapReference<mirror::Object>));
+  field = FindInstanceField<InstanceObjectRead, true>(field_idx,
+                                                      referrer,
+                                                      self,
+                                                      sizeof(mirror::HeapReference<mirror::Object>),
+                                                      &obj);
   if (LIKELY(field != nullptr)) {
-    if (UNLIKELY(obj == nullptr)) {
-      ThrowNullPointerExceptionForFieldAccess(field, true);
-    } else {
-      return field->GetObj(obj);
-    }
+    return field->GetObj(obj);
   }
   return nullptr;  // Will throw exception by checking with Thread::Current.
 }
 
-extern "C" int artSet8StaticFromCode(uint32_t field_idx, uint32_t new_value,
-                                     ArtMethod* referrer, Thread* self)
+extern "C" int artSet8StaticFromCode(uint32_t field_idx,
+                                     uint32_t new_value,
+                                     ArtMethod* referrer,
+                                     Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int8_t));
@@ -310,8 +355,10 @@
   return -1;  // failure
 }
 
-extern "C" int artSet16StaticFromCode(uint32_t field_idx, uint16_t new_value,
-                                      ArtMethod* referrer, Thread* self)
+extern "C" int artSet16StaticFromCode(uint32_t field_idx,
+                                      uint16_t new_value,
+                                      ArtMethod* referrer,
+                                      Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int16_t));
@@ -341,8 +388,10 @@
   return -1;  // failure
 }
 
-extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value,
-                                      ArtMethod* referrer, Thread* self)
+extern "C" int artSet32StaticFromCode(uint32_t field_idx,
+                                      uint32_t new_value,
+                                      ArtMethod* referrer,
+                                      Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t));
@@ -360,8 +409,10 @@
   return -1;  // failure
 }
 
-extern "C" int artSet64StaticFromCode(uint32_t field_idx, ArtMethod* referrer,
-                                      uint64_t new_value, Thread* self)
+extern "C" int artSet64StaticFromCode(uint32_t field_idx,
+                                      ArtMethod* referrer,
+                                      uint64_t new_value,
+                                      Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
@@ -379,11 +430,15 @@
   return -1;  // failure
 }
 
-extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_value,
-                                       ArtMethod* referrer, Thread* self)
+extern "C" int artSetObjStaticFromCode(uint32_t field_idx,
+                                       mirror::Object* new_value,
+                                       ArtMethod* referrer,
+                                       Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
-  ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectWrite,
+  ArtField* field = FindFieldFast(field_idx,
+                                  referrer,
+                                  StaticObjectWrite,
                                   sizeof(mirror::HeapReference<mirror::Object>));
   if (LIKELY(field != nullptr)) {
     if (LIKELY(!field->IsPrimitiveType())) {
@@ -392,8 +447,15 @@
       return 0;  // success
     }
   }
-  field = FindFieldFromCode<StaticObjectWrite, true>(field_idx, referrer, self,
-                                                     sizeof(mirror::HeapReference<mirror::Object>));
+  {
+    StackHandleScope<1> hs(self);
+    HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&new_value));
+    field = FindFieldFromCode<StaticObjectWrite, true>(
+        field_idx,
+        referrer,
+        self,
+        sizeof(mirror::HeapReference<mirror::Object>));
+  }
   if (LIKELY(field != nullptr)) {
     // Compiled code can't use transactional mode.
     field->SetObj<false>(field->GetDeclaringClass(), new_value);
@@ -402,8 +464,11 @@
   return -1;  // failure
 }
 
-extern "C" int artSet8InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint8_t new_value,
-                                       ArtMethod* referrer, Thread* self)
+extern "C" int artSet8InstanceFromCode(uint32_t field_idx,
+                                       mirror::Object* obj,
+                                       uint8_t new_value,
+                                       ArtMethod* referrer,
+                                       Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
@@ -418,31 +483,29 @@
     }
     return 0;  // success
   }
-  {
-    StackHandleScope<1> hs(self);
-    HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
-    field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, self,
-                                                            sizeof(int8_t));
-  }
+  field = FindInstanceField<InstancePrimitiveWrite, true>(field_idx,
+                                                          referrer,
+                                                          self,
+                                                          sizeof(int8_t),
+                                                          &obj);
   if (LIKELY(field != nullptr)) {
-    if (UNLIKELY(obj == nullptr)) {
-      ThrowNullPointerExceptionForFieldAccess(field, false);
+    Primitive::Type type = field->GetTypeAsPrimitiveType();
+    // Compiled code can't use transactional mode.
+    if (type == Primitive::kPrimBoolean) {
+      field->SetBoolean<false>(obj, new_value);
     } else {
-      Primitive::Type type = field->GetTypeAsPrimitiveType();
-      // Compiled code can't use transactional mode.
-      if (type == Primitive::kPrimBoolean) {
-        field->SetBoolean<false>(obj, new_value);
-      } else {
-        field->SetByte<false>(obj, new_value);
-      }
-      return 0;  // success
+      field->SetByte<false>(obj, new_value);
     }
+    return 0;  // success
   }
   return -1;  // failure
 }
 
-extern "C" int artSet16InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint16_t new_value,
-                                        ArtMethod* referrer, Thread* self)
+extern "C" int artSet16InstanceFromCode(uint32_t field_idx,
+                                        mirror::Object* obj,
+                                        uint16_t new_value,
+                                        ArtMethod* referrer,
+                                        Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int16_t));
@@ -457,32 +520,30 @@
     }
     return 0;  // success
   }
-  {
-    StackHandleScope<1> hs(self);
-    HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
-    field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, self,
-                                                            sizeof(int16_t));
-  }
+  field = FindInstanceField<InstancePrimitiveWrite, true>(field_idx,
+                                                          referrer,
+                                                          self,
+                                                          sizeof(int16_t),
+                                                          &obj);
   if (LIKELY(field != nullptr)) {
-    if (UNLIKELY(obj == nullptr)) {
-      ThrowNullPointerExceptionForFieldAccess(field, false);
+    Primitive::Type type = field->GetTypeAsPrimitiveType();
+    // Compiled code can't use transactional mode.
+    if (type == Primitive::kPrimChar) {
+      field->SetChar<false>(obj, new_value);
     } else {
-      Primitive::Type type = field->GetTypeAsPrimitiveType();
-      // Compiled code can't use transactional mode.
-      if (type == Primitive::kPrimChar) {
-        field->SetChar<false>(obj, new_value);
-      } else {
-        DCHECK_EQ(Primitive::kPrimShort, type);
-        field->SetShort<false>(obj, new_value);
-      }
-      return 0;  // success
+      DCHECK_EQ(Primitive::kPrimShort, type);
+      field->SetShort<false>(obj, new_value);
     }
+    return 0;  // success
   }
   return -1;  // failure
 }
 
-extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value,
-                                        ArtMethod* referrer, Thread* self)
+extern "C" int artSet32InstanceFromCode(uint32_t field_idx,
+                                        mirror::Object* obj,
+                                        uint32_t new_value,
+                                        ArtMethod* referrer,
+                                        Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t));
@@ -491,26 +552,24 @@
     field->Set32<false>(obj, new_value);
     return 0;  // success
   }
-  {
-    StackHandleScope<1> hs(self);
-    HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
-    field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, self,
-                                                            sizeof(int32_t));
-  }
+  field = FindInstanceField<InstancePrimitiveWrite, true>(field_idx,
+                                                          referrer,
+                                                          self,
+                                                          sizeof(int32_t),
+                                                          &obj);
   if (LIKELY(field != nullptr)) {
-    if (UNLIKELY(obj == nullptr)) {
-      ThrowNullPointerExceptionForFieldAccess(field, false);
-    } else {
-      // Compiled code can't use transactional mode.
-      field->Set32<false>(obj, new_value);
-      return 0;  // success
-    }
+    // Compiled code can't use transactional mode.
+    field->Set32<false>(obj, new_value);
+    return 0;  // success
   }
   return -1;  // failure
 }
 
-extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint64_t new_value,
-                                        ArtMethod* referrer, Thread* self)
+extern "C" int artSet64InstanceFromCode(uint32_t field_idx,
+                                        mirror::Object* obj,
+                                        uint64_t new_value,
+                                        ArtMethod* referrer,
+                                        Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int64_t));
@@ -519,34 +578,45 @@
     field->Set64<false>(obj, new_value);
     return 0;  // success
   }
-  field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, self,
-                                                          sizeof(int64_t));
+  field = FindInstanceField<InstancePrimitiveWrite, true>(field_idx,
+                                                          referrer,
+                                                          self,
+                                                          sizeof(int64_t),
+                                                          &obj);
   if (LIKELY(field != nullptr)) {
-    if (UNLIKELY(obj == nullptr)) {
-      ThrowNullPointerExceptionForFieldAccess(field, false);
-    } else {
-      // Compiled code can't use transactional mode.
-      field->Set64<false>(obj, new_value);
-      return 0;  // success
-    }
+    // Compiled code can't use transactional mode.
+    field->Set64<false>(obj, new_value);
+    return 0;
   }
   return -1;  // failure
 }
 
-extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
+extern "C" int artSetObjInstanceFromCode(uint32_t field_idx,
+                                         mirror::Object* obj,
                                          mirror::Object* new_value,
-                                         ArtMethod* referrer, Thread* self)
+                                         ArtMethod* referrer,
+                                         Thread* self)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
-  ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
+  ArtField* field = FindFieldFast(field_idx,
+                                  referrer,
+                                  InstanceObjectWrite,
                                   sizeof(mirror::HeapReference<mirror::Object>));
   if (LIKELY(field != nullptr && obj != nullptr)) {
     // Compiled code can't use transactional mode.
     field->SetObj<false>(obj, new_value);
     return 0;  // success
   }
-  field = FindFieldFromCode<InstanceObjectWrite, true>(field_idx, referrer, self,
-                                                       sizeof(mirror::HeapReference<mirror::Object>));
+  {
+    StackHandleScope<2> hs(self);
+    HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
+    HandleWrapper<mirror::Object> h_new_value(hs.NewHandleWrapper(&new_value));
+    field = FindFieldFromCode<InstanceObjectWrite, true>(
+        field_idx,
+        referrer,
+        self,
+        sizeof(mirror::HeapReference<mirror::Object>));
+  }
   if (LIKELY(field != nullptr)) {
     if (UNLIKELY(obj == nullptr)) {
       ThrowNullPointerExceptionForFieldAccess(field, false);
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index f87d48d..e72809b 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -127,7 +127,7 @@
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_default_ibase, mterp_alt_ibase, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_alt_ibase, rosalloc_runs, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, rosalloc_runs, thread_local_alloc_stack_top,
-                        sizeof(void*) * kNumRosAllocThreadLocalSizeBrackets);
+                        sizeof(void*) * kNumRosAllocThreadLocalSizeBracketsInThread);
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_alloc_stack_top, thread_local_alloc_stack_end,
                         sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_alloc_stack_end, held_mutexes, sizeof(void*));
diff --git a/runtime/gc/allocator/rosalloc-inl.h b/runtime/gc/allocator/rosalloc-inl.h
index 2510514..d1c81e3 100644
--- a/runtime/gc/allocator/rosalloc-inl.h
+++ b/runtime/gc/allocator/rosalloc-inl.h
@@ -62,11 +62,6 @@
   }
   size_t bracket_size;
   size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
-  DCHECK_EQ(idx, SizeToIndex(size));
-  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
-  DCHECK_EQ(bracket_size, bracketSizes[idx]);
-  DCHECK_LE(size, bracket_size);
-  DCHECK(size > 512 || bracket_size - size < 16);
   DCHECK_LT(idx, kNumThreadLocalSizeBrackets);
   Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
   if (kIsDebugBuild) {
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 7d00094..8b125dd 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -638,11 +638,6 @@
   DCHECK_LE(size, kLargeSizeThreshold);
   size_t bracket_size;
   size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
-  DCHECK_EQ(idx, SizeToIndex(size));
-  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
-  DCHECK_EQ(bracket_size, bracketSizes[idx]);
-  DCHECK_LE(size, bracket_size);
-  DCHECK(size > 512 || bracket_size - size < 16);
   Locks::mutator_lock_->AssertExclusiveHeld(self);
   void* slot_addr = AllocFromCurrentRunUnlocked(self, idx);
   if (LIKELY(slot_addr != nullptr)) {
@@ -662,14 +657,7 @@
   DCHECK_LE(size, kLargeSizeThreshold);
   size_t bracket_size;
   size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
-  DCHECK_EQ(idx, SizeToIndex(size));
-  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
-  DCHECK_EQ(bracket_size, bracketSizes[idx]);
-  DCHECK_LE(size, bracket_size);
-  DCHECK(size > 512 || bracket_size - size < 16);
-
   void* slot_addr;
-
   if (LIKELY(idx < kNumThreadLocalSizeBrackets)) {
     // Use a thread-local run.
     Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
@@ -881,17 +869,6 @@
   return stream.str();
 }
 
-inline size_t RosAlloc::Run::SlotIndex(Slot* slot) {
-  const uint8_t idx = size_bracket_idx_;
-  const size_t bracket_size = bracketSizes[idx];
-  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(slot)
-      - reinterpret_cast<uint8_t*>(FirstSlot());
-  DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
-  size_t slot_idx = offset_from_slot_base / bracket_size;
-  DCHECK_LT(slot_idx, numOfSlots[idx]);
-  return slot_idx;
-}
-
 void RosAlloc::Run::FreeSlot(void* ptr) {
   DCHECK(!IsThreadLocal());
   const uint8_t idx = size_bracket_idx_;
@@ -1647,9 +1624,14 @@
 
 void RosAlloc::Initialize() {
   // bracketSizes.
+  static_assert(kNumRegularSizeBrackets == kNumOfSizeBrackets - 2,
+                "There should be two non-regular brackets");
   for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
-    if (i < kNumOfSizeBrackets - 2) {
-      bracketSizes[i] = 16 * (i + 1);
+    if (i < kNumThreadLocalSizeBrackets) {
+      bracketSizes[i] = kThreadLocalBracketQuantumSize * (i + 1);
+    } else if (i < kNumRegularSizeBrackets) {
+      bracketSizes[i] = kBracketQuantumSize * (i - kNumThreadLocalSizeBrackets + 1) +
+          (kThreadLocalBracketQuantumSize *  kNumThreadLocalSizeBrackets);
     } else if (i == kNumOfSizeBrackets - 2) {
       bracketSizes[i] = 1 * KB;
     } else {
@@ -1662,16 +1644,13 @@
   }
   // numOfPages.
   for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
-    if (i < 4) {
+    if (i < kNumThreadLocalSizeBrackets) {
       numOfPages[i] = 1;
-    } else if (i < 8) {
-      numOfPages[i] = 1;
-    } else if (i < 16) {
+    } else if (i < (kNumThreadLocalSizeBrackets + kNumRegularSizeBrackets) / 2) {
       numOfPages[i] = 4;
-    } else if (i < 32) {
+    } else if (i < kNumRegularSizeBrackets) {
       numOfPages[i] = 8;
-    } else if (i == 32) {
-      DCHECK_EQ(i, kNumOfSizeBrackets - 2);
+    } else if (i == kNumOfSizeBrackets - 2) {
       numOfPages[i] = 16;
     } else {
       DCHECK_EQ(i, kNumOfSizeBrackets - 1);
@@ -1701,8 +1680,8 @@
       size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
           tmp_unaligned_header_size :
           tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
-      DCHECK_EQ(tmp_header_size % bracket_size, static_cast<size_t>(0));
-      DCHECK_EQ(tmp_header_size % 8, static_cast<size_t>(0));
+      DCHECK_EQ(tmp_header_size % bracket_size, 0U);
+      DCHECK_EQ(tmp_header_size % sizeof(uint64_t), 0U);
       if (tmp_slots_size + tmp_header_size <= run_size) {
         // Found the right number of slots, that is, there was enough
         // space for the header (including the bit maps.)
@@ -1711,8 +1690,8 @@
         break;
       }
     }
-    DCHECK_GT(num_of_slots, 0U);
-    DCHECK_GT(header_size, 0U);
+    DCHECK_GT(num_of_slots, 0U) << i;
+    DCHECK_GT(header_size, 0U) << i;
     // Add the padding for the alignment remainder.
     header_size += run_size % bracket_size;
     DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size);
@@ -1723,7 +1702,7 @@
                 << ", headerSizes[" << i << "]=" << headerSizes[i];
     }
   }
-  // Fill the alloc bitmap so nobody can successfully allocate from it.
+  // Set up the dedicated full run so that nobody can successfully allocate from it.
   if (kIsDebugBuild) {
     dedicated_full_run_->magic_num_ = kMagicNum;
   }
@@ -1735,6 +1714,9 @@
 
   // The smallest bracket size must be at least as large as the sizeof(Slot).
   DCHECK_LE(sizeof(Slot), bracketSizes[0]) << "sizeof(Slot) <= the smallest bracket size";
+  // Check the invariants between the max bracket sizes and the number of brackets.
+  DCHECK_EQ(kMaxThreadLocalBracketSize, bracketSizes[kNumThreadLocalSizeBrackets - 1]);
+  DCHECK_EQ(kMaxRegularBracketSize, bracketSizes[kNumRegularSizeBrackets - 1]);
 }
 
 void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 3ce3d63..a472a8b 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -366,7 +366,7 @@
     static size_t fixed_header_size() {
       return sizeof(Run);
     }
-    Slot* FirstSlot() {
+    Slot* FirstSlot() const {
       const uint8_t idx = size_bracket_idx_;
       return reinterpret_cast<Slot*>(reinterpret_cast<uintptr_t>(this) + headerSizes[idx]);
     }
@@ -473,7 +473,16 @@
       DCHECK_LT(slot_idx, numOfSlots[idx]);
       return reinterpret_cast<Slot*>(ptr);
     }
-    size_t SlotIndex(Slot* slot);
+    size_t SlotIndex(Slot* slot) const {
+      const uint8_t idx = size_bracket_idx_;
+      const size_t bracket_size = bracketSizes[idx];
+      const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(slot)
+          - reinterpret_cast<uint8_t*>(FirstSlot());
+      DCHECK_EQ(offset_from_slot_base % bracket_size, 0U);
+      size_t slot_idx = offset_from_slot_base / bracket_size;
+      DCHECK_LT(slot_idx, numOfSlots[idx]);
+      return slot_idx;
+    }
 
     // TODO: DISALLOW_COPY_AND_ASSIGN(Run);
   };
@@ -482,10 +491,8 @@
   static constexpr uint8_t kMagicNum = 42;
   // The magic number for free pages.
   static constexpr uint8_t kMagicNumFree = 43;
-  // The number of size brackets. Sync this with the length of Thread::rosalloc_runs_.
-  static constexpr size_t kNumOfSizeBrackets = kNumRosAllocThreadLocalSizeBrackets;
-  // The number of smaller size brackets that are the quantum size apart.
-  static constexpr size_t kNumOfQuantumSizeBrackets = 32;
+  // The number of size brackets.
+  static constexpr size_t kNumOfSizeBrackets = 42;
   // The sizes (the slot sizes, in bytes) of the size brackets.
   static size_t bracketSizes[kNumOfSizeBrackets];
   // The numbers of pages that are used for runs for each size bracket.
@@ -506,16 +513,23 @@
   }
   // Returns the index of the size bracket from the bracket size.
   static size_t BracketSizeToIndex(size_t size) {
-    DCHECK(16 <= size && ((size < 1 * KB && size % 16 == 0) || size == 1 * KB || size == 2 * KB));
+    DCHECK(8 <= size &&
+           ((size <= kMaxThreadLocalBracketSize && size % kThreadLocalBracketQuantumSize == 0) ||
+            (size <= kMaxRegularBracketSize && size % kBracketQuantumSize == 0) ||
+            size == 1 * KB || size == 2 * KB));
     size_t idx;
     if (UNLIKELY(size == 1 * KB)) {
       idx = kNumOfSizeBrackets - 2;
     } else if (UNLIKELY(size == 2 * KB)) {
       idx = kNumOfSizeBrackets - 1;
+    } else if (LIKELY(size <= kMaxThreadLocalBracketSize)) {
+      DCHECK_EQ(size % kThreadLocalBracketQuantumSize, 0U);
+      idx = size / kThreadLocalBracketQuantumSize - 1;
     } else {
-      DCHECK(size < 1 * KB);
-      DCHECK_EQ(size % 16, static_cast<size_t>(0));
-      idx = size / 16 - 1;
+      DCHECK(size <= kMaxRegularBracketSize);
+      DCHECK_EQ((size - kMaxThreadLocalBracketSize) % kBracketQuantumSize, 0U);
+      idx = ((size - kMaxThreadLocalBracketSize) / kBracketQuantumSize - 1)
+          + kNumThreadLocalSizeBrackets;
     }
     DCHECK(bracketSizes[idx] == size);
     return idx;
@@ -530,51 +544,64 @@
   // Rounds up the size up the nearest bracket size.
   static size_t RoundToBracketSize(size_t size) {
     DCHECK(size <= kLargeSizeThreshold);
-    if (LIKELY(size <= 512)) {
-      return RoundUp(size, 16);
-    } else if (512 < size && size <= 1 * KB) {
+    if (LIKELY(size <= kMaxThreadLocalBracketSize)) {
+      return RoundUp(size, kThreadLocalBracketQuantumSize);
+    } else if (size <= kMaxRegularBracketSize) {
+      return RoundUp(size, kBracketQuantumSize);
+    } else if (UNLIKELY(size <= 1 * KB)) {
       return 1 * KB;
     } else {
-      DCHECK(1 * KB < size && size <= 2 * KB);
+      DCHECK_LE(size, 2 * KB);
       return 2 * KB;
     }
   }
   // Returns the size bracket index from the byte size with rounding.
   static size_t SizeToIndex(size_t size) {
     DCHECK(size <= kLargeSizeThreshold);
-    if (LIKELY(size <= 512)) {
-      return RoundUp(size, 16) / 16 - 1;
-    } else if (512 < size && size <= 1 * KB) {
+    if (LIKELY(size <= kMaxThreadLocalBracketSize)) {
+      return RoundUp(size, kThreadLocalBracketQuantumSize) / kThreadLocalBracketQuantumSize - 1;
+    } else if (size <= kMaxRegularBracketSize) {
+      return (RoundUp(size, kBracketQuantumSize) - kMaxThreadLocalBracketSize) / kBracketQuantumSize
+          - 1 + kNumThreadLocalSizeBrackets;
+    } else if (size <= 1 * KB) {
       return kNumOfSizeBrackets - 2;
     } else {
-      DCHECK(1 * KB < size && size <= 2 * KB);
+      DCHECK_LE(size, 2 * KB);
       return kNumOfSizeBrackets - 1;
     }
   }
   // A combination of SizeToIndex() and RoundToBracketSize().
   static size_t SizeToIndexAndBracketSize(size_t size, size_t* bracket_size_out) {
     DCHECK(size <= kLargeSizeThreshold);
-    if (LIKELY(size <= 512)) {
-      size_t bracket_size = RoundUp(size, 16);
-      *bracket_size_out = bracket_size;
-      size_t idx = bracket_size / 16 - 1;
-      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
-      return idx;
-    } else if (512 < size && size <= 1 * KB) {
-      size_t bracket_size = 1024;
-      *bracket_size_out = bracket_size;
-      size_t idx = kNumOfSizeBrackets - 2;
-      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
-      return idx;
+    size_t idx;
+    size_t bracket_size;
+    if (LIKELY(size <= kMaxThreadLocalBracketSize)) {
+      bracket_size = RoundUp(size, kThreadLocalBracketQuantumSize);
+      idx = bracket_size / kThreadLocalBracketQuantumSize - 1;
+    } else if (size <= kMaxRegularBracketSize) {
+      bracket_size = RoundUp(size, kBracketQuantumSize);
+      idx = ((bracket_size - kMaxThreadLocalBracketSize) / kBracketQuantumSize - 1)
+          + kNumThreadLocalSizeBrackets;
+    } else if (size <= 1 * KB) {
+      bracket_size = 1 * KB;
+      idx = kNumOfSizeBrackets - 2;
     } else {
-      DCHECK(1 * KB < size && size <= 2 * KB);
-      size_t bracket_size = 2048;
-      *bracket_size_out = bracket_size;
-      size_t idx = kNumOfSizeBrackets - 1;
-      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
-      return idx;
+      DCHECK(size <= 2 * KB);
+      bracket_size = 2 * KB;
+      idx = kNumOfSizeBrackets - 1;
     }
+    DCHECK_EQ(idx, SizeToIndex(size)) << idx;
+    DCHECK_EQ(bracket_size, IndexToBracketSize(idx)) << idx;
+    DCHECK_EQ(bracket_size, bracketSizes[idx]) << idx;
+    DCHECK_LE(size, bracket_size) << idx;
+    DCHECK(size > kMaxRegularBracketSize ||
+           (size <= kMaxThreadLocalBracketSize &&
+            bracket_size - size < kThreadLocalBracketQuantumSize) ||
+           (size <= kMaxRegularBracketSize && bracket_size - size < kBracketQuantumSize)) << idx;
+    *bracket_size_out = bracket_size;
+    return idx;
   }
+
   // Returns the page map index from an address. Requires that the
   // address is page size aligned.
   size_t ToPageMapIndex(const void* addr) const {
@@ -630,18 +657,37 @@
   // The default value for page_release_size_threshold_.
   static constexpr size_t kDefaultPageReleaseSizeThreshold = 4 * MB;
 
-  // We use thread-local runs for the size Brackets whose indexes
+  // We use thread-local runs for the size brackets whose indexes
   // are less than this index. We use shared (current) runs for the rest.
-  static const size_t kNumThreadLocalSizeBrackets = 8;
+  // Sync this with the length of Thread::rosalloc_runs_.
+  static const size_t kNumThreadLocalSizeBrackets = 16;
+  static_assert(kNumThreadLocalSizeBrackets == kNumRosAllocThreadLocalSizeBracketsInThread,
+                "Mismatch between kNumThreadLocalSizeBrackets and "
+                "kNumRosAllocThreadLocalSizeBracketsInThread");
 
   // The size of the largest bracket we use thread-local runs for.
   // This should be equal to bracketSizes[kNumThreadLocalSizeBrackets - 1].
   static const size_t kMaxThreadLocalBracketSize = 128;
 
-  // The bracket size increment for the brackets of size <= 512 bytes.
+  // We use regular (8 or 16-bytes increment) runs for the size brackets whose indexes are less than
+  // this index.
+  static const size_t kNumRegularSizeBrackets = 40;
+
+  // The size of the largest regular (8 or 16-byte increment) bracket. Non-regular brackets are the
+  // 1 KB and the 2 KB brackets. This should be equal to bracketSizes[kNumRegularSizeBrackets - 1].
+  static const size_t kMaxRegularBracketSize = 512;
+
+  // The bracket size increment for the thread-local brackets (<= kMaxThreadLocalBracketSize bytes).
+  static constexpr size_t kThreadLocalBracketQuantumSize = 8;
+
+  // Equal to Log2(kThreadLocalBracketQuantumSize).
+  static constexpr size_t kThreadLocalBracketQuantumSizeShift = 3;
+
+  // The bracket size increment for the non-thread-local, regular brackets (of size <=
+  // kMaxRegularBracketSize bytes and > kMaxThreadLocalBracketSize bytes).
   static constexpr size_t kBracketQuantumSize = 16;
 
-  // Equal to Log2(kQuantumBracketSizeIncrement).
+  // Equal to Log2(kBracketQuantumSize).
   static constexpr size_t kBracketQuantumSizeShift = 4;
 
  private:
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index c8e913c..ae41226 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -42,6 +42,8 @@
   kCollectorTypeCC,
   // Instrumentation critical section fake collector.
   kCollectorTypeInstrumentation,
+  // Fake collector for adding or removing application image spaces.
+  kCollectorTypeAddRemoveAppImageSpace,
   // A homogeneous space compaction collector used in background transition
   // when both foreground and background collector are CMS.
   kCollectorTypeHomogeneousSpaceCompact,
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index 84243df..679432b 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -34,6 +34,7 @@
     case kGcCauseHomogeneousSpaceCompact: return "HomogeneousSpaceCompact";
     case kGcCauseTrim: return "HeapTrim";
     case kGcCauseInstrumentation: return "Instrumentation";
+    case kGcCauseAddRemoveAppImageSpace: return "AddRemoveAppImageSpace";
     default:
       LOG(FATAL) << "Unreachable";
       UNREACHABLE();
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index 34c7766..c6b505c 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -41,6 +41,8 @@
   kGcCauseTrim,
   // Not a real GC cause, used to implement exclusion between GC and instrumentation.
   kGcCauseInstrumentation,
+  // Not a real GC cause, used to add or remove app image spaces.
+  kGcCauseAddRemoveAppImageSpace,
   // GC triggered for background transition when both foreground and background collector are CMS.
   kGcCauseHomogeneousSpaceCompact,
 };
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 136b793..3c9312f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -271,7 +271,7 @@
     // The loaded spaces. Secondary images may fail to load, in which case we need to remove
     // already added spaces.
     std::vector<space::Space*> added_image_spaces;
-
+    uint8_t* const original_requested_alloc_space_begin = requested_alloc_space_begin;
     for (size_t index = 0; index < image_file_names.size(); ++index) {
       std::string& image_name = image_file_names[index];
       ATRACE_BEGIN("ImageSpace::Create");
@@ -320,7 +320,7 @@
           delete loaded_space;
         }
         boot_image_spaces_.clear();
-        requested_alloc_space_begin = nullptr;
+        requested_alloc_space_begin = original_requested_alloc_space_begin;
         break;
       }
     }
@@ -2622,6 +2622,10 @@
     }
     if (collector != mark_compact_collector_ && collector != concurrent_copying_collector_) {
       temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+      if (kIsDebugBuild) {
+        // Try to read each page of the memory map in case mprotect didn't work properly b/19894268.
+        temp_space_->GetMemMap()->TryReadable();
+      }
       CHECK(temp_space_->IsEmpty());
     }
     gc_type = collector::kGcTypeFull;  // TODO: Not hard code this in.
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 3eff7fc..0b2471b 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -311,7 +311,7 @@
   shadow_frame.GetMethod()->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
 
   bool transaction_active = Runtime::Current()->IsActiveTransaction();
-  if (LIKELY(shadow_frame.GetMethod()->IsPreverified())) {
+  if (LIKELY(shadow_frame.GetMethod()->SkipAccessChecks())) {
     // Enter the "without access check" interpreter.
     if (kInterpreterImplKind == kMterpImplKind) {
       if (transaction_active) {
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 940d344..ca8598e 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -21,6 +21,7 @@
 #include "base/stl_util.h"  // MakeUnique
 #include "experimental_flags.h"
 #include "interpreter_common.h"
+#include "jit/jit.h"
 #include "safe_math.h"
 
 #include <memory>  // std::unique_ptr
@@ -63,10 +64,15 @@
   currentHandlersTable = handlersTable[ \
       Runtime::Current()->GetInstrumentation()->GetInterpreterHandlerTable()]
 
-#define BRANCH_INSTRUMENTATION(offset) \
-  do { \
+#define BRANCH_INSTRUMENTATION(offset)                                                            \
+  do {                                                                                            \
+    ArtMethod* method = shadow_frame.GetMethod();                                                 \
     instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); \
-    instrumentation->Branch(self, shadow_frame.GetMethod(), dex_pc, offset); \
+    instrumentation->Branch(self, method, dex_pc, offset);                                        \
+    JValue result;                                                                                \
+    if (jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, &result)) {             \
+      return result;                                                                              \
+    }                                                                                             \
   } while (false)
 
 #define UNREACHABLE_CODE_CHECK()                \
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index f606978..25dbab2 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -17,6 +17,7 @@
 #include "base/stl_util.h"  // MakeUnique
 #include "experimental_flags.h"
 #include "interpreter_common.h"
+#include "jit/jit.h"
 #include "safe_math.h"
 
 #include <memory>  // std::unique_ptr
@@ -69,9 +70,14 @@
     }                                                                                           \
   } while (false)
 
-#define BRANCH_INSTRUMENTATION(offset) \
-  do { \
-    instrumentation->Branch(self, shadow_frame.GetMethod(), dex_pc, offset); \
+#define BRANCH_INSTRUMENTATION(offset)                                                         \
+  do {                                                                                         \
+    ArtMethod* method = shadow_frame.GetMethod();                                              \
+    instrumentation->Branch(self, method, dex_pc, offset);                                     \
+    JValue result;                                                                             \
+    if (jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, &result)) {          \
+      return result;                                                                           \
+    }                                                                                          \
   } while (false)
 
 static bool IsExperimentalInstructionEnabled(const Instruction *inst) {
diff --git a/runtime/interpreter/mterp/arm/binopWide.S b/runtime/interpreter/mterp/arm/binopWide.S
index 57d43c6..1d511ec 100644
--- a/runtime/interpreter/mterp/arm/binopWide.S
+++ b/runtime/interpreter/mterp/arm/binopWide.S
@@ -16,10 +16,10 @@
      */
     /* binop vAA, vBB, vCC */
     FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     rINST, rINST, lsr #8        @ rINST<- AA
     and     r2, r0, #255                @ r2<- BB
     mov     r3, r0, lsr #8              @ r3<- CC
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[AA]
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
     ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
@@ -28,8 +28,8 @@
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
     $preinstr                           @ optional op; may set condition codes
     $instr                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm/binopWide2addr.S b/runtime/interpreter/mterp/arm/binopWide2addr.S
index 4e855f2..81db48b 100644
--- a/runtime/interpreter/mterp/arm/binopWide2addr.S
+++ b/runtime/interpreter/mterp/arm/binopWide2addr.S
@@ -15,17 +15,17 @@
      */
     /* binop/2addr vA, vB */
     mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[A]
     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
     ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
     .if $chkzero
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
     $preinstr                           @ optional op; may set condition codes
     $instr                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm/fbinopWide.S b/runtime/interpreter/mterp/arm/fbinopWide.S
index 1bed817..ca13bfb 100644
--- a/runtime/interpreter/mterp/arm/fbinopWide.S
+++ b/runtime/interpreter/mterp/arm/fbinopWide.S
@@ -14,9 +14,9 @@
     VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
     fldd    d1, [r3]                    @ d1<- vCC
     fldd    d0, [r2]                    @ d0<- vBB
-
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
     $instr                              @ s2<- op
+    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vAA
     fstd    d2, [r9]                    @ vAA<- d2
diff --git a/runtime/interpreter/mterp/arm/fbinopWide2addr.S b/runtime/interpreter/mterp/arm/fbinopWide2addr.S
index 9f56986..4e7401d 100644
--- a/runtime/interpreter/mterp/arm/fbinopWide2addr.S
+++ b/runtime/interpreter/mterp/arm/fbinopWide2addr.S
@@ -12,10 +12,10 @@
     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
     and     r9, r9, #15                 @ r9<- A
     fldd    d1, [r3]                    @ d1<- vB
+    CLEAR_SHADOW_PAIR r9, ip, r0        @ Zero out shadow regs
     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     fldd    d0, [r9]                    @ d0<- vA
-
     $instr                              @ d2<- op
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     fstd    d2, [r9]                    @ vAA<- d2
diff --git a/runtime/interpreter/mterp/arm/funopWider.S b/runtime/interpreter/mterp/arm/funopWider.S
index 087a1f2..450ba3a 100644
--- a/runtime/interpreter/mterp/arm/funopWider.S
+++ b/runtime/interpreter/mterp/arm/funopWider.S
@@ -12,6 +12,7 @@
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     and     r9, r9, #15                 @ r9<- A
     $instr                              @ d0<- op
+    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
     fstd    d0, [r9]                    @ vA<- d0
diff --git a/runtime/interpreter/mterp/arm/header.S b/runtime/interpreter/mterp/arm/header.S
index 14319d9..b2370bf 100644
--- a/runtime/interpreter/mterp/arm/header.S
+++ b/runtime/interpreter/mterp/arm/header.S
@@ -263,6 +263,19 @@
     str     \reg, [rFP, \vreg, lsl #2]
     str     \reg, [rREFS, \vreg, lsl #2]
 .endm
+.macro SET_VREG_SHADOW reg, vreg
+    str     \reg, [rREFS, \vreg, lsl #2]
+.endm
+
+/*
+ * Clear the corresponding shadow regs for a vreg pair
+ */
+.macro CLEAR_SHADOW_PAIR vreg, tmp1, tmp2
+    mov     \tmp1, #0
+    add     \tmp2, \vreg, #1
+    SET_VREG_SHADOW \tmp1, \vreg
+    SET_VREG_SHADOW \tmp1, \tmp2
+.endm
 
 /*
  * Convert a virtual register index into an address.
diff --git a/runtime/interpreter/mterp/arm/op_aget_wide.S b/runtime/interpreter/mterp/arm/op_aget_wide.S
index caaec71..e1430b4 100644
--- a/runtime/interpreter/mterp/arm/op_aget_wide.S
+++ b/runtime/interpreter/mterp/arm/op_aget_wide.S
@@ -10,6 +10,7 @@
     mov     r3, r0, lsr #8              @ r3<- CC
     GET_VREG r0, r2                     @ r0<- vBB (array object)
     GET_VREG r1, r3                     @ r1<- vCC (requested index)
+    CLEAR_SHADOW_PAIR r9, r2, r3        @ Zero out the shadow regs
     cmp     r0, #0                      @ null array object?
     beq     common_errNullObject        @ yes, bail
     ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- arrayObj->length
diff --git a/runtime/interpreter/mterp/arm/op_const_wide.S b/runtime/interpreter/mterp/arm/op_const_wide.S
index 2cdc426..12394b6 100644
--- a/runtime/interpreter/mterp/arm/op_const_wide.S
+++ b/runtime/interpreter/mterp/arm/op_const_wide.S
@@ -6,6 +6,7 @@
     FETCH r3, 4                         @ r3<- HHHH (high)
     mov     r9, rINST, lsr #8           @ r9<- AA
     orr     r1, r2, r3, lsl #16         @ r1<- HHHHhhhh (high word)
+    CLEAR_SHADOW_PAIR r9, r2, r3        @ Zero out the shadow regs
     FETCH_ADVANCE_INST 5                @ advance rPC, load rINST
     add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
     GET_INST_OPCODE ip                  @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_16.S b/runtime/interpreter/mterp/arm/op_const_wide_16.S
index 56bfc17..3811d86 100644
--- a/runtime/interpreter/mterp/arm/op_const_wide_16.S
+++ b/runtime/interpreter/mterp/arm/op_const_wide_16.S
@@ -3,6 +3,7 @@
     mov     r3, rINST, lsr #8           @ r3<- AA
     mov     r1, r0, asr #31             @ r1<- ssssssss
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    CLEAR_SHADOW_PAIR r3, r2, lr        @ Zero out the shadow regs
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     stmia   r3, {r0-r1}                 @ vAA<- r0/r1
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_32.S b/runtime/interpreter/mterp/arm/op_const_wide_32.S
index 36d4628..0b6f1cc 100644
--- a/runtime/interpreter/mterp/arm/op_const_wide_32.S
+++ b/runtime/interpreter/mterp/arm/op_const_wide_32.S
@@ -4,6 +4,7 @@
     FETCH_S r2, 2                       @ r2<- ssssBBBB (high)
     FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
     orr     r0, r0, r2, lsl #16         @ r0<- BBBBbbbb
+    CLEAR_SHADOW_PAIR r3, r2, lr        @ Zero out the shadow regs
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
     mov     r1, r0, asr #31             @ r1<- ssssssss
     GET_INST_OPCODE ip                  @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_high16.S b/runtime/interpreter/mterp/arm/op_const_wide_high16.S
index bee592d..b9796eb 100644
--- a/runtime/interpreter/mterp/arm/op_const_wide_high16.S
+++ b/runtime/interpreter/mterp/arm/op_const_wide_high16.S
@@ -4,6 +4,7 @@
     mov     r0, #0                      @ r0<- 00000000
     mov     r1, r1, lsl #16             @ r1<- BBBB0000
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    CLEAR_SHADOW_PAIR r3, r0, r2        @ Zero shadow regs
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     stmia   r3, {r0-r1}                 @ vAA<- r0/r1
diff --git a/runtime/interpreter/mterp/arm/op_iget_wide.S b/runtime/interpreter/mterp/arm/op_iget_wide.S
index f8d2f41..859ffac 100644
--- a/runtime/interpreter/mterp/arm/op_iget_wide.S
+++ b/runtime/interpreter/mterp/arm/op_iget_wide.S
@@ -15,8 +15,9 @@
     PREFETCH_INST 2
     cmp      r3, #0
     bne      MterpException                @ bail out
-    add     r3, rFP, r2, lsl #2            @ r3<- &fp[A]
-    stmia   r3, {r0-r1}                    @ fp[A]<- r0/r1
+    CLEAR_SHADOW_PAIR r2, ip, lr           @ Zero out the shadow regs
+    add      r3, rFP, r2, lsl #2           @ r3<- &fp[A]
+    stmia    r3, {r0-r1}                   @ fp[A]<- r0/r1
     ADVANCE 2
     GET_INST_OPCODE ip                     @ extract opcode from rINST
     GOTO_OPCODE ip                         @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iget_wide_quick.S b/runtime/interpreter/mterp/arm/op_iget_wide_quick.S
index 4d6976e..07f854a 100644
--- a/runtime/interpreter/mterp/arm/op_iget_wide_quick.S
+++ b/runtime/interpreter/mterp/arm/op_iget_wide_quick.S
@@ -8,6 +8,7 @@
     ldrd    r0, [r3, ip]                @ r0<- obj.field (64 bits, aligned)
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
     add     r3, rFP, r2, lsl #2         @ r3<- &fp[A]
+    CLEAR_SHADOW_PAIR r2, ip, lr        @ Zero out the shadow regs
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1
     GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_result_wide.S b/runtime/interpreter/mterp/arm/op_move_result_wide.S
index c64103c..1845ccf 100644
--- a/runtime/interpreter/mterp/arm/op_move_result_wide.S
+++ b/runtime/interpreter/mterp/arm/op_move_result_wide.S
@@ -1,8 +1,9 @@
     /* move-result-wide vAA */
-    mov     r2, rINST, lsr #8           @ r2<- AA
+    mov     rINST, rINST, lsr #8        @ rINST<- AA
     ldr     r3, [rFP, #OFF_FP_RESULT_REGISTER]
-    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
+    add     r2, rFP, rINST, lsl #2      @ r2<- &fp[AA]
     ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
     GET_INST_OPCODE ip                  @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm/op_move_wide.S b/runtime/interpreter/mterp/arm/op_move_wide.S
index 1345b95..f5d156d 100644
--- a/runtime/interpreter/mterp/arm/op_move_wide.S
+++ b/runtime/interpreter/mterp/arm/op_move_wide.S
@@ -1,10 +1,11 @@
     /* move-wide vA, vB */
     /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
     mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r2, rINST, #8, #4           @ r2<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
-    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
+    add     r2, rFP, rINST, lsl #2      @ r2<- &fp[A]
     ldmia   r3, {r0-r1}                 @ r0/r1<- fp[B]
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     stmia   r2, {r0-r1}                 @ fp[A]<- r0/r1
diff --git a/runtime/interpreter/mterp/arm/op_move_wide_16.S b/runtime/interpreter/mterp/arm/op_move_wide_16.S
index 133a4c3..8a55c4b 100644
--- a/runtime/interpreter/mterp/arm/op_move_wide_16.S
+++ b/runtime/interpreter/mterp/arm/op_move_wide_16.S
@@ -3,9 +3,10 @@
     FETCH r3, 2                         @ r3<- BBBB
     FETCH r2, 1                         @ r2<- AAAA
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
-    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AAAA]
+    add     lr, rFP, r2, lsl #2         @ r2<- &fp[AAAA]
     ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
     FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
-    stmia   r2, {r0-r1}                 @ fp[AAAA]<- r0/r1
+    CLEAR_SHADOW_PAIR r2, r3, ip        @ Zero out the shadow regs
+    stmia   lr, {r0-r1}                 @ fp[AAAA]<- r0/r1
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_wide_from16.S b/runtime/interpreter/mterp/arm/op_move_wide_from16.S
index f2ae785..b65259d 100644
--- a/runtime/interpreter/mterp/arm/op_move_wide_from16.S
+++ b/runtime/interpreter/mterp/arm/op_move_wide_from16.S
@@ -1,10 +1,11 @@
     /* move-wide/from16 vAA, vBBBB */
     /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
     FETCH r3, 1                         @ r3<- BBBB
-    mov     r2, rINST, lsr #8           @ r2<- AA
+    mov     rINST, rINST, lsr #8        @ rINST<- AA
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
-    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
+    add     r2, rFP, rINST, lsl #2      @ r2<- &fp[AA]
     ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
diff --git a/runtime/interpreter/mterp/arm/op_sget_wide.S b/runtime/interpreter/mterp/arm/op_sget_wide.S
index 97db05f..3a50908 100644
--- a/runtime/interpreter/mterp/arm/op_sget_wide.S
+++ b/runtime/interpreter/mterp/arm/op_sget_wide.S
@@ -12,10 +12,11 @@
     bl    artGet64StaticFromCode
     ldr   r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
     mov   r9, rINST, lsr #8             @ r9<- AA
-    add   r9, rFP, r9, lsl #2           @ r9<- &fp[AA]
+    add   lr, rFP, r9, lsl #2           @ lr<- &fp[AA]
     cmp   r3, #0                        @ Fail to resolve?
     bne   MterpException                @ bail out
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
+    CLEAR_SHADOW_PAIR r9, r2, ip        @ Zero out the shadow regs
+    stmia lr, {r0-r1}                   @ vAA/vAA+1<- r0/r1
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/unopWide.S b/runtime/interpreter/mterp/arm/unopWide.S
index 7b8739c..a074234 100644
--- a/runtime/interpreter/mterp/arm/unopWide.S
+++ b/runtime/interpreter/mterp/arm/unopWide.S
@@ -8,10 +8,11 @@
      */
     /* unop vA, vB */
     mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[A]
     ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     $preinstr                           @ optional op; may set condition codes
     $instr                              @ r0/r1<- op, r2-r3 changed
diff --git a/runtime/interpreter/mterp/arm/unopWider.S b/runtime/interpreter/mterp/arm/unopWider.S
index 657a395..23b6b9d 100644
--- a/runtime/interpreter/mterp/arm/unopWider.S
+++ b/runtime/interpreter/mterp/arm/unopWider.S
@@ -8,10 +8,11 @@
      */
     /* unop vA, vB */
     mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     GET_VREG r0, r3                     @ r0<- vB
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[A]
     $preinstr                           @ optional op; may set condition codes
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     $instr                              @ r0<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/config_x86 b/runtime/interpreter/mterp/config_x86
index 5fab379..f1501e1 100644
--- a/runtime/interpreter/mterp/config_x86
+++ b/runtime/interpreter/mterp/config_x86
@@ -19,6 +19,10 @@
 handler-style computed-goto
 handler-size 128
 
+function-type-format FUNCTION_TYPE(%s)
+function-size-format SIZE(%s,%s)
+global-name-format SYMBOL(%s)
+
 # source for alternate entry stub
 asm-alt-stub x86/alt_stub.S
 
diff --git a/runtime/interpreter/mterp/gen_mterp.py b/runtime/interpreter/mterp/gen_mterp.py
index f56d8bd..5839b5f 100755
--- a/runtime/interpreter/mterp/gen_mterp.py
+++ b/runtime/interpreter/mterp/gen_mterp.py
@@ -41,6 +41,9 @@
 alt_label_prefix = ".L_ALT" # use ".L" to hide labels from gdb
 style = None                # interpreter style
 generate_alt_table = False
+function_type_format = ".type   %s, %%function"
+function_size_format = ".size   %s, .-%s"
+global_name_format = "%s"
 
 # Exception class.
 class DataParseError(SyntaxError):
@@ -147,7 +150,24 @@
         raise DataParseError("import requires one argument")
     default_alt_stub = tokens[1]
     generate_alt_table = True
-
+#
+# Change the default function type format
+#
+def setFunctionTypeFormat(tokens):
+    global function_type_format
+    function_type_format = tokens[1]
+#
+# Change the default function size format
+#
+def setFunctionSizeFormat(tokens):
+    global function_size_format
+    function_size_format = tokens[1]
+#
+# Change the global name format
+#
+def setGlobalNameFormat(tokens):
+    global global_name_format
+    global_name_format = tokens[1]
 #
 # Parse arch config file --
 # Start of opcode list.
@@ -259,12 +279,12 @@
     sister_list = []
     assert len(opcodes) == kNumPackedOpcodes
     need_dummy_start = False
-    start_label = "artMterpAsmInstructionStart"
-    end_label = "artMterpAsmInstructionEnd"
+    start_label = global_name_format % "artMterpAsmInstructionStart"
+    end_label = global_name_format % "artMterpAsmInstructionEnd"
 
     # point MterpAsmInstructionStart at the first handler or stub
     asm_fp.write("\n    .global %s\n" % start_label)
-    asm_fp.write("    .type   %s, %%function\n" % start_label)
+    asm_fp.write("    " + (function_type_format % start_label) + "\n");
     asm_fp.write("%s = " % start_label + label_prefix + "_op_nop\n")
     asm_fp.write("    .text\n\n")
 
@@ -290,21 +310,23 @@
         asm_fp.write(label_prefix + "_op_nop:   /* dummy */\n");
 
     emitAlign()
-    asm_fp.write("    .size   %s, .-%s\n" % (start_label, start_label))
+    asm_fp.write("    " + (function_size_format % (start_label, start_label)) + "\n")
     asm_fp.write("    .global %s\n" % end_label)
     asm_fp.write("%s:\n" % end_label)
 
     if style == "computed-goto":
+        start_sister_label = global_name_format % "artMterpAsmSisterStart"
+        end_sister_label = global_name_format % "artMterpAsmSisterEnd"
         emitSectionComment("Sister implementations", asm_fp)
-        asm_fp.write("    .global artMterpAsmSisterStart\n")
-        asm_fp.write("    .type   artMterpAsmSisterStart, %function\n")
+        asm_fp.write("    .global %s\n" % start_sister_label)
+        asm_fp.write("    " + (function_type_format % start_sister_label) + "\n");
         asm_fp.write("    .text\n")
         asm_fp.write("    .balign 4\n")
-        asm_fp.write("artMterpAsmSisterStart:\n")
+        asm_fp.write("%s:\n" % start_sister_label)
         asm_fp.writelines(sister_list)
-        asm_fp.write("\n    .size   artMterpAsmSisterStart, .-artMterpAsmSisterStart\n")
-        asm_fp.write("    .global artMterpAsmSisterEnd\n")
-        asm_fp.write("artMterpAsmSisterEnd:\n\n")
+        asm_fp.write("\n    " + (function_size_format % (start_sister_label, start_sister_label)) + "\n")
+        asm_fp.write("    .global %s\n" % end_sister_label)
+        asm_fp.write("%s:\n\n" % end_sister_label)
 
 #
 # Load an alternate entry stub
@@ -324,12 +346,12 @@
 #
 def loadAndEmitAltOpcodes():
     assert len(opcodes) == kNumPackedOpcodes
-    start_label = "artMterpAsmAltInstructionStart"
-    end_label = "artMterpAsmAltInstructionEnd"
+    start_label = global_name_format % "artMterpAsmAltInstructionStart"
+    end_label = global_name_format % "artMterpAsmAltInstructionEnd"
 
     # point MterpAsmInstructionStart at the first handler or stub
     asm_fp.write("\n    .global %s\n" % start_label)
-    asm_fp.write("    .type   %s, %%function\n" % start_label)
+    asm_fp.write("    " + (function_type_format % start_label) + "\n");
     asm_fp.write("    .text\n\n")
     asm_fp.write("%s = " % start_label + label_prefix + "_ALT_op_nop\n")
 
@@ -342,7 +364,7 @@
         loadAndEmitAltStub(source, i)
 
     emitAlign()
-    asm_fp.write("    .size   %s, .-%s\n" % (start_label, start_label))
+    asm_fp.write("    " + (function_size_format % (start_label, start_label)) + "\n")
     asm_fp.write("    .global %s\n" % end_label)
     asm_fp.write("%s:\n" % end_label)
 
@@ -579,6 +601,12 @@
                 splitops = True
             elif tokens[0] == "fallback-stub":
                setFallbackStub(tokens)
+            elif tokens[0] == "function-type-format":
+               setFunctionTypeFormat(tokens)
+            elif tokens[0] == "function-size-format":
+               setFunctionSizeFormat(tokens)
+            elif tokens[0] == "global-name-format":
+               setGlobalNameFormat(tokens)
             else:
                 raise DataParseError, "unrecognized command '%s'" % tokens[0]
             if style == None:
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index 78c784b..ee19559 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -270,6 +270,19 @@
     str     \reg, [rFP, \vreg, lsl #2]
     str     \reg, [rREFS, \vreg, lsl #2]
 .endm
+.macro SET_VREG_SHADOW reg, vreg
+    str     \reg, [rREFS, \vreg, lsl #2]
+.endm
+
+/*
+ * Clear the corresponding shadow regs for a vreg pair
+ */
+.macro CLEAR_SHADOW_PAIR vreg, tmp1, tmp2
+    mov     \tmp1, #0
+    add     \tmp2, \vreg, #1
+    SET_VREG_SHADOW \tmp1, \vreg
+    SET_VREG_SHADOW \tmp1, \tmp2
+.endm
 
 /*
  * Convert a virtual register index into an address.
@@ -426,10 +439,11 @@
     /* move-wide vA, vB */
     /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
     mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r2, rINST, #8, #4           @ r2<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
-    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
+    add     r2, rFP, rINST, lsl #2      @ r2<- &fp[A]
     ldmia   r3, {r0-r1}                 @ r0/r1<- fp[B]
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     stmia   r2, {r0-r1}                 @ fp[A]<- r0/r1
@@ -442,10 +456,11 @@
     /* move-wide/from16 vAA, vBBBB */
     /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
     FETCH r3, 1                         @ r3<- BBBB
-    mov     r2, rINST, lsr #8           @ r2<- AA
+    mov     rINST, rINST, lsr #8        @ rINST<- AA
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
-    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
+    add     r2, rFP, rINST, lsl #2      @ r2<- &fp[AA]
     ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
@@ -460,10 +475,11 @@
     FETCH r3, 2                         @ r3<- BBBB
     FETCH r2, 1                         @ r2<- AAAA
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
-    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AAAA]
+    add     lr, rFP, r2, lsl #2         @ lr<- &fp[AAAA]
     ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
     FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
-    stmia   r2, {r0-r1}                 @ fp[AAAA]<- r0/r1
+    CLEAR_SHADOW_PAIR r2, r3, ip        @ Zero out the shadow regs
+    stmia   lr, {r0-r1}                 @ fp[AAAA]<- r0/r1
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     GOTO_OPCODE ip                      @ jump to next instruction
 
@@ -550,10 +566,11 @@
 .L_op_move_result_wide: /* 0x0b */
 /* File: arm/op_move_result_wide.S */
     /* move-result-wide vAA */
-    mov     r2, rINST, lsr #8           @ r2<- AA
+    mov     rINST, rINST, lsr #8        @ rINST<- AA
     ldr     r3, [rFP, #OFF_FP_RESULT_REGISTER]
-    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
+    add     r2, rFP, rINST, lsl #2      @ r2<- &fp[AA]
     ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero out the shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -731,6 +748,7 @@
     mov     r3, rINST, lsr #8           @ r3<- AA
     mov     r1, r0, asr #31             @ r1<- ssssssss
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    CLEAR_SHADOW_PAIR r3, r2, lr        @ Zero out the shadow regs
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     stmia   r3, {r0-r1}                 @ vAA<- r0/r1
@@ -746,6 +764,7 @@
     FETCH_S r2, 2                       @ r2<- ssssBBBB (high)
     FETCH_ADVANCE_INST 3                @ advance rPC, load rINST
     orr     r0, r0, r2, lsl #16         @ r0<- BBBBbbbb
+    CLEAR_SHADOW_PAIR r3, r2, lr        @ Zero out the shadow regs
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
     mov     r1, r0, asr #31             @ r1<- ssssssss
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -764,6 +783,7 @@
     FETCH r3, 4                         @ r3<- HHHH (high)
     mov     r9, rINST, lsr #8           @ r9<- AA
     orr     r1, r2, r3, lsl #16         @ r1<- HHHHhhhh (high word)
+    CLEAR_SHADOW_PAIR r9, r2, r3        @ Zero out the shadow regs
     FETCH_ADVANCE_INST 5                @ advance rPC, load rINST
     add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -780,6 +800,7 @@
     mov     r0, #0                      @ r0<- 00000000
     mov     r1, r1, lsl #16             @ r1<- BBBB0000
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
+    CLEAR_SHADOW_PAIR r3, r0, r2        @ Zero shadow regs
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     stmia   r3, {r0-r1}                 @ vAA<- r0/r1
@@ -2068,6 +2089,7 @@
     mov     r3, r0, lsr #8              @ r3<- CC
     GET_VREG r0, r2                     @ r0<- vBB (array object)
     GET_VREG r1, r3                     @ r1<- vCC (requested index)
+    CLEAR_SHADOW_PAIR r9, r2, r3        @ Zero out the shadow regs
     cmp     r0, #0                      @ null array object?
     beq     common_errNullObject        @ yes, bail
     ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- arrayObj->length
@@ -2519,8 +2541,9 @@
     PREFETCH_INST 2
     cmp      r3, #0
     bne      MterpException                @ bail out
-    add     r3, rFP, r2, lsl #2            @ r3<- &fp[A]
-    stmia   r3, {r0-r1}                    @ fp[A]<- r0/r1
+    CLEAR_SHADOW_PAIR r2, ip, lr           @ Zero out the shadow regs
+    add      r3, rFP, r2, lsl #2           @ r3<- &fp[A]
+    stmia    r3, {r0-r1}                   @ fp[A]<- r0/r1
     ADVANCE 2
     GET_INST_OPCODE ip                     @ extract opcode from rINST
     GOTO_OPCODE ip                         @ jump to next instruction
@@ -2909,11 +2932,12 @@
     bl    artGet64StaticFromCode
     ldr   r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
     mov   r9, rINST, lsr #8             @ r9<- AA
-    add   r9, rFP, r9, lsl #2           @ r9<- &fp[AA]
+    add   lr, rFP, r9, lsl #2           @ lr<- &fp[AA]
     cmp   r3, #0                        @ Fail to resolve?
     bne   MterpException                @ bail out
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
+    CLEAR_SHADOW_PAIR r9, r2, ip        @ Zero out the shadow regs
+    stmia lr, {r0-r1}                   @ vAA/vAA+1<- r0/r1
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     GOTO_OPCODE ip                      @ jump to next instruction
 
@@ -3622,10 +3646,11 @@
      */
     /* unop vA, vB */
     mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[A]
     ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     rsbs    r0, r0, #0                           @ optional op; may set condition codes
     rsc     r1, r1, #0                              @ r0/r1<- op, r2-r3 changed
@@ -3649,10 +3674,11 @@
      */
     /* unop vA, vB */
     mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[A]
     ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     mvn     r0, r0                           @ optional op; may set condition codes
     mvn     r1, r1                              @ r0/r1<- op, r2-r3 changed
@@ -3702,10 +3728,11 @@
      */
     /* unop vA, vB */
     mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[A]
     ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
                                @ optional op; may set condition codes
     add     r1, r1, #0x80000000                              @ r0/r1<- op, r2-r3 changed
@@ -3729,10 +3756,11 @@
      */
     /* unop vA, vB */
     mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     GET_VREG r0, r3                     @ r0<- vB
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[A]
                                @ optional op; may set condition codes
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     mov     r1, r0, asr #31                              @ r0<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -3785,6 +3813,7 @@
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     and     r9, r9, #15                 @ r9<- A
     fsitod  d0, s0                              @ d0<- op
+    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
     fstd    d0, [r9]                    @ vA<- d0
@@ -3912,10 +3941,11 @@
      */
     /* unop vA, vB */
     mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     GET_VREG r0, r3                     @ r0<- vB
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[A]
                                @ optional op; may set condition codes
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     bl      f2l_doconv                              @ r0<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -3944,6 +3974,7 @@
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     and     r9, r9, #15                 @ r9<- A
     fcvtds  d0, s0                              @ d0<- op
+    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
     fstd    d0, [r9]                    @ vA<- d0
@@ -3990,10 +4021,11 @@
      */
     /* unop vA, vB */
     mov     r3, rINST, lsr #12          @ r3<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[A]
     ldmia   r3, {r0-r1}                 @ r0/r1<- vAA
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
                                @ optional op; may set condition codes
     bl      d2l_doconv                              @ r0/r1<- op, r2-r3 changed
@@ -4570,10 +4602,10 @@
      */
     /* binop vAA, vBB, vCC */
     FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     rINST, rINST, lsr #8        @ rINST<- AA
     and     r2, r0, #255                @ r2<- BB
     mov     r3, r0, lsr #8              @ r3<- CC
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[AA]
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
     ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
@@ -4582,8 +4614,8 @@
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
     adds    r0, r0, r2                           @ optional op; may set condition codes
     adc     r1, r1, r3                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -4614,10 +4646,10 @@
      */
     /* binop vAA, vBB, vCC */
     FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     rINST, rINST, lsr #8        @ rINST<- AA
     and     r2, r0, #255                @ r2<- BB
     mov     r3, r0, lsr #8              @ r3<- CC
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[AA]
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
     ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
@@ -4626,8 +4658,8 @@
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
     subs    r0, r0, r2                           @ optional op; may set condition codes
     sbc     r1, r1, r3                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -4699,10 +4731,10 @@
      */
     /* binop vAA, vBB, vCC */
     FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     rINST, rINST, lsr #8        @ rINST<- AA
     and     r2, r0, #255                @ r2<- BB
     mov     r3, r0, lsr #8              @ r3<- CC
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[AA]
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
     ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
@@ -4711,8 +4743,8 @@
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
                                @ optional op; may set condition codes
     bl      __aeabi_ldivmod                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -4744,10 +4776,10 @@
      */
     /* binop vAA, vBB, vCC */
     FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     rINST, rINST, lsr #8        @ rINST<- AA
     and     r2, r0, #255                @ r2<- BB
     mov     r3, r0, lsr #8              @ r3<- CC
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[AA]
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
     ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
@@ -4756,8 +4788,8 @@
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
                                @ optional op; may set condition codes
     bl      __aeabi_ldivmod                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -4788,10 +4820,10 @@
      */
     /* binop vAA, vBB, vCC */
     FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     rINST, rINST, lsr #8        @ rINST<- AA
     and     r2, r0, #255                @ r2<- BB
     mov     r3, r0, lsr #8              @ r3<- CC
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[AA]
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
     ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
@@ -4800,8 +4832,8 @@
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
     and     r0, r0, r2                           @ optional op; may set condition codes
     and     r1, r1, r3                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -4832,10 +4864,10 @@
      */
     /* binop vAA, vBB, vCC */
     FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     rINST, rINST, lsr #8        @ rINST<- AA
     and     r2, r0, #255                @ r2<- BB
     mov     r3, r0, lsr #8              @ r3<- CC
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[AA]
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
     ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
@@ -4844,8 +4876,8 @@
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
     orr     r0, r0, r2                           @ optional op; may set condition codes
     orr     r1, r1, r3                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -4876,10 +4908,10 @@
      */
     /* binop vAA, vBB, vCC */
     FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     rINST, rINST, lsr #8        @ rINST<- AA
     and     r2, r0, #255                @ r2<- BB
     mov     r3, r0, lsr #8              @ r3<- CC
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[AA]
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
     ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
@@ -4888,8 +4920,8 @@
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
     eor     r0, r0, r2                           @ optional op; may set condition codes
     eor     r1, r1, r3                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -5177,9 +5209,9 @@
     VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
     fldd    d1, [r3]                    @ d1<- vCC
     fldd    d0, [r2]                    @ d0<- vBB
-
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
     faddd   d2, d0, d1                              @ s2<- op
+    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vAA
     fstd    d2, [r9]                    @ vAA<- d2
@@ -5207,9 +5239,9 @@
     VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
     fldd    d1, [r3]                    @ d1<- vCC
     fldd    d0, [r2]                    @ d0<- vBB
-
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
     fsubd   d2, d0, d1                              @ s2<- op
+    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vAA
     fstd    d2, [r9]                    @ vAA<- d2
@@ -5237,9 +5269,9 @@
     VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
     fldd    d1, [r3]                    @ d1<- vCC
     fldd    d0, [r2]                    @ d0<- vBB
-
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
     fmuld   d2, d0, d1                              @ s2<- op
+    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vAA
     fstd    d2, [r9]                    @ vAA<- d2
@@ -5267,9 +5299,9 @@
     VREG_INDEX_TO_ADDR r2, r2           @ r2<- &vBB
     fldd    d1, [r3]                    @ d1<- vCC
     fldd    d0, [r2]                    @ d0<- vBB
-
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
     fdivd   d2, d0, d1                              @ s2<- op
+    CLEAR_SHADOW_PAIR r9, ip, lr        @ Zero shadow regs
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vAA
     fstd    d2, [r9]                    @ vAA<- d2
@@ -5299,10 +5331,10 @@
      */
     /* binop vAA, vBB, vCC */
     FETCH r0, 1                         @ r0<- CCBB
-    mov     r9, rINST, lsr #8           @ r9<- AA
+    mov     rINST, rINST, lsr #8        @ rINST<- AA
     and     r2, r0, #255                @ r2<- BB
     mov     r3, r0, lsr #8              @ r3<- CC
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[AA]
     add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
     add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
     ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
@@ -5311,8 +5343,8 @@
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, lr, ip     @ Zero out the shadow regs
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
-
                                @ optional op; may set condition codes
     bl      fmod                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -5754,17 +5786,17 @@
      */
     /* binop/2addr vA, vB */
     mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[A]
     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
     ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
     .if 0
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
     adds    r0, r0, r2                           @ optional op; may set condition codes
     adc     r1, r1, r3                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -5794,17 +5826,17 @@
      */
     /* binop/2addr vA, vB */
     mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[A]
     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
     ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
     .if 0
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
     subs    r0, r0, r2                           @ optional op; may set condition codes
     sbc     r1, r1, r3                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -5863,17 +5895,17 @@
      */
     /* binop/2addr vA, vB */
     mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[A]
     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
     ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
     .if 1
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
                                @ optional op; may set condition codes
     bl      __aeabi_ldivmod                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -5904,17 +5936,17 @@
      */
     /* binop/2addr vA, vB */
     mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[A]
     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
     ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
     .if 1
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
                                @ optional op; may set condition codes
     bl      __aeabi_ldivmod                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -5944,17 +5976,17 @@
      */
     /* binop/2addr vA, vB */
     mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[A]
     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
     ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
     .if 0
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
     and     r0, r0, r2                           @ optional op; may set condition codes
     and     r1, r1, r3                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -5984,17 +6016,17 @@
      */
     /* binop/2addr vA, vB */
     mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[A]
     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
     ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
     .if 0
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
     orr     r0, r0, r2                           @ optional op; may set condition codes
     orr     r1, r1, r3                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -6024,17 +6056,17 @@
      */
     /* binop/2addr vA, vB */
     mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[A]
     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
     ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
     .if 0
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
     eor     r0, r0, r2                           @ optional op; may set condition codes
     eor     r1, r1, r3                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -6294,10 +6326,10 @@
     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
     and     r9, r9, #15                 @ r9<- A
     fldd    d1, [r3]                    @ d1<- vB
+    CLEAR_SHADOW_PAIR r9, ip, r0        @ Zero out shadow regs
     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     fldd    d0, [r9]                    @ d0<- vA
-
     faddd   d2, d0, d1                              @ d2<- op
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     fstd    d2, [r9]                    @ vAA<- d2
@@ -6323,10 +6355,10 @@
     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
     and     r9, r9, #15                 @ r9<- A
     fldd    d1, [r3]                    @ d1<- vB
+    CLEAR_SHADOW_PAIR r9, ip, r0        @ Zero out shadow regs
     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     fldd    d0, [r9]                    @ d0<- vA
-
     fsubd   d2, d0, d1                              @ d2<- op
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     fstd    d2, [r9]                    @ vAA<- d2
@@ -6352,10 +6384,10 @@
     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
     and     r9, r9, #15                 @ r9<- A
     fldd    d1, [r3]                    @ d1<- vB
+    CLEAR_SHADOW_PAIR r9, ip, r0        @ Zero out shadow regs
     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     fldd    d0, [r9]                    @ d0<- vA
-
     fmuld   d2, d0, d1                              @ d2<- op
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     fstd    d2, [r9]                    @ vAA<- d2
@@ -6381,10 +6413,10 @@
     VREG_INDEX_TO_ADDR r3, r3           @ r3<- &vB
     and     r9, r9, #15                 @ r9<- A
     fldd    d1, [r3]                    @ d1<- vB
+    CLEAR_SHADOW_PAIR r9, ip, r0        @ Zero out shadow regs
     VREG_INDEX_TO_ADDR r9, r9           @ r9<- &vA
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
     fldd    d0, [r9]                    @ d0<- vA
-
     fdivd   d2, d0, d1                              @ d2<- op
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     fstd    d2, [r9]                    @ vAA<- d2
@@ -6413,17 +6445,17 @@
      */
     /* binop/2addr vA, vB */
     mov     r1, rINST, lsr #12          @ r1<- B
-    ubfx    r9, rINST, #8, #4           @ r9<- A
+    ubfx    rINST, rINST, #8, #4        @ rINST<- A
     add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
-    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
+    add     r9, rFP, rINST, lsl #2      @ r9<- &fp[A]
     ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
     ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
     .if 0
     orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
     beq     common_errDivideByZero
     .endif
+    CLEAR_SHADOW_PAIR rINST, ip, lr     @ Zero shadow regs
     FETCH_ADVANCE_INST 1                @ advance rPC, load rINST
-
                                @ optional op; may set condition codes
     bl      fmod                              @ result<- op, r0-r3 changed
     GET_INST_OPCODE ip                  @ extract opcode from rINST
@@ -7155,6 +7187,7 @@
     ldrd    r0, [r3, ip]                @ r0<- obj.field (64 bits, aligned)
     FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
     add     r3, rFP, r2, lsl #2         @ r3<- &fp[A]
+    CLEAR_SHADOW_PAIR r2, ip, lr        @ Zero out the shadow regs
     GET_INST_OPCODE ip                  @ extract opcode from rINST
     stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1
     GOTO_OPCODE ip                      @ jump to next instruction
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index e2918dc..96229ce 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -96,6 +96,22 @@
  */
 #include "asm_support.h"
 
+/*
+ * Handle mac compiler specific
+ */
+#if defined(__APPLE__)
+    #define MACRO_LITERAL(value) $(value)
+    #define FUNCTION_TYPE(name)
+    #define SIZE(start,end)
+    // Mac OS' symbols have an _ prefix.
+    #define SYMBOL(name) _ ## name
+#else
+    #define MACRO_LITERAL(value) $value
+    #define FUNCTION_TYPE(name) .type name, @function
+    #define SIZE(start,end) .size start, .-end
+    #define SYMBOL(name) name
+#endif
+
 /* Frame size must be 16-byte aligned.
  * Remember about 4 bytes for return address
  */
@@ -199,7 +215,7 @@
  */
 .macro REFRESH_INST _opnum
     movb    rINSTbl, rINSTbh
-    movb    $\_opnum, rINSTbl
+    movb    MACRO_LITERAL(\_opnum), rINSTbl
 .endm
 
 /*
@@ -215,7 +231,7 @@
 .macro GOTO_NEXT
     movzx   rINSTbl,%eax
     movzbl  rINSTbh,rINST
-    shll    $7, %eax
+    shll    MACRO_LITERAL(7), %eax
     addl    rIBASE, %eax
     jmp     *%eax
 .endm
@@ -255,7 +271,7 @@
 
 .macro SET_VREG _reg _vreg
     movl    \_reg, (rFP,\_vreg,4)
-    movl    $0, (rREFS,\_vreg,4)
+    movl    MACRO_LITERAL(0), (rREFS,\_vreg,4)
 .endm
 
 /* Write wide value from xmm. xmm is clobbered. */
@@ -276,16 +292,16 @@
 
 .macro SET_VREG_HIGH _reg _vreg
     movl    \_reg, 4(rFP,\_vreg,4)
-    movl    $0, 4(rREFS,\_vreg,4)
+    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
 .endm
 
 .macro CLEAR_REF _vreg
-    movl    $0,  (rREFS,\_vreg,4)
+    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
 .endm
 
 .macro CLEAR_WIDE_REF _vreg
-    movl    $0,  (rREFS,\_vreg,4)
-    movl    $0, 4(rREFS,\_vreg,4)
+    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
+    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
 .endm
 
 /* File: x86/entry.S */
@@ -309,8 +325,8 @@
  */
 
     .text
-    .global ExecuteMterpImpl
-    .type   ExecuteMterpImpl, %function
+    .global SYMBOL(ExecuteMterpImpl)
+    FUNCTION_TYPE(ExecuteMterpImpl)
 
 /*
  * On entry:
@@ -321,7 +337,7 @@
  *
  */
 
-ExecuteMterpImpl:
+SYMBOL(ExecuteMterpImpl):
     .cfi_startproc
     /* Allocate frame */
     subl    $FRAME_SIZE, %esp
@@ -362,9 +378,9 @@
     /* NOTE: no fallthrough */
 
 
-    .global artMterpAsmInstructionStart
-    .type   artMterpAsmInstructionStart, %function
-artMterpAsmInstructionStart = .L_op_nop
+    .global SYMBOL(artMterpAsmInstructionStart)
+    FUNCTION_TYPE(SYMBOL(artMterpAsmInstructionStart))
+SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
     .text
 
 /* ------------------------------ */
@@ -382,11 +398,11 @@
     movzbl  rINSTbl, %eax                   # eax <- BA
     andb    $0xf, %al                      # eax <- A
     shrl    $4, rINST                      # rINST <- B
-    GET_VREG rINST rINST
+    GET_VREG rINST, rINST
     .if 0
-    SET_VREG_OBJECT rINST %eax              # fp[A] <- fp[B]
+    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
     .else
-    SET_VREG rINST %eax                     # fp[A] <- fp[B]
+    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
@@ -398,11 +414,11 @@
     /* op vAA, vBBBB */
     movzx   rINSTbl, %eax                   # eax <- AA
     movw    2(rPC), rINSTw                  # rINSTw <- BBBB
-    GET_VREG rINST rINST                    # rINST <- fp[BBBB]
+    GET_VREG rINST, rINST                   # rINST <- fp[BBBB]
     .if 0
-    SET_VREG_OBJECT rINST %eax              # fp[A] <- fp[B]
+    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
     .else
-    SET_VREG rINST %eax                     # fp[A] <- fp[B]
+    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -414,11 +430,11 @@
     /* op vAAAA, vBBBB */
     movzwl  4(rPC), %ecx                    # ecx <- BBBB
     movzwl  2(rPC), %eax                    # eax <- AAAA
-    GET_VREG rINST %ecx
+    GET_VREG rINST, %ecx
     .if 0
-    SET_VREG_OBJECT rINST %eax              # fp[A] <- fp[B]
+    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
     .else
-    SET_VREG rINST %eax                     # fp[A] <- fp[B]
+    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
 
@@ -431,8 +447,8 @@
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $4, rINST                      # rINST <- B
     andb    $0xf, %cl                      # ecx <- A
-    GET_WIDE_FP_VREG %xmm0 rINST            # xmm0 <- v[B]
-    SET_WIDE_FP_VREG %xmm0 %ecx             # v[A] <- xmm0
+    GET_WIDE_FP_VREG %xmm0, rINST           # xmm0 <- v[B]
+    SET_WIDE_FP_VREG %xmm0, %ecx            # v[A] <- xmm0
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 /* ------------------------------ */
@@ -443,8 +459,8 @@
     /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
     movzwl  2(rPC), %ecx                    # ecx <- BBBB
     movzbl  rINSTbl, %eax                   # eax <- AAAA
-    GET_WIDE_FP_VREG %xmm0 %ecx             # xmm0 <- v[B]
-    SET_WIDE_FP_VREG %xmm0 %eax             # v[A] <- xmm0
+    GET_WIDE_FP_VREG %xmm0, %ecx            # xmm0 <- v[B]
+    SET_WIDE_FP_VREG %xmm0, %eax            # v[A] <- xmm0
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -455,8 +471,8 @@
     /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
     movzwl  4(rPC), %ecx                    # ecx<- BBBB
     movzwl  2(rPC), %eax                    # eax<- AAAA
-    GET_WIDE_FP_VREG %xmm0 %ecx             # xmm0 <- v[B]
-    SET_WIDE_FP_VREG %xmm0 %eax             # v[A] <- xmm0
+    GET_WIDE_FP_VREG %xmm0, %ecx            # xmm0 <- v[B]
+    SET_WIDE_FP_VREG %xmm0, %eax            # v[A] <- xmm0
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
 
 /* ------------------------------ */
@@ -469,11 +485,11 @@
     movzbl  rINSTbl, %eax                   # eax <- BA
     andb    $0xf, %al                      # eax <- A
     shrl    $4, rINST                      # rINST <- B
-    GET_VREG rINST rINST
+    GET_VREG rINST, rINST
     .if 1
-    SET_VREG_OBJECT rINST %eax              # fp[A] <- fp[B]
+    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
     .else
-    SET_VREG rINST %eax                     # fp[A] <- fp[B]
+    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
@@ -487,11 +503,11 @@
     /* op vAA, vBBBB */
     movzx   rINSTbl, %eax                   # eax <- AA
     movw    2(rPC), rINSTw                  # rINSTw <- BBBB
-    GET_VREG rINST rINST                    # rINST <- fp[BBBB]
+    GET_VREG rINST, rINST                   # rINST <- fp[BBBB]
     .if 1
-    SET_VREG_OBJECT rINST %eax              # fp[A] <- fp[B]
+    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
     .else
-    SET_VREG rINST %eax                     # fp[A] <- fp[B]
+    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -505,11 +521,11 @@
     /* op vAAAA, vBBBB */
     movzwl  4(rPC), %ecx                    # ecx <- BBBB
     movzwl  2(rPC), %eax                    # eax <- AAAA
-    GET_VREG rINST %ecx
+    GET_VREG rINST, %ecx
     .if 1
-    SET_VREG_OBJECT rINST %eax              # fp[A] <- fp[B]
+    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
     .else
-    SET_VREG rINST %eax                     # fp[A] <- fp[B]
+    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
 
@@ -523,9 +539,9 @@
     movl    OFF_FP_RESULT_REGISTER(rFP), %eax    # get pointer to result JType.
     movl    (%eax), %eax                    # r0 <- result.i.
     .if 0
-    SET_VREG_OBJECT %eax rINST              # fp[A] <- fp[B]
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <- fp[B]
     .else
-    SET_VREG %eax rINST                     # fp[A] <- fp[B]
+    SET_VREG %eax, rINST                    # fp[A] <- fp[B]
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
@@ -537,8 +553,8 @@
     movl    OFF_FP_RESULT_REGISTER(rFP), %eax    # get pointer to result JType.
     movl    4(%eax), %ecx                   # Get high
     movl    (%eax), %eax                    # Get low
-    SET_VREG %eax rINST                     # v[AA+0] <- eax
-    SET_VREG_HIGH %ecx rINST                # v[AA+1] <- ecx
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
+    SET_VREG_HIGH %ecx, rINST               # v[AA+1] <- ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 /* ------------------------------ */
@@ -551,9 +567,9 @@
     movl    OFF_FP_RESULT_REGISTER(rFP), %eax    # get pointer to result JType.
     movl    (%eax), %eax                    # r0 <- result.i.
     .if 1
-    SET_VREG_OBJECT %eax rINST              # fp[A] <- fp[B]
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <- fp[B]
     .else
-    SET_VREG %eax rINST                     # fp[A] <- fp[B]
+    SET_VREG %eax, rINST                    # fp[A] <- fp[B]
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
@@ -565,7 +581,7 @@
     /* move-exception vAA */
     movl    rSELF, %ecx
     movl    THREAD_EXCEPTION_OFFSET(%ecx), %eax
-    SET_VREG_OBJECT %eax rINST              # fp[AA] <- exception object
+    SET_VREG_OBJECT %eax, rINST             # fp[AA] <- exception object
     movl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
@@ -574,12 +590,12 @@
 .L_op_return_void: /* 0x0e */
 /* File: x86/op_return_void.S */
     .extern MterpThreadFenceForConstructor
-    call    MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
     testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
-    call    MterpSuspendCheck
+    call    SYMBOL(MterpSuspendCheck)
 1:
     xorl    %eax, %eax
     xorl    %ecx, %ecx
@@ -596,14 +612,14 @@
  */
     /* op vAA */
     .extern MterpThreadFenceForConstructor
-    call    MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
     testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
-    call    MterpSuspendCheck
+    call    SYMBOL(MterpSuspendCheck)
 1:
-    GET_VREG %eax rINST                     # eax <- vAA
+    GET_VREG %eax, rINST                    # eax <- vAA
     xorl    %ecx, %ecx
     jmp     MterpReturn
 
@@ -616,15 +632,15 @@
  */
     /* return-wide vAA */
     .extern MterpThreadFenceForConstructor
-    call    MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
     testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
-    call    MterpSuspendCheck
+    call    SYMBOL(MterpSuspendCheck)
 1:
-    GET_VREG %eax rINST                     # eax <- v[AA+0]
-    GET_VREG_HIGH %ecx rINST                # ecx <- v[AA+1]
+    GET_VREG %eax, rINST                    # eax <- v[AA+0]
+    GET_VREG_HIGH %ecx, rINST               # ecx <- v[AA+1]
     jmp     MterpReturn
 
 /* ------------------------------ */
@@ -639,14 +655,14 @@
  */
     /* op vAA */
     .extern MterpThreadFenceForConstructor
-    call    MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
     testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
-    call    MterpSuspendCheck
+    call    SYMBOL(MterpSuspendCheck)
 1:
-    GET_VREG %eax rINST                     # eax <- vAA
+    GET_VREG %eax, rINST                    # eax <- vAA
     xorl    %ecx, %ecx
     jmp     MterpReturn
 
@@ -660,7 +676,7 @@
     movl    $0xf, rINST
     andl    %eax, rINST                     # rINST <- A
     sarl    $4, %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 /* ------------------------------ */
@@ -669,7 +685,7 @@
 /* File: x86/op_const_16.S */
     /* const/16 vAA, #+BBBB */
     movswl  2(rPC), %ecx                    # ecx <- ssssBBBB
-    SET_VREG %ecx rINST                     # vAA <- ssssBBBB
+    SET_VREG %ecx, rINST                    # vAA <- ssssBBBB
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -678,7 +694,7 @@
 /* File: x86/op_const.S */
     /* const vAA, #+BBBBbbbb */
     movl    2(rPC), %eax                    # grab all 32 bits at once
-    SET_VREG %eax rINST                     # vAA<- eax
+    SET_VREG %eax, rINST                    # vAA<- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
 
 /* ------------------------------ */
@@ -688,7 +704,7 @@
     /* const/high16 vAA, #+BBBB0000 */
     movzwl  2(rPC), %eax                    # eax <- 0000BBBB
     sall    $16, %eax                      # eax <- BBBB0000
-    SET_VREG %eax rINST                     # vAA <- eax
+    SET_VREG %eax, rINST                    # vAA <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -699,8 +715,8 @@
     movswl  2(rPC), %eax                    # eax <- ssssBBBB
     movl    rIBASE, %ecx                    # preserve rIBASE (cltd trashes it)
     cltd                                    # rIBASE:eax <- ssssssssssssBBBB
-    SET_VREG_HIGH rIBASE rINST              # store msw
-    SET_VREG %eax rINST                     # store lsw
+    SET_VREG_HIGH rIBASE, rINST             # store msw
+    SET_VREG %eax, rINST                    # store lsw
     movl    %ecx, rIBASE                    # restore rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -712,8 +728,8 @@
     movl    2(rPC), %eax                    # eax <- BBBBbbbb
     movl    rIBASE, %ecx                    # preserve rIBASE (cltd trashes it)
     cltd                                    # rIBASE:eax <- ssssssssssssBBBB
-    SET_VREG_HIGH rIBASE rINST              # store msw
-    SET_VREG %eax rINST                     # store lsw
+    SET_VREG_HIGH rIBASE, rINST             # store msw
+    SET_VREG %eax, rINST                    # store lsw
     movl    %ecx, rIBASE                    # restore rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
 
@@ -725,8 +741,8 @@
     movl    2(rPC), %eax                    # eax <- lsw
     movzbl  rINSTbl, %ecx                   # ecx <- AA
     movl    6(rPC), rINST                   # rINST <- msw
-    SET_VREG %eax %ecx
-    SET_VREG_HIGH  rINST %ecx
+    SET_VREG %eax, %ecx
+    SET_VREG_HIGH  rINST, %ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
 
 /* ------------------------------ */
@@ -736,9 +752,9 @@
     /* const-wide/high16 vAA, #+BBBB000000000000 */
     movzwl  2(rPC), %eax                    # eax <- 0000BBBB
     sall    $16, %eax                      # eax <- BBBB0000
-    SET_VREG_HIGH %eax rINST                # v[AA+1] <- eax
+    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
     xorl    %eax, %eax
-    SET_VREG %eax rINST                     # v[AA+0] <- eax
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -754,7 +770,7 @@
     movl    %eax, OUT_ARG2(%esp)
     movl    rSELF, %eax
     movl    %eax, OUT_ARG3(%esp)
-    call    MterpConstString                # (index, tgt_reg, shadow_frame, self)
+    call    SYMBOL(MterpConstString)        # (index, tgt_reg, shadow_frame, self)
     REFRESH_IBASE
     testl   %eax, %eax
     jnz     MterpPossibleException
@@ -773,7 +789,7 @@
     movl    %eax, OUT_ARG2(%esp)
     movl    rSELF, %eax
     movl    %eax, OUT_ARG3(%esp)
-    call    MterpConstString                # (index, tgt_reg, shadow_frame, self)
+    call    SYMBOL(MterpConstString)        # (index, tgt_reg, shadow_frame, self)
     REFRESH_IBASE
     testl   %eax, %eax
     jnz     MterpPossibleException
@@ -792,7 +808,7 @@
     movl    %eax, OUT_ARG2(%esp)
     movl    rSELF, %eax
     movl    %eax, OUT_ARG3(%esp)
-    call    MterpConstClass                 # (index, tgt_reg, shadow_frame, self)
+    call    SYMBOL(MterpConstClass)         # (index, tgt_reg, shadow_frame, self)
     REFRESH_IBASE
     testl   %eax, %eax
     jnz     MterpPossibleException
@@ -807,11 +823,11 @@
  */
     /* monitor-enter vAA */
     EXPORT_PC
-    GET_VREG %ecx rINST
+    GET_VREG %ecx, rINST
     movl    %ecx, OUT_ARG0(%esp)
     movl    rSELF, %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    artLockObjectFromCode           # (object, self)
+    call    SYMBOL(artLockObjectFromCode)   # (object, self)
     REFRESH_IBASE
     testl   %eax, %eax
     jnz     MterpException
@@ -830,11 +846,11 @@
  */
     /* monitor-exit vAA */
     EXPORT_PC
-    GET_VREG %ecx rINST
+    GET_VREG %ecx, rINST
     movl    %ecx, OUT_ARG0(%esp)
     movl    rSELF, %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    artUnlockObjectFromCode         # (object, self)
+    call    SYMBOL(artUnlockObjectFromCode) # (object, self)
     REFRESH_IBASE
     testl   %eax, %eax
     jnz     MterpException
@@ -857,7 +873,7 @@
     movl    %eax, OUT_ARG2(%esp)
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)
-    call    MterpCheckCast                  # (index, &obj, method, self)
+    call    SYMBOL(MterpCheckCast)          # (index, &obj, method, self)
     REFRESH_IBASE
     testl   %eax, %eax
     jnz     MterpPossibleException
@@ -885,13 +901,13 @@
     movl    %eax, OUT_ARG2(%esp)
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)
-    call    MterpInstanceOf                 # (index, &obj, method, self)
+    call    SYMBOL(MterpInstanceOf)         # (index, &obj, method, self)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException
     andb    $0xf, rINSTbl                  # rINSTbl <- A
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -903,12 +919,12 @@
  */
     mov     rINST, %eax                     # eax <- BA
     sarl    $4, rINST                      # rINST <- B
-    GET_VREG %ecx rINST                     # ecx <- vB (object ref)
+    GET_VREG %ecx, rINST                    # ecx <- vB (object ref)
     testl   %ecx, %ecx                      # is null?
     je      common_errNullObject
     andb    $0xf, %al                      # eax <- A
     movl    MIRROR_ARRAY_LENGTH_OFFSET(%ecx), rINST
-    SET_VREG rINST %eax
+    SET_VREG rINST, %eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 /* ------------------------------ */
@@ -926,7 +942,7 @@
     movl    %ecx, OUT_ARG1(%esp)
     REFRESH_INST 34
     movl    rINST, OUT_ARG2(%esp)
-    call    MterpNewInstance
+    call    SYMBOL(MterpNewInstance)
     REFRESH_IBASE
     testl   %eax, %eax                 # 0 means an exception is thrown
     jz      MterpPossibleException
@@ -952,7 +968,7 @@
     movl    rINST, OUT_ARG2(%esp)
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)
-    call    MterpNewArray
+    call    SYMBOL(MterpNewArray)
     REFRESH_IBASE
     testl   %eax, %eax                      # 0 means an exception is thrown
     jz      MterpPossibleException
@@ -976,7 +992,7 @@
     movl    rPC, OUT_ARG1(%esp)
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG2(%esp)
-    call    MterpFilledNewArray
+    call    SYMBOL(MterpFilledNewArray)
     REFRESH_IBASE
     testl   %eax, %eax                      # 0 means an exception is thrown
     jz      MterpPossibleException
@@ -1001,7 +1017,7 @@
     movl    rPC, OUT_ARG1(%esp)
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG2(%esp)
-    call    MterpFilledNewArrayRange
+    call    SYMBOL(MterpFilledNewArrayRange)
     REFRESH_IBASE
     testl   %eax, %eax                      # 0 means an exception is thrown
     jz      MterpPossibleException
@@ -1016,10 +1032,10 @@
     EXPORT_PC
     movl    2(rPC), %ecx                    # ecx <- BBBBbbbb
     leal    (rPC,%ecx,2), %ecx              # ecx <- PC + BBBBbbbb*2
-    GET_VREG %eax rINST                     # eax <- vAA (array object)
+    GET_VREG %eax, rINST                    # eax <- vAA (array object)
     movl    %eax, OUT_ARG0(%esp)
     movl    %ecx, OUT_ARG1(%esp)
-    call    MterpFillArrayData              # (obj, payload)
+    call    SYMBOL(MterpFillArrayData)      # (obj, payload)
     REFRESH_IBASE
     testl   %eax, %eax                      # 0 means an exception is thrown
     jz      MterpPossibleException
@@ -1034,7 +1050,7 @@
  */
     /* throw vAA */
     EXPORT_PC
-    GET_VREG %eax rINST                     # eax<- vAA (exception object)
+    GET_VREG %eax, rINST                    # eax<- vAA (exception object)
     testl   %eax, %eax
     jz      common_errNullObject
     movl    rSELF,%ecx
@@ -1133,11 +1149,11 @@
  */
     /* op vAA, +BBBB */
     movl    2(rPC), %ecx                    # ecx <- BBBBbbbb
-    GET_VREG %eax rINST                     # eax <- vAA
+    GET_VREG %eax, rINST                    # eax <- vAA
     leal    (rPC,%ecx,2), %ecx              # ecx <- PC + BBBBbbbb*2
     movl    %eax, OUT_ARG1(%esp)            # ARG1 <- vAA
     movl    %ecx, OUT_ARG0(%esp)            # ARG0 <- switchData
-    call    MterpDoPackedSwitch
+    call    SYMBOL(MterpDoPackedSwitch)
     addl    %eax, %eax
     leal    (rPC, %eax), rPC
     FETCH_INST
@@ -1167,11 +1183,11 @@
  */
     /* op vAA, +BBBB */
     movl    2(rPC), %ecx                    # ecx <- BBBBbbbb
-    GET_VREG %eax rINST                     # eax <- vAA
+    GET_VREG %eax, rINST                    # eax <- vAA
     leal    (rPC,%ecx,2), %ecx              # ecx <- PC + BBBBbbbb*2
     movl    %eax, OUT_ARG1(%esp)            # ARG1 <- vAA
     movl    %ecx, OUT_ARG0(%esp)            # ARG0 <- switchData
-    call    MterpDoSparseSwitch
+    call    SYMBOL(MterpDoSparseSwitch)
     addl    %eax, %eax
     leal    (rPC, %eax), rPC
     FETCH_INST
@@ -1223,7 +1239,7 @@
 .Lop_cmpl_float_less:
     decl    %eax
 .Lop_cmpl_float_finish:
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -1264,7 +1280,7 @@
 .Lop_cmpg_float_less:
     decl    %eax
 .Lop_cmpg_float_finish:
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -1305,7 +1321,7 @@
 .Lop_cmpl_double_less:
     decl    %eax
 .Lop_cmpl_double_finish:
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -1346,7 +1362,7 @@
 .Lop_cmpg_double_less:
     decl    %eax
 .Lop_cmpg_double_finish:
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -1361,17 +1377,17 @@
     /* cmp-long vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG_HIGH %eax %eax                 # eax <- v[BB+1], BB is clobbered
+    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1], BB is clobbered
     cmpl    VREG_HIGH_ADDRESS(%ecx), %eax
     jl      .Lop_cmp_long_smaller
     jg      .Lop_cmp_long_bigger
     movzbl  2(rPC), %eax                    # eax <- BB, restore BB
-    GET_VREG %eax %eax                      # eax <- v[BB]
+    GET_VREG %eax, %eax                     # eax <- v[BB]
     sub     VREG_ADDRESS(%ecx), %eax
     ja      .Lop_cmp_long_bigger
     jb      .Lop_cmp_long_smaller
 .Lop_cmp_long_finish:
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 .Lop_cmp_long_bigger:
@@ -1397,7 +1413,7 @@
     /* if-cmp vA, vB, +CCCC */
     movzx   rINSTbl, %ecx                   # ecx <- A+
     andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax %ecx                      # eax <- vA
+    GET_VREG %eax, %ecx                     # eax <- vA
     sarl    $4, rINST                      # rINST <- B
     cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
     movl    $2, %eax                       # assume not taken
@@ -1432,7 +1448,7 @@
     /* if-cmp vA, vB, +CCCC */
     movzx   rINSTbl, %ecx                   # ecx <- A+
     andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax %ecx                      # eax <- vA
+    GET_VREG %eax, %ecx                     # eax <- vA
     sarl    $4, rINST                      # rINST <- B
     cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
     movl    $2, %eax                       # assume not taken
@@ -1467,7 +1483,7 @@
     /* if-cmp vA, vB, +CCCC */
     movzx   rINSTbl, %ecx                   # ecx <- A+
     andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax %ecx                      # eax <- vA
+    GET_VREG %eax, %ecx                     # eax <- vA
     sarl    $4, rINST                      # rINST <- B
     cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
     movl    $2, %eax                       # assume not taken
@@ -1502,7 +1518,7 @@
     /* if-cmp vA, vB, +CCCC */
     movzx   rINSTbl, %ecx                   # ecx <- A+
     andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax %ecx                      # eax <- vA
+    GET_VREG %eax, %ecx                     # eax <- vA
     sarl    $4, rINST                      # rINST <- B
     cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
     movl    $2, %eax                       # assume not taken
@@ -1537,7 +1553,7 @@
     /* if-cmp vA, vB, +CCCC */
     movzx   rINSTbl, %ecx                   # ecx <- A+
     andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax %ecx                      # eax <- vA
+    GET_VREG %eax, %ecx                     # eax <- vA
     sarl    $4, rINST                      # rINST <- B
     cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
     movl    $2, %eax                       # assume not taken
@@ -1572,7 +1588,7 @@
     /* if-cmp vA, vB, +CCCC */
     movzx   rINSTbl, %ecx                   # ecx <- A+
     andb    $0xf, %cl                      # ecx <- A
-    GET_VREG %eax %ecx                      # eax <- vA
+    GET_VREG %eax, %ecx                     # eax <- vA
     sarl    $4, rINST                      # rINST <- B
     cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
     movl    $2, %eax                       # assume not taken
@@ -1857,14 +1873,14 @@
     /* op vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecx <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
     testl   %eax, %eax                      # null array object?
     je      common_errNullObject            # bail if so
     cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
     jae     common_errArrayIndex            # index >= length, bail.
     movl   MIRROR_INT_ARRAY_DATA_OFFSET(%eax,%ecx,4), %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -1877,15 +1893,15 @@
     /* aget-wide vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecx <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
     testl   %eax, %eax                      # null array object?
     je      common_errNullObject            # bail if so
     cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
     jae     common_errArrayIndex            # index >= length, bail.
     leal    MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
     movq    (%eax), %xmm0                   # xmm0 <- vBB[vCC]
-    SET_WIDE_FP_VREG %xmm0 rINST            # vAA <- xmm0
+    SET_WIDE_FP_VREG %xmm0, rINST           # vAA <- xmm0
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -1900,17 +1916,17 @@
     /* op vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecs <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecs <- vCC (requested index)
     EXPORT_PC
     movl    %eax, OUT_ARG0(%esp)
     movl    %ecx, OUT_ARG1(%esp)
-    call    artAGetObjectFromMterp          # (array, index)
+    call    SYMBOL(artAGetObjectFromMterp)  # (array, index)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException
-    SET_VREG_OBJECT %eax rINST
+    SET_VREG_OBJECT %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -1927,14 +1943,14 @@
     /* op vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecx <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
     testl   %eax, %eax                      # null array object?
     je      common_errNullObject            # bail if so
     cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
     jae     common_errArrayIndex            # index >= length, bail.
     movzbl   MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -1952,14 +1968,14 @@
     /* op vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecx <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
     testl   %eax, %eax                      # null array object?
     je      common_errNullObject            # bail if so
     cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
     jae     common_errArrayIndex            # index >= length, bail.
     movsbl   MIRROR_BYTE_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -1977,14 +1993,14 @@
     /* op vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecx <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
     testl   %eax, %eax                      # null array object?
     je      common_errNullObject            # bail if so
     cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
     jae     common_errArrayIndex            # index >= length, bail.
     movzwl   MIRROR_CHAR_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -2002,14 +2018,14 @@
     /* op vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecx <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
     testl   %eax, %eax                      # null array object?
     je      common_errNullObject            # bail if so
     cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
     jae     common_errArrayIndex            # index >= length, bail.
     movswl   MIRROR_SHORT_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -2026,14 +2042,14 @@
     /* op vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecx <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
     testl   %eax, %eax                      # null array object?
     je      common_errNullObject            # bail if so
     cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
     jae     common_errArrayIndex            # index >= length, bail.
     leal    MIRROR_INT_ARRAY_DATA_OFFSET(%eax,%ecx,4), %eax
-    GET_VREG rINST rINST
+    GET_VREG rINST, rINST
     movl  rINST, (%eax)
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2048,14 +2064,14 @@
     /* aput-wide vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecx <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
     testl   %eax, %eax                      # null array object?
     je      common_errNullObject            # bail if so
     cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
     jae     common_errArrayIndex            # index >= length, bail.
     leal    MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
-    GET_WIDE_FP_VREG %xmm0 rINST            # xmm0 <- vAA
+    GET_WIDE_FP_VREG %xmm0, rINST           # xmm0 <- vAA
     movq    %xmm0, (%eax)                   # vBB[vCC] <- xmm0
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2073,7 +2089,7 @@
     movl    rPC, OUT_ARG1(%esp)
     REFRESH_INST 77
     movl    rINST, OUT_ARG2(%esp)
-    call    MterpAputObject            # (array, index)
+    call    SYMBOL(MterpAputObject)         # (array, index)
     REFRESH_IBASE
     testl   %eax, %eax
     jz      MterpPossibleException
@@ -2093,14 +2109,14 @@
     /* op vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecx <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
     testl   %eax, %eax                      # null array object?
     je      common_errNullObject            # bail if so
     cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
     jae     common_errArrayIndex            # index >= length, bail.
     leal    MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
-    GET_VREG rINST rINST
+    GET_VREG rINST, rINST
     movb  rINSTbl, (%eax)
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2119,14 +2135,14 @@
     /* op vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecx <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
     testl   %eax, %eax                      # null array object?
     je      common_errNullObject            # bail if so
     cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
     jae     common_errArrayIndex            # index >= length, bail.
     leal    MIRROR_BYTE_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
-    GET_VREG rINST rINST
+    GET_VREG rINST, rINST
     movb  rINSTbl, (%eax)
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2145,14 +2161,14 @@
     /* op vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecx <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
     testl   %eax, %eax                      # null array object?
     je      common_errNullObject            # bail if so
     cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
     jae     common_errArrayIndex            # index >= length, bail.
     leal    MIRROR_CHAR_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
-    GET_VREG rINST rINST
+    GET_VREG rINST, rINST
     movw  rINSTw, (%eax)
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2171,14 +2187,14 @@
     /* op vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecx <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
     testl   %eax, %eax                      # null array object?
     je      common_errNullObject            # bail if so
     cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
     jae     common_errArrayIndex            # index >= length, bail.
     leal    MIRROR_SHORT_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
-    GET_VREG rINST rINST
+    GET_VREG rINST, rINST
     movw  rINSTw, (%eax)
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2203,16 +2219,16 @@
     movl    %eax, OUT_ARG2(%esp)            # referrer
     mov     rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)            # self
-    call    artGet32InstanceFromCode
+    call    SYMBOL(artGet32InstanceFromCode)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException                  # bail out
     andb    $0xf, rINSTbl                  # rINST <- A
     .if 0
-    SET_VREG_OBJECT %eax rINST              # fp[A] <-value
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <-value
     .else
-    SET_VREG %eax rINST                     # fp[A] <-value
+    SET_VREG %eax, rINST                    # fp[A] <-value
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2236,13 +2252,13 @@
     movl    %eax, OUT_ARG2(%esp)            # referrer
     mov     rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)            # self
-    call    artGet64InstanceFromCode
+    call    SYMBOL(artGet64InstanceFromCode)
     mov     rSELF, %ecx
     cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException                  # bail out
     andb    $0xf, rINSTbl                  # rINST <- A
-    SET_VREG %eax rINST
-    SET_VREG_HIGH %edx rINST
+    SET_VREG %eax, rINST
+    SET_VREG_HIGH %edx, rINST
     REFRESH_IBASE_FROM_SELF %ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2267,16 +2283,16 @@
     movl    %eax, OUT_ARG2(%esp)            # referrer
     mov     rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)            # self
-    call    artGetObjInstanceFromCode
+    call    SYMBOL(artGetObjInstanceFromCode)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException                  # bail out
     andb    $0xf, rINSTbl                  # rINST <- A
     .if 1
-    SET_VREG_OBJECT %eax rINST              # fp[A] <-value
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <-value
     .else
-    SET_VREG %eax rINST                     # fp[A] <-value
+    SET_VREG %eax, rINST                    # fp[A] <-value
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2302,16 +2318,16 @@
     movl    %eax, OUT_ARG2(%esp)            # referrer
     mov     rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)            # self
-    call    artGetBooleanInstanceFromCode
+    call    SYMBOL(artGetBooleanInstanceFromCode)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException                  # bail out
     andb    $0xf, rINSTbl                  # rINST <- A
     .if 0
-    SET_VREG_OBJECT %eax rINST              # fp[A] <-value
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <-value
     .else
-    SET_VREG %eax rINST                     # fp[A] <-value
+    SET_VREG %eax, rINST                    # fp[A] <-value
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2337,16 +2353,16 @@
     movl    %eax, OUT_ARG2(%esp)            # referrer
     mov     rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)            # self
-    call    artGetByteInstanceFromCode
+    call    SYMBOL(artGetByteInstanceFromCode)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException                  # bail out
     andb    $0xf, rINSTbl                  # rINST <- A
     .if 0
-    SET_VREG_OBJECT %eax rINST              # fp[A] <-value
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <-value
     .else
-    SET_VREG %eax rINST                     # fp[A] <-value
+    SET_VREG %eax, rINST                    # fp[A] <-value
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2372,16 +2388,16 @@
     movl    %eax, OUT_ARG2(%esp)            # referrer
     mov     rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)            # self
-    call    artGetCharInstanceFromCode
+    call    SYMBOL(artGetCharInstanceFromCode)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException                  # bail out
     andb    $0xf, rINSTbl                  # rINST <- A
     .if 0
-    SET_VREG_OBJECT %eax rINST              # fp[A] <-value
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <-value
     .else
-    SET_VREG %eax rINST                     # fp[A] <-value
+    SET_VREG %eax, rINST                    # fp[A] <-value
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2407,16 +2423,16 @@
     movl    %eax, OUT_ARG2(%esp)            # referrer
     mov     rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)            # self
-    call    artGetShortInstanceFromCode
+    call    SYMBOL(artGetShortInstanceFromCode)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException                  # bail out
     andb    $0xf, rINSTbl                  # rINST <- A
     .if 0
-    SET_VREG_OBJECT %eax rINST              # fp[A] <-value
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <-value
     .else
-    SET_VREG %eax rINST                     # fp[A] <-value
+    SET_VREG %eax, rINST                    # fp[A] <-value
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2444,7 +2460,7 @@
     movl    %eax, OUT_ARG2(%esp)            # fp[A]
     movl    OFF_FP_METHOD(rFP), %eax
     movl    %eax, OUT_ARG3(%esp)            # referrer
-    call    artSet32InstanceFromMterp
+    call    SYMBOL(artSet32InstanceFromMterp)
     testl   %eax, %eax
     jnz     MterpPossibleException
     REFRESH_IBASE
@@ -2468,7 +2484,7 @@
     movl    %eax, OUT_ARG2(%esp)            # &fp[A]
     movl    OFF_FP_METHOD(rFP), %eax
     movl    %eax, OUT_ARG3(%esp)            # referrer
-    call    artSet64InstanceFromMterp
+    call    SYMBOL(artSet64InstanceFromMterp)
     testl   %eax, %eax
     jnz     MterpPossibleException
     REFRESH_IBASE
@@ -2486,7 +2502,7 @@
     movl    rINST, OUT_ARG2(%esp)
     movl    rSELF, %eax
     movl    %eax, OUT_ARG3(%esp)
-    call    MterpIputObject
+    call    SYMBOL(MterpIputObject)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
@@ -2516,7 +2532,7 @@
     movl    %eax, OUT_ARG2(%esp)            # fp[A]
     movl    OFF_FP_METHOD(rFP), %eax
     movl    %eax, OUT_ARG3(%esp)            # referrer
-    call    artSet8InstanceFromMterp
+    call    SYMBOL(artSet8InstanceFromMterp)
     testl   %eax, %eax
     jnz     MterpPossibleException
     REFRESH_IBASE
@@ -2547,7 +2563,7 @@
     movl    %eax, OUT_ARG2(%esp)            # fp[A]
     movl    OFF_FP_METHOD(rFP), %eax
     movl    %eax, OUT_ARG3(%esp)            # referrer
-    call    artSet8InstanceFromMterp
+    call    SYMBOL(artSet8InstanceFromMterp)
     testl   %eax, %eax
     jnz     MterpPossibleException
     REFRESH_IBASE
@@ -2578,7 +2594,7 @@
     movl    %eax, OUT_ARG2(%esp)            # fp[A]
     movl    OFF_FP_METHOD(rFP), %eax
     movl    %eax, OUT_ARG3(%esp)            # referrer
-    call    artSet16InstanceFromMterp
+    call    SYMBOL(artSet16InstanceFromMterp)
     testl   %eax, %eax
     jnz     MterpPossibleException
     REFRESH_IBASE
@@ -2609,7 +2625,7 @@
     movl    %eax, OUT_ARG2(%esp)            # fp[A]
     movl    OFF_FP_METHOD(rFP), %eax
     movl    %eax, OUT_ARG3(%esp)            # referrer
-    call    artSet16InstanceFromMterp
+    call    SYMBOL(artSet16InstanceFromMterp)
     testl   %eax, %eax
     jnz     MterpPossibleException
     REFRESH_IBASE
@@ -2634,15 +2650,15 @@
     movl    %eax, OUT_ARG1(%esp)            # referrer
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG2(%esp)            # self
-    call    artGet32StaticFromCode
+    call    SYMBOL(artGet32StaticFromCode)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException
     .if 0
-    SET_VREG_OBJECT %eax rINST              # fp[A] <- value
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <- value
     .else
-    SET_VREG %eax rINST                     # fp[A] <- value
+    SET_VREG %eax, rINST                    # fp[A] <- value
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2663,12 +2679,12 @@
     movl    %eax, OUT_ARG1(%esp)            # referrer
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG2(%esp)            # self
-    call    artGet64StaticFromCode
+    call    SYMBOL(artGet64StaticFromCode)
     movl    rSELF, %ecx
     cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException
-    SET_VREG %eax rINST                     # fp[A]<- low part
-    SET_VREG_HIGH %edx rINST                # fp[A+1]<- high part
+    SET_VREG %eax, rINST                    # fp[A]<- low part
+    SET_VREG_HIGH %edx, rINST               # fp[A+1]<- high part
     REFRESH_IBASE_FROM_SELF %ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2691,15 +2707,15 @@
     movl    %eax, OUT_ARG1(%esp)            # referrer
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG2(%esp)            # self
-    call    artGetObjStaticFromCode
+    call    SYMBOL(artGetObjStaticFromCode)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException
     .if 1
-    SET_VREG_OBJECT %eax rINST              # fp[A] <- value
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <- value
     .else
-    SET_VREG %eax rINST                     # fp[A] <- value
+    SET_VREG %eax, rINST                    # fp[A] <- value
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2723,15 +2739,15 @@
     movl    %eax, OUT_ARG1(%esp)            # referrer
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG2(%esp)            # self
-    call    artGetBooleanStaticFromCode
+    call    SYMBOL(artGetBooleanStaticFromCode)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException
     .if 0
-    SET_VREG_OBJECT %eax rINST              # fp[A] <- value
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <- value
     .else
-    SET_VREG %eax rINST                     # fp[A] <- value
+    SET_VREG %eax, rINST                    # fp[A] <- value
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2755,15 +2771,15 @@
     movl    %eax, OUT_ARG1(%esp)            # referrer
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG2(%esp)            # self
-    call    artGetByteStaticFromCode
+    call    SYMBOL(artGetByteStaticFromCode)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException
     .if 0
-    SET_VREG_OBJECT %eax rINST              # fp[A] <- value
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <- value
     .else
-    SET_VREG %eax rINST                     # fp[A] <- value
+    SET_VREG %eax, rINST                    # fp[A] <- value
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2787,15 +2803,15 @@
     movl    %eax, OUT_ARG1(%esp)            # referrer
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG2(%esp)            # self
-    call    artGetCharStaticFromCode
+    call    SYMBOL(artGetCharStaticFromCode)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException
     .if 0
-    SET_VREG_OBJECT %eax rINST              # fp[A] <- value
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <- value
     .else
-    SET_VREG %eax rINST                     # fp[A] <- value
+    SET_VREG %eax, rINST                    # fp[A] <- value
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2819,15 +2835,15 @@
     movl    %eax, OUT_ARG1(%esp)            # referrer
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG2(%esp)            # self
-    call    artGetShortStaticFromCode
+    call    SYMBOL(artGetShortStaticFromCode)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException
     .if 0
-    SET_VREG_OBJECT %eax rINST              # fp[A] <- value
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <- value
     .else
-    SET_VREG %eax rINST                     # fp[A] <- value
+    SET_VREG %eax, rINST                    # fp[A] <- value
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -2846,13 +2862,13 @@
     EXPORT_PC
     movzwl  2(rPC), %eax
     movl    %eax, OUT_ARG0(%esp)            # field ref BBBB
-    GET_VREG rINST rINST
+    GET_VREG rINST, rINST
     movl    rINST, OUT_ARG1(%esp)           # fp[AA]
     movl    OFF_FP_METHOD(rFP), %eax
     movl    %eax, OUT_ARG2(%esp)            # referrer
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)            # self
-    call    artSet32StaticFromCode
+    call    SYMBOL(artSet32StaticFromCode)
     testl   %eax, %eax
     jnz     MterpException
     REFRESH_IBASE
@@ -2877,7 +2893,7 @@
     movl    %eax, OUT_ARG2(%esp)            # &fp[AA]
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)            # self
-    call    artSet64IndirectStaticFromMterp
+    call    SYMBOL(artSet64IndirectStaticFromMterp)
     testl   %eax, %eax
     jnz     MterpException
     REFRESH_IBASE
@@ -2895,7 +2911,7 @@
     movl    rINST, OUT_ARG2(%esp)
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)
-    call    MterpSputObject
+    call    SYMBOL(MterpSputObject)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
@@ -2916,13 +2932,13 @@
     EXPORT_PC
     movzwl  2(rPC), %eax
     movl    %eax, OUT_ARG0(%esp)            # field ref BBBB
-    GET_VREG rINST rINST
+    GET_VREG rINST, rINST
     movl    rINST, OUT_ARG1(%esp)           # fp[AA]
     movl    OFF_FP_METHOD(rFP), %eax
     movl    %eax, OUT_ARG2(%esp)            # referrer
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)            # self
-    call    artSet8StaticFromCode
+    call    SYMBOL(artSet8StaticFromCode)
     testl   %eax, %eax
     jnz     MterpException
     REFRESH_IBASE
@@ -2944,13 +2960,13 @@
     EXPORT_PC
     movzwl  2(rPC), %eax
     movl    %eax, OUT_ARG0(%esp)            # field ref BBBB
-    GET_VREG rINST rINST
+    GET_VREG rINST, rINST
     movl    rINST, OUT_ARG1(%esp)           # fp[AA]
     movl    OFF_FP_METHOD(rFP), %eax
     movl    %eax, OUT_ARG2(%esp)            # referrer
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)            # self
-    call    artSet8StaticFromCode
+    call    SYMBOL(artSet8StaticFromCode)
     testl   %eax, %eax
     jnz     MterpException
     REFRESH_IBASE
@@ -2972,13 +2988,13 @@
     EXPORT_PC
     movzwl  2(rPC), %eax
     movl    %eax, OUT_ARG0(%esp)            # field ref BBBB
-    GET_VREG rINST rINST
+    GET_VREG rINST, rINST
     movl    rINST, OUT_ARG1(%esp)           # fp[AA]
     movl    OFF_FP_METHOD(rFP), %eax
     movl    %eax, OUT_ARG2(%esp)            # referrer
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)            # self
-    call    artSet16StaticFromCode
+    call    SYMBOL(artSet16StaticFromCode)
     testl   %eax, %eax
     jnz     MterpException
     REFRESH_IBASE
@@ -3000,13 +3016,13 @@
     EXPORT_PC
     movzwl  2(rPC), %eax
     movl    %eax, OUT_ARG0(%esp)            # field ref BBBB
-    GET_VREG rINST rINST
+    GET_VREG rINST, rINST
     movl    rINST, OUT_ARG1(%esp)           # fp[AA]
     movl    OFF_FP_METHOD(rFP), %eax
     movl    %eax, OUT_ARG2(%esp)            # referrer
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)            # self
-    call    artSet16StaticFromCode
+    call    SYMBOL(artSet16StaticFromCode)
     testl   %eax, %eax
     jnz     MterpException
     REFRESH_IBASE
@@ -3032,7 +3048,7 @@
     movl    rPC, OUT_ARG2(%esp)
     REFRESH_INST 110
     movl    rINST, OUT_ARG3(%esp)
-    call    MterpInvokeVirtual
+    call    SYMBOL(MterpInvokeVirtual)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
@@ -3065,7 +3081,7 @@
     movl    rPC, OUT_ARG2(%esp)
     REFRESH_INST 111
     movl    rINST, OUT_ARG3(%esp)
-    call    MterpInvokeSuper
+    call    SYMBOL(MterpInvokeSuper)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
@@ -3098,7 +3114,7 @@
     movl    rPC, OUT_ARG2(%esp)
     REFRESH_INST 112
     movl    rINST, OUT_ARG3(%esp)
-    call    MterpInvokeDirect
+    call    SYMBOL(MterpInvokeDirect)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
@@ -3124,7 +3140,7 @@
     movl    rPC, OUT_ARG2(%esp)
     REFRESH_INST 113
     movl    rINST, OUT_ARG3(%esp)
-    call    MterpInvokeStatic
+    call    SYMBOL(MterpInvokeStatic)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
@@ -3151,7 +3167,7 @@
     movl    rPC, OUT_ARG2(%esp)
     REFRESH_INST 114
     movl    rINST, OUT_ARG3(%esp)
-    call    MterpInvokeInterface
+    call    SYMBOL(MterpInvokeInterface)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
@@ -3173,7 +3189,7 @@
     testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
-    call    MterpSuspendCheck
+    call    SYMBOL(MterpSuspendCheck)
 1:
     xorl    %eax, %eax
     xorl    %ecx, %ecx
@@ -3198,7 +3214,7 @@
     movl    rPC, OUT_ARG2(%esp)
     REFRESH_INST 116
     movl    rINST, OUT_ARG3(%esp)
-    call    MterpInvokeVirtualRange
+    call    SYMBOL(MterpInvokeVirtualRange)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
@@ -3224,7 +3240,7 @@
     movl    rPC, OUT_ARG2(%esp)
     REFRESH_INST 117
     movl    rINST, OUT_ARG3(%esp)
-    call    MterpInvokeSuperRange
+    call    SYMBOL(MterpInvokeSuperRange)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
@@ -3250,7 +3266,7 @@
     movl    rPC, OUT_ARG2(%esp)
     REFRESH_INST 118
     movl    rINST, OUT_ARG3(%esp)
-    call    MterpInvokeDirectRange
+    call    SYMBOL(MterpInvokeDirectRange)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
@@ -3276,7 +3292,7 @@
     movl    rPC, OUT_ARG2(%esp)
     REFRESH_INST 119
     movl    rINST, OUT_ARG3(%esp)
-    call    MterpInvokeStaticRange
+    call    SYMBOL(MterpInvokeStaticRange)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
@@ -3302,7 +3318,7 @@
     movl    rPC, OUT_ARG2(%esp)
     REFRESH_INST 120
     movl    rINST, OUT_ARG3(%esp)
-    call    MterpInvokeInterfaceRange
+    call    SYMBOL(MterpInvokeInterfaceRange)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
@@ -3343,10 +3359,10 @@
     /* unop vA, vB */
     movzbl  rINSTbl,%ecx                    # ecx <- A+
     sarl    $4,rINST                       # rINST <- B
-    GET_VREG %eax rINST                     # eax <- vB
+    GET_VREG %eax, rINST                    # eax <- vB
     andb    $0xf,%cl                       # ecx <- A
     negl    %eax
-    SET_VREG %eax %ecx
+    SET_VREG %eax, %ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 
@@ -3362,10 +3378,10 @@
     /* unop vA, vB */
     movzbl  rINSTbl,%ecx                    # ecx <- A+
     sarl    $4,rINST                       # rINST <- B
-    GET_VREG %eax rINST                     # eax <- vB
+    GET_VREG %eax, rINST                    # eax <- vB
     andb    $0xf,%cl                       # ecx <- A
     notl %eax
-    SET_VREG %eax %ecx
+    SET_VREG %eax, %ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 
@@ -3377,13 +3393,13 @@
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $4, %ecx                       # ecx <- B
     andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax %ecx                      # eax <- v[B+0]
-    GET_VREG_HIGH %ecx %ecx                 # ecx <- v[B+1]
+    GET_VREG %eax, %ecx                     # eax <- v[B+0]
+    GET_VREG_HIGH %ecx, %ecx                # ecx <- v[B+1]
     negl    %eax
     adcl    $0, %ecx
     negl    %ecx
-    SET_VREG %eax rINST                     # v[A+0] <- eax
-    SET_VREG_HIGH %ecx rINST                # v[A+1] <- ecx
+    SET_VREG %eax, rINST                    # v[A+0] <- eax
+    SET_VREG_HIGH %ecx, rINST               # v[A+1] <- ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 
@@ -3395,12 +3411,12 @@
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $4, %ecx                       # ecx <- B
     andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax %ecx                      # eax <- v[B+0]
-    GET_VREG_HIGH %ecx %ecx                 # ecx <- v[B+1]
+    GET_VREG %eax, %ecx                     # eax <- v[B+0]
+    GET_VREG_HIGH %ecx, %ecx                # ecx <- v[B+1]
     notl    %eax
     notl    %ecx
-    SET_VREG %eax rINST                     # v[A+0] <- eax
-    SET_VREG_HIGH %ecx rINST                # v[A+1] <- ecx
+    SET_VREG %eax, rINST                    # v[A+0] <- eax
+    SET_VREG_HIGH %ecx, rINST               # v[A+1] <- ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 /* ------------------------------ */
@@ -3456,12 +3472,12 @@
     /* int to long vA, vB */
     movzbl  rINSTbl, %eax                   # eax <- +A
     sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax %eax                      # eax <- vB
+    GET_VREG %eax, %eax                     # eax <- vB
     andb    $0xf, rINSTbl                  # rINST <- A
     movl    rIBASE, %ecx                    # cltd trashes rIBASE/edx
     cltd                                    # rINST:eax<- sssssssBBBBBBBB
-    SET_VREG_HIGH rIBASE rINST              # v[A+1] <- rIBASE
-    SET_VREG %eax rINST                     # v[A+0] <- %eax
+    SET_VREG_HIGH rIBASE, rINST             # v[A+1] <- rIBASE
+    SET_VREG %eax, rINST                    # v[A+0] <- %eax
     movl    %ecx, rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
@@ -3523,11 +3539,11 @@
     movzbl  rINSTbl, %eax                   # eax <- BA
     andb    $0xf, %al                      # eax <- A
     shrl    $4, rINST                      # rINST <- B
-    GET_VREG rINST rINST
+    GET_VREG rINST, rINST
     .if 0
-    SET_VREG_OBJECT rINST %eax              # fp[A] <- fp[B]
+    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
     .else
-    SET_VREG rINST %eax                     # fp[A] <- fp[B]
+    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
@@ -3904,10 +3920,10 @@
     /* unop vA, vB */
     movzbl  rINSTbl,%ecx                    # ecx <- A+
     sarl    $4,rINST                       # rINST <- B
-    GET_VREG %eax rINST                     # eax <- vB
+    GET_VREG %eax, rINST                    # eax <- vB
     andb    $0xf,%cl                       # ecx <- A
     movsbl  %al, %eax
-    SET_VREG %eax %ecx
+    SET_VREG %eax, %ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 
@@ -3923,10 +3939,10 @@
     /* unop vA, vB */
     movzbl  rINSTbl,%ecx                    # ecx <- A+
     sarl    $4,rINST                       # rINST <- B
-    GET_VREG %eax rINST                     # eax <- vB
+    GET_VREG %eax, rINST                    # eax <- vB
     andb    $0xf,%cl                       # ecx <- A
     movzwl  %ax,%eax
-    SET_VREG %eax %ecx
+    SET_VREG %eax, %ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 
@@ -3942,10 +3958,10 @@
     /* unop vA, vB */
     movzbl  rINSTbl,%ecx                    # ecx <- A+
     sarl    $4,rINST                       # rINST <- B
-    GET_VREG %eax rINST                     # eax <- vB
+    GET_VREG %eax, rINST                    # eax <- vB
     andb    $0xf,%cl                       # ecx <- A
     movswl %ax, %eax
-    SET_VREG %eax %ecx
+    SET_VREG %eax, %ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 
@@ -3966,9 +3982,9 @@
     /* binop vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB
+    GET_VREG %eax, %eax                     # eax <- vBB
     addl    (rFP,%ecx,4), %eax                                  # ex: addl    (rFP,%ecx,4),%eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -3989,9 +4005,9 @@
     /* binop vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB
+    GET_VREG %eax, %eax                     # eax <- vBB
     subl    (rFP,%ecx,4), %eax                                  # ex: addl    (rFP,%ecx,4),%eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -4005,11 +4021,11 @@
     /* mul vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB
+    GET_VREG %eax, %eax                     # eax <- vBB
     mov     rIBASE, LOCAL0(%esp)
     imull   (rFP,%ecx,4), %eax              # trashes rIBASE/edx
     mov     LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -4024,8 +4040,8 @@
     /* div/rem vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB
-    GET_VREG %ecx %ecx                      # ecx <- vCC
+    GET_VREG %eax, %eax                     # eax <- vBB
+    GET_VREG %ecx, %ecx                     # ecx <- vCC
     mov     rIBASE, LOCAL0(%esp)
     testl   %ecx, %ecx
     je      common_errDivideByZero
@@ -4061,7 +4077,7 @@
     xorl    %edx, %edx                      # Clear %edx before divide
     div     %cx
 .Lop_div_int_finish:
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     mov     LOCAL0(%esp), rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -4078,8 +4094,8 @@
     /* div/rem vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB
-    GET_VREG %ecx %ecx                      # ecx <- vCC
+    GET_VREG %eax, %eax                     # eax <- vBB
+    GET_VREG %ecx, %ecx                     # ecx <- vCC
     mov     rIBASE, LOCAL0(%esp)
     testl   %ecx, %ecx
     je      common_errDivideByZero
@@ -4115,7 +4131,7 @@
     xorl    %edx, %edx                      # Clear %edx before divide
     div     %cx
 .Lop_rem_int_finish:
-    SET_VREG rIBASE rINST
+    SET_VREG rIBASE, rINST
     mov     LOCAL0(%esp), rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -4137,9 +4153,9 @@
     /* binop vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB
+    GET_VREG %eax, %eax                     # eax <- vBB
     andl    (rFP,%ecx,4), %eax                                  # ex: addl    (rFP,%ecx,4),%eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -4160,9 +4176,9 @@
     /* binop vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB
+    GET_VREG %eax, %eax                     # eax <- vBB
     orl     (rFP,%ecx,4), %eax                                  # ex: addl    (rFP,%ecx,4),%eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -4183,9 +4199,9 @@
     /* binop vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB
+    GET_VREG %eax, %eax                     # eax <- vBB
     xorl    (rFP,%ecx,4), %eax                                  # ex: addl    (rFP,%ecx,4),%eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -4201,10 +4217,10 @@
     /* binop vAA, vBB, vCC */
     movzbl  2(rPC),%eax                     # eax <- BB
     movzbl  3(rPC),%ecx                     # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB
-    GET_VREG %ecx %ecx                      # eax <- vBB
+    GET_VREG %eax, %eax                     # eax <- vBB
+    GET_VREG %ecx, %ecx                     # eax <- vBB
     sall    %cl, %eax                                  # ex: addl    %ecx,%eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -4220,10 +4236,10 @@
     /* binop vAA, vBB, vCC */
     movzbl  2(rPC),%eax                     # eax <- BB
     movzbl  3(rPC),%ecx                     # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB
-    GET_VREG %ecx %ecx                      # eax <- vBB
+    GET_VREG %eax, %eax                     # eax <- vBB
+    GET_VREG %ecx, %ecx                     # eax <- vBB
     sarl    %cl, %eax                                  # ex: addl    %ecx,%eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -4239,10 +4255,10 @@
     /* binop vAA, vBB, vCC */
     movzbl  2(rPC),%eax                     # eax <- BB
     movzbl  3(rPC),%ecx                     # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB
-    GET_VREG %ecx %ecx                      # eax <- vBB
+    GET_VREG %eax, %eax                     # eax <- vBB
+    GET_VREG %ecx, %ecx                     # eax <- vBB
     shrl    %cl, %eax                                  # ex: addl    %ecx,%eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -4255,16 +4271,16 @@
  * Generic 64-bit binary operation.
  */
     /* binop vAA, vBB, vCC */
-    movzbl  2(rPC),%eax                     # eax <- BB
-    movzbl  3(rPC),%ecx                     # ecx <- CC
-    movl    rIBASE,LOCAL0(%esp)             # save rIBASE
-    GET_VREG rIBASE %eax                    # rIBASE <- v[BB+0]
-    GET_VREG_HIGH %eax %eax                 # eax <- v[BB+1]
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    movl    rIBASE, LOCAL0(%esp)            # save rIBASE
+    GET_VREG rIBASE, %eax                   # rIBASE <- v[BB+0]
+    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1]
     addl    (rFP,%ecx,4), rIBASE                                 # ex: addl   (rFP,%ecx,4),rIBASE
     adcl    4(rFP,%ecx,4), %eax                                 # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG rIBASE rINST                   # v[AA+0] <- rIBASE
-    movl    LOCAL0(%esp),rIBASE             # restore rIBASE
-    SET_VREG_HIGH %eax rINST                # v[AA+1] <- eax
+    SET_VREG rIBASE, rINST                  # v[AA+0] <- rIBASE
+    movl    LOCAL0(%esp), rIBASE            # restore rIBASE
+    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -4277,16 +4293,16 @@
  * Generic 64-bit binary operation.
  */
     /* binop vAA, vBB, vCC */
-    movzbl  2(rPC),%eax                     # eax <- BB
-    movzbl  3(rPC),%ecx                     # ecx <- CC
-    movl    rIBASE,LOCAL0(%esp)             # save rIBASE
-    GET_VREG rIBASE %eax                    # rIBASE <- v[BB+0]
-    GET_VREG_HIGH %eax %eax                 # eax <- v[BB+1]
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    movl    rIBASE, LOCAL0(%esp)            # save rIBASE
+    GET_VREG rIBASE, %eax                   # rIBASE <- v[BB+0]
+    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1]
     subl    (rFP,%ecx,4), rIBASE                                 # ex: addl   (rFP,%ecx,4),rIBASE
     sbbl    4(rFP,%ecx,4), %eax                                 # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG rIBASE rINST                   # v[AA+0] <- rIBASE
-    movl    LOCAL0(%esp),rIBASE             # restore rIBASE
-    SET_VREG_HIGH %eax rINST                # v[AA+1] <- eax
+    SET_VREG rIBASE, rINST                  # v[AA+0] <- rIBASE
+    movl    LOCAL0(%esp), rIBASE            # restore rIBASE
+    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -4323,9 +4339,9 @@
     mov     LOCAL0(%esp), rPC               # restore Interpreter PC
     mov     LOCAL1(%esp), rFP               # restore FP
     leal    (%ecx,rIBASE), rIBASE           # full result now in rIBASE:%eax
-    SET_VREG_HIGH rIBASE rINST              # v[B+1] <- rIBASE
+    SET_VREG_HIGH rIBASE, rINST             # v[B+1] <- rIBASE
     mov     LOCAL2(%esp), rIBASE            # restore IBASE
-    SET_VREG %eax rINST                     # v[B] <- eax
+    SET_VREG %eax, rINST                    # v[B] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -4340,18 +4356,18 @@
     mov     rIBASE, LOCAL0(%esp)            # save rIBASE/%edx
     mov     rINST, LOCAL1(%esp)             # save rINST/%ebx
     movzbl  3(rPC), %eax                    # eax <- CC
-    GET_VREG %ecx %eax
-    GET_VREG_HIGH %ebx %eax
+    GET_VREG %ecx, %eax
+    GET_VREG_HIGH %ebx, %eax
     movl    %ecx, %edx
     orl     %ebx, %ecx
     jz      common_errDivideByZero
     movzbl  2(rPC), %eax                    # eax <- BB
-    GET_VREG_HIGH %ecx %eax
-    GET_VREG %eax %eax
-    call    art_quick_ldiv
+    GET_VREG_HIGH %ecx, %eax
+    GET_VREG %eax, %eax
+    call    SYMBOL(art_quick_ldiv)
     mov     LOCAL1(%esp), rINST             # restore rINST/%ebx
-    SET_VREG_HIGH rIBASE rINST
-    SET_VREG %eax rINST
+    SET_VREG_HIGH rIBASE, rINST
+    SET_VREG %eax, rINST
     mov     LOCAL0(%esp), rIBASE            # restore rIBASE/%edx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -4368,18 +4384,18 @@
     mov     rIBASE, LOCAL0(%esp)            # save rIBASE/%edx
     mov     rINST, LOCAL1(%esp)             # save rINST/%ebx
     movzbl  3(rPC), %eax                    # eax <- CC
-    GET_VREG %ecx %eax
-    GET_VREG_HIGH %ebx %eax
+    GET_VREG %ecx, %eax
+    GET_VREG_HIGH %ebx, %eax
     movl    %ecx, %edx
     orl     %ebx, %ecx
     jz      common_errDivideByZero
     movzbl  2(rPC), %eax                    # eax <- BB
-    GET_VREG_HIGH %ecx %eax
-    GET_VREG %eax %eax
-    call    art_quick_lmod
+    GET_VREG_HIGH %ecx, %eax
+    GET_VREG %eax, %eax
+    call    SYMBOL(art_quick_lmod)
     mov     LOCAL1(%esp), rINST             # restore rINST/%ebx
-    SET_VREG_HIGH rIBASE rINST
-    SET_VREG %eax rINST
+    SET_VREG_HIGH rIBASE, rINST
+    SET_VREG %eax, rINST
     mov     LOCAL0(%esp), rIBASE            # restore rIBASE/%edx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -4393,16 +4409,16 @@
  * Generic 64-bit binary operation.
  */
     /* binop vAA, vBB, vCC */
-    movzbl  2(rPC),%eax                     # eax <- BB
-    movzbl  3(rPC),%ecx                     # ecx <- CC
-    movl    rIBASE,LOCAL0(%esp)             # save rIBASE
-    GET_VREG rIBASE %eax                    # rIBASE <- v[BB+0]
-    GET_VREG_HIGH %eax %eax                 # eax <- v[BB+1]
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    movl    rIBASE, LOCAL0(%esp)            # save rIBASE
+    GET_VREG rIBASE, %eax                   # rIBASE <- v[BB+0]
+    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1]
     andl    (rFP,%ecx,4), rIBASE                                 # ex: addl   (rFP,%ecx,4),rIBASE
     andl    4(rFP,%ecx,4), %eax                                 # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG rIBASE rINST                   # v[AA+0] <- rIBASE
-    movl    LOCAL0(%esp),rIBASE             # restore rIBASE
-    SET_VREG_HIGH %eax rINST                # v[AA+1] <- eax
+    SET_VREG rIBASE, rINST                  # v[AA+0] <- rIBASE
+    movl    LOCAL0(%esp), rIBASE            # restore rIBASE
+    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -4415,16 +4431,16 @@
  * Generic 64-bit binary operation.
  */
     /* binop vAA, vBB, vCC */
-    movzbl  2(rPC),%eax                     # eax <- BB
-    movzbl  3(rPC),%ecx                     # ecx <- CC
-    movl    rIBASE,LOCAL0(%esp)             # save rIBASE
-    GET_VREG rIBASE %eax                    # rIBASE <- v[BB+0]
-    GET_VREG_HIGH %eax %eax                 # eax <- v[BB+1]
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    movl    rIBASE, LOCAL0(%esp)            # save rIBASE
+    GET_VREG rIBASE, %eax                   # rIBASE <- v[BB+0]
+    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1]
     orl     (rFP,%ecx,4), rIBASE                                 # ex: addl   (rFP,%ecx,4),rIBASE
     orl     4(rFP,%ecx,4), %eax                                 # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG rIBASE rINST                   # v[AA+0] <- rIBASE
-    movl    LOCAL0(%esp),rIBASE             # restore rIBASE
-    SET_VREG_HIGH %eax rINST                # v[AA+1] <- eax
+    SET_VREG rIBASE, rINST                  # v[AA+0] <- rIBASE
+    movl    LOCAL0(%esp), rIBASE            # restore rIBASE
+    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -4437,16 +4453,16 @@
  * Generic 64-bit binary operation.
  */
     /* binop vAA, vBB, vCC */
-    movzbl  2(rPC),%eax                     # eax <- BB
-    movzbl  3(rPC),%ecx                     # ecx <- CC
-    movl    rIBASE,LOCAL0(%esp)             # save rIBASE
-    GET_VREG rIBASE %eax                    # rIBASE <- v[BB+0]
-    GET_VREG_HIGH %eax %eax                 # eax <- v[BB+1]
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    movl    rIBASE, LOCAL0(%esp)            # save rIBASE
+    GET_VREG rIBASE, %eax                   # rIBASE <- v[BB+0]
+    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1]
     xorl    (rFP,%ecx,4), rIBASE                                 # ex: addl   (rFP,%ecx,4),rIBASE
     xorl    4(rFP,%ecx,4), %eax                                 # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG rIBASE rINST                   # v[AA+0] <- rIBASE
-    movl    LOCAL0(%esp),rIBASE             # restore rIBASE
-    SET_VREG_HIGH %eax rINST                # v[AA+1] <- eax
+    SET_VREG rIBASE, rINST                  # v[AA+0] <- rIBASE
+    movl    LOCAL0(%esp), rIBASE            # restore rIBASE
+    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -4469,9 +4485,9 @@
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
     movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE %eax               # ecx <- v[BB+1]
-    GET_VREG %ecx %ecx                      # ecx <- vCC
-    GET_VREG %eax %eax                      # eax <- v[BB+0]
+    GET_VREG_HIGH rIBASE, %eax              # ecx <- v[BB+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vCC
+    GET_VREG %eax, %eax                     # eax <- v[BB+0]
     shldl   %eax,rIBASE
     sall    %cl, %eax
     testb   $32, %cl
@@ -4479,9 +4495,9 @@
     movl    %eax, rIBASE
     xorl    %eax, %eax
 2:
-    SET_VREG_HIGH rIBASE rINST              # v[AA+1] <- rIBASE
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
     movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST                     # v[AA+0] <- %eax
+    SET_VREG %eax, rINST                    # v[AA+0] <- %eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -4503,9 +4519,9 @@
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
     movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE %eax               # rIBASE<- v[BB+1]
-    GET_VREG %ecx %ecx                      # ecx <- vCC
-    GET_VREG %eax %eax                      # eax <- v[BB+0]
+    GET_VREG_HIGH rIBASE, %eax              # rIBASE<- v[BB+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vCC
+    GET_VREG %eax, %eax                     # eax <- v[BB+0]
     shrdl   rIBASE, %eax
     sarl    %cl, rIBASE
     testb   $32, %cl
@@ -4513,9 +4529,9 @@
     movl    rIBASE, %eax
     sarl    $31, rIBASE
 2:
-    SET_VREG_HIGH rIBASE rINST              # v[AA+1] <- rIBASE
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
     movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST                     # v[AA+0] <- eax
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -4537,9 +4553,9 @@
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
     movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE %eax               # rIBASE <- v[BB+1]
-    GET_VREG %ecx %ecx                      # ecx <- vCC
-    GET_VREG %eax %eax                      # eax <- v[BB+0]
+    GET_VREG_HIGH rIBASE, %eax              # rIBASE <- v[BB+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vCC
+    GET_VREG %eax, %eax                     # eax <- v[BB+0]
     shrdl   rIBASE, %eax
     shrl    %cl, rIBASE
     testb   $32, %cl
@@ -4547,9 +4563,9 @@
     movl    rIBASE, %eax
     xorl    rIBASE, rIBASE
 2:
-    SET_VREG_HIGH rIBASE rINST              # v[AA+1] <- rIBASE
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
     movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST                     # v[BB+0] <- eax
+    SET_VREG %eax, rINST                    # v[BB+0] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -4728,7 +4744,7 @@
     /* binop/2addr vA, vB */
     movzx   rINSTbl, %ecx                   # ecx <- A+
     sarl    $4, rINST                      # rINST <- B
-    GET_VREG %eax rINST                     # eax <- vB
+    GET_VREG %eax, rINST                    # eax <- vB
     andb    $0xf, %cl                      # ecx <- A
     addl    %eax, (rFP,%ecx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
     CLEAR_REF %ecx
@@ -4753,7 +4769,7 @@
     /* binop/2addr vA, vB */
     movzx   rINSTbl, %ecx                   # ecx <- A+
     sarl    $4, rINST                      # rINST <- B
-    GET_VREG %eax rINST                     # eax <- vB
+    GET_VREG %eax, rINST                    # eax <- vB
     andb    $0xf, %cl                      # ecx <- A
     subl    %eax, (rFP,%ecx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
     CLEAR_REF %ecx
@@ -4767,12 +4783,12 @@
     /* mul vA, vB */
     movzx   rINSTbl, %ecx                   # ecx <- A+
     sarl    $4, rINST                      # rINST <- B
-    GET_VREG %eax rINST                     # eax <- vB
+    GET_VREG %eax, rINST                    # eax <- vB
     andb    $0xf, %cl                      # ecx <- A
     mov     rIBASE, LOCAL0(%esp)
     imull   (rFP,%ecx,4), %eax              # trashes rIBASE/edx
     mov     LOCAL0(%esp), rIBASE
-    SET_VREG %eax %ecx
+    SET_VREG %eax, %ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 /* ------------------------------ */
@@ -4788,9 +4804,9 @@
     movzx   rINSTbl, %ecx                   # eax <- BA
     mov     rIBASE, LOCAL0(%esp)
     sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # eax <- vBB
+    GET_VREG %ecx, %ecx                     # eax <- vBB
     andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax rINST                     # eax <- vBB
+    GET_VREG %eax, rINST                    # eax <- vBB
     testl   %ecx, %ecx
     je      common_errDivideByZero
     cmpl    $-1, %ecx
@@ -4798,14 +4814,14 @@
     cmpl    $0x80000000, %eax
     jne     .Lop_div_int_2addr_continue_div2addr
     movl    $0x80000000, %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     mov     LOCAL0(%esp), rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 .Lop_div_int_2addr_continue_div2addr:
     cltd
     idivl   %ecx
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     mov     LOCAL0(%esp), rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
@@ -4823,9 +4839,9 @@
     movzx   rINSTbl, %ecx                   # eax <- BA
     mov     rIBASE, LOCAL0(%esp)
     sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # eax <- vBB
+    GET_VREG %ecx, %ecx                     # eax <- vBB
     andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax rINST                     # eax <- vBB
+    GET_VREG %eax, rINST                    # eax <- vBB
     testl   %ecx, %ecx
     je      common_errDivideByZero
     cmpl    $-1, %ecx
@@ -4833,14 +4849,14 @@
     cmpl    $0x80000000, %eax
     jne     .Lop_rem_int_2addr_continue_div2addr
     movl    $0, rIBASE
-    SET_VREG rIBASE rINST
+    SET_VREG rIBASE, rINST
     mov     LOCAL0(%esp), rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 .Lop_rem_int_2addr_continue_div2addr:
     cltd
     idivl   %ecx
-    SET_VREG rIBASE rINST
+    SET_VREG rIBASE, rINST
     mov     LOCAL0(%esp), rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
@@ -4863,7 +4879,7 @@
     /* binop/2addr vA, vB */
     movzx   rINSTbl, %ecx                   # ecx <- A+
     sarl    $4, rINST                      # rINST <- B
-    GET_VREG %eax rINST                     # eax <- vB
+    GET_VREG %eax, rINST                    # eax <- vB
     andb    $0xf, %cl                      # ecx <- A
     andl    %eax, (rFP,%ecx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
     CLEAR_REF %ecx
@@ -4888,7 +4904,7 @@
     /* binop/2addr vA, vB */
     movzx   rINSTbl, %ecx                   # ecx <- A+
     sarl    $4, rINST                      # rINST <- B
-    GET_VREG %eax rINST                     # eax <- vB
+    GET_VREG %eax, rINST                    # eax <- vB
     andb    $0xf, %cl                      # ecx <- A
     orl     %eax, (rFP,%ecx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
     CLEAR_REF %ecx
@@ -4913,7 +4929,7 @@
     /* binop/2addr vA, vB */
     movzx   rINSTbl, %ecx                   # ecx <- A+
     sarl    $4, rINST                      # rINST <- B
-    GET_VREG %eax rINST                     # eax <- vB
+    GET_VREG %eax, rINST                    # eax <- vB
     andb    $0xf, %cl                      # ecx <- A
     xorl    %eax, (rFP,%ecx,4)                                  # for ex: addl   %eax,(rFP,%ecx,4)
     CLEAR_REF %ecx
@@ -4931,11 +4947,11 @@
     /* shift/2addr vA, vB */
     movzx   rINSTbl, %ecx                   # eax <- BA
     sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # eax <- vBB
+    GET_VREG %ecx, %ecx                     # eax <- vBB
     andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax rINST                     # eax <- vAA
+    GET_VREG %eax, rINST                    # eax <- vAA
     sall    %cl, %eax                                  # ex: sarl %cl, %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 
@@ -4950,11 +4966,11 @@
     /* shift/2addr vA, vB */
     movzx   rINSTbl, %ecx                   # eax <- BA
     sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # eax <- vBB
+    GET_VREG %ecx, %ecx                     # eax <- vBB
     andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax rINST                     # eax <- vAA
+    GET_VREG %eax, rINST                    # eax <- vAA
     sarl    %cl, %eax                                  # ex: sarl %cl, %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 
@@ -4969,11 +4985,11 @@
     /* shift/2addr vA, vB */
     movzx   rINSTbl, %ecx                   # eax <- BA
     sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # eax <- vBB
+    GET_VREG %ecx, %ecx                     # eax <- vBB
     andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax rINST                     # eax <- vAA
+    GET_VREG %eax, rINST                    # eax <- vAA
     shrl    %cl, %eax                                  # ex: sarl %cl, %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 
@@ -4986,11 +5002,11 @@
  * Generic 64-bit binary operation.
  */
     /* binop/2addr vA, vB */
-    movzbl  rINSTbl,%ecx                    # ecx<- BA
-    sarl    $4,%ecx                        # ecx<- B
-    GET_VREG %eax %ecx                      # eax<- v[B+0]
-    GET_VREG_HIGH %ecx %ecx                 # eax<- v[B+1]
-    andb    $0xF,rINSTbl                   # rINST<- A
+    movzbl  rINSTbl, %ecx                   # ecx<- BA
+    sarl    $4, %ecx                       # ecx<- B
+    GET_VREG %eax, %ecx                     # eax<- v[B+0]
+    GET_VREG_HIGH %ecx, %ecx                # eax<- v[B+1]
+    andb    $0xF, rINSTbl                  # rINST<- A
     addl    %eax, (rFP,rINST,4)                                 # ex: addl   %eax,(rFP,rINST,4)
     adcl    %ecx, 4(rFP,rINST,4)                                 # ex: adcl   %ecx,4(rFP,rINST,4)
     CLEAR_WIDE_REF rINST
@@ -5006,11 +5022,11 @@
  * Generic 64-bit binary operation.
  */
     /* binop/2addr vA, vB */
-    movzbl  rINSTbl,%ecx                    # ecx<- BA
-    sarl    $4,%ecx                        # ecx<- B
-    GET_VREG %eax %ecx                      # eax<- v[B+0]
-    GET_VREG_HIGH %ecx %ecx                 # eax<- v[B+1]
-    andb    $0xF,rINSTbl                   # rINST<- A
+    movzbl  rINSTbl, %ecx                   # ecx<- BA
+    sarl    $4, %ecx                       # ecx<- B
+    GET_VREG %eax, %ecx                     # eax<- v[B+0]
+    GET_VREG_HIGH %ecx, %ecx                # eax<- v[B+1]
+    andb    $0xF, rINSTbl                  # rINST<- A
     subl    %eax, (rFP,rINST,4)                                 # ex: addl   %eax,(rFP,rINST,4)
     sbbl    %ecx, 4(rFP,rINST,4)                                 # ex: adcl   %ecx,4(rFP,rINST,4)
     CLEAR_WIDE_REF rINST
@@ -5072,17 +5088,17 @@
     andb    $0xf, rINSTbl                  # rINST <- A
     mov     rINST, LOCAL1(%esp)             # save rINST/%ebx
     movl    %ebx, %ecx
-    GET_VREG %edx %eax
-    GET_VREG_HIGH %ebx %eax
+    GET_VREG %edx, %eax
+    GET_VREG_HIGH %ebx, %eax
     movl    %edx, %eax
     orl     %ebx, %eax
     jz      common_errDivideByZero
-    GET_VREG %eax %ecx
-    GET_VREG_HIGH %ecx %ecx
-    call    art_quick_ldiv
+    GET_VREG %eax, %ecx
+    GET_VREG_HIGH %ecx, %ecx
+    call    SYMBOL(art_quick_ldiv)
     mov     LOCAL1(%esp), rINST             # restore rINST/%ebx
-    SET_VREG_HIGH rIBASE rINST
-    SET_VREG %eax rINST
+    SET_VREG_HIGH rIBASE, rINST
+    SET_VREG %eax, rINST
     mov     LOCAL0(%esp), rIBASE            # restore rIBASE/%edx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
@@ -5102,17 +5118,17 @@
     andb    $0xf, rINSTbl                  # rINST <- A
     mov     rINST, LOCAL1(%esp)             # save rINST/%ebx
     movl    %ebx, %ecx
-    GET_VREG %edx %eax
-    GET_VREG_HIGH %ebx %eax
+    GET_VREG %edx, %eax
+    GET_VREG_HIGH %ebx, %eax
     movl    %edx, %eax
     orl     %ebx, %eax
     jz      common_errDivideByZero
-    GET_VREG %eax %ecx
-    GET_VREG_HIGH %ecx %ecx
-    call    art_quick_lmod
+    GET_VREG %eax, %ecx
+    GET_VREG_HIGH %ecx, %ecx
+    call    SYMBOL(art_quick_lmod)
     mov     LOCAL1(%esp), rINST             # restore rINST/%ebx
-    SET_VREG_HIGH rIBASE rINST
-    SET_VREG %eax rINST
+    SET_VREG_HIGH rIBASE, rINST
+    SET_VREG %eax, rINST
     mov     LOCAL0(%esp), rIBASE            # restore rIBASE/%edx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
@@ -5126,11 +5142,11 @@
  * Generic 64-bit binary operation.
  */
     /* binop/2addr vA, vB */
-    movzbl  rINSTbl,%ecx                    # ecx<- BA
-    sarl    $4,%ecx                        # ecx<- B
-    GET_VREG %eax %ecx                      # eax<- v[B+0]
-    GET_VREG_HIGH %ecx %ecx                 # eax<- v[B+1]
-    andb    $0xF,rINSTbl                   # rINST<- A
+    movzbl  rINSTbl, %ecx                   # ecx<- BA
+    sarl    $4, %ecx                       # ecx<- B
+    GET_VREG %eax, %ecx                     # eax<- v[B+0]
+    GET_VREG_HIGH %ecx, %ecx                # eax<- v[B+1]
+    andb    $0xF, rINSTbl                  # rINST<- A
     andl    %eax, (rFP,rINST,4)                                 # ex: addl   %eax,(rFP,rINST,4)
     andl    %ecx, 4(rFP,rINST,4)                                 # ex: adcl   %ecx,4(rFP,rINST,4)
     CLEAR_WIDE_REF rINST
@@ -5146,11 +5162,11 @@
  * Generic 64-bit binary operation.
  */
     /* binop/2addr vA, vB */
-    movzbl  rINSTbl,%ecx                    # ecx<- BA
-    sarl    $4,%ecx                        # ecx<- B
-    GET_VREG %eax %ecx                      # eax<- v[B+0]
-    GET_VREG_HIGH %ecx %ecx                 # eax<- v[B+1]
-    andb    $0xF,rINSTbl                   # rINST<- A
+    movzbl  rINSTbl, %ecx                   # ecx<- BA
+    sarl    $4, %ecx                       # ecx<- B
+    GET_VREG %eax, %ecx                     # eax<- v[B+0]
+    GET_VREG_HIGH %ecx, %ecx                # eax<- v[B+1]
+    andb    $0xF, rINSTbl                  # rINST<- A
     orl     %eax, (rFP,rINST,4)                                 # ex: addl   %eax,(rFP,rINST,4)
     orl     %ecx, 4(rFP,rINST,4)                                 # ex: adcl   %ecx,4(rFP,rINST,4)
     CLEAR_WIDE_REF rINST
@@ -5166,11 +5182,11 @@
  * Generic 64-bit binary operation.
  */
     /* binop/2addr vA, vB */
-    movzbl  rINSTbl,%ecx                    # ecx<- BA
-    sarl    $4,%ecx                        # ecx<- B
-    GET_VREG %eax %ecx                      # eax<- v[B+0]
-    GET_VREG_HIGH %ecx %ecx                 # eax<- v[B+1]
-    andb    $0xF,rINSTbl                   # rINST<- A
+    movzbl  rINSTbl, %ecx                   # ecx<- BA
+    sarl    $4, %ecx                       # ecx<- B
+    GET_VREG %eax, %ecx                     # eax<- v[B+0]
+    GET_VREG_HIGH %ecx, %ecx                # eax<- v[B+1]
+    andb    $0xF, rINSTbl                  # rINST<- A
     xorl    %eax, (rFP,rINST,4)                                 # ex: addl   %eax,(rFP,rINST,4)
     xorl    %ecx, 4(rFP,rINST,4)                                 # ex: adcl   %ecx,4(rFP,rINST,4)
     CLEAR_WIDE_REF rINST
@@ -5191,11 +5207,11 @@
     /* rINSTw gets AA */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax rINST                     # eax <- v[AA+0]
+    GET_VREG %eax, rINST                    # eax <- v[AA+0]
     sarl    $4, %ecx                       # ecx <- B
     movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE rINST              # rIBASE <- v[AA+1]
-    GET_VREG %ecx %ecx                      # ecx <- vBB
+    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[AA+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vBB
     shldl   %eax, rIBASE
     sall    %cl, %eax
     testb   $32, %cl
@@ -5203,9 +5219,9 @@
     movl    %eax, rIBASE
     xorl    %eax, %eax
 2:
-    SET_VREG_HIGH rIBASE rINST              # v[AA+1] <- rIBASE
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
     movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST                     # v[AA+0] <- eax
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 /* ------------------------------ */
@@ -5222,11 +5238,11 @@
     /* rINSTw gets AA */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax rINST                     # eax <- v[AA+0]
+    GET_VREG %eax, rINST                    # eax <- v[AA+0]
     sarl    $4, %ecx                       # ecx <- B
     movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE rINST              # rIBASE <- v[AA+1]
-    GET_VREG %ecx %ecx                      # ecx <- vBB
+    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[AA+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vBB
     shrdl   rIBASE, %eax
     sarl    %cl, rIBASE
     testb   $32, %cl
@@ -5234,9 +5250,9 @@
     movl    rIBASE, %eax
     sarl    $31, rIBASE
 2:
-    SET_VREG_HIGH rIBASE rINST              # v[AA+1] <- rIBASE
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
     movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST                     # v[AA+0] <- eax
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 /* ------------------------------ */
@@ -5253,11 +5269,11 @@
     /* rINSTw gets AA */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax rINST                     # eax <- v[AA+0]
+    GET_VREG %eax, rINST                    # eax <- v[AA+0]
     sarl    $4, %ecx                       # ecx <- B
     movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE rINST              # rIBASE <- v[AA+1]
-    GET_VREG %ecx %ecx                      # ecx <- vBB
+    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[AA+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vBB
     shrdl   rIBASE, %eax
     shrl    %cl, rIBASE
     testb   $32, %cl
@@ -5265,9 +5281,9 @@
     movl    rIBASE, %eax
     xorl    rIBASE, rIBASE
 2:
-    SET_VREG_HIGH rIBASE rINST              # v[AA+1] <- rIBASE
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
     movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST                     # v[AA+0] <- eax
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 /* ------------------------------ */
@@ -5455,11 +5471,11 @@
     /* binop/lit16 vA, vB, #+CCCC */
     movzbl  rINSTbl, %eax                   # eax <- 000000BA
     sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax %eax                      # eax <- vB
+    GET_VREG %eax, %eax                     # eax <- vB
     movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
     andb    $0xf, rINSTbl                  # rINST <- A
     addl    %ecx, %eax                                  # for example: addl %ecx, %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -5481,11 +5497,11 @@
     /* binop/lit16 vA, vB, #+CCCC */
     movzbl  rINSTbl, %eax                   # eax <- 000000BA
     sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax %eax                      # eax <- vB
+    GET_VREG %eax, %eax                     # eax <- vB
     movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
     andb    $0xf, rINSTbl                  # rINST <- A
     subl    %eax, %ecx                                  # for example: addl %ecx, %eax
-    SET_VREG %ecx rINST
+    SET_VREG %ecx, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -5497,13 +5513,13 @@
     /* Need A in rINST, ssssCCCC in ecx, vB in eax */
     movzbl  rINSTbl, %eax                   # eax <- 000000BA
     sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax %eax                      # eax <- vB
+    GET_VREG %eax, %eax                     # eax <- vB
     movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
     andb    $0xf, rINSTbl                  # rINST <- A
     mov     rIBASE, LOCAL0(%esp)
     imull   %ecx, %eax                      # trashes rIBASE/edx
     mov     LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -5519,7 +5535,7 @@
     /* Need A in rINST, ssssCCCC in ecx, vB in eax */
     movzbl  rINSTbl, %eax                   # eax <- 000000BA
     sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax %eax                      # eax <- vB
+    GET_VREG %eax, %eax                     # eax <- vB
     movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
     andb    $0xf, rINSTbl                  # rINST <- A
     testl   %ecx, %ecx
@@ -5529,14 +5545,14 @@
     cmpl    $0x80000000, %eax
     jne     .Lop_div_int_lit16_continue_div
     movl    $0x80000000, %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 .Lop_div_int_lit16_continue_div:
     mov     rIBASE, LOCAL0(%esp)
     cltd
     idivl   %ecx
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     mov     LOCAL0(%esp), rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -5554,7 +5570,7 @@
     /* Need A in rINST, ssssCCCC in ecx, vB in eax */
     movzbl  rINSTbl, %eax                   # eax <- 000000BA
     sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax %eax                      # eax <- vB
+    GET_VREG %eax, %eax                     # eax <- vB
     movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
     andb    $0xf, rINSTbl                  # rINST <- A
     testl   %ecx, %ecx
@@ -5564,14 +5580,14 @@
     cmpl    $0x80000000, %eax
     jne     .Lop_rem_int_lit16_continue_div
     movl    $0, %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 .Lop_rem_int_lit16_continue_div:
     mov     rIBASE, LOCAL0(%esp)
     cltd
     idivl   %ecx
-    SET_VREG rIBASE rINST
+    SET_VREG rIBASE, rINST
     mov     LOCAL0(%esp), rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -5593,11 +5609,11 @@
     /* binop/lit16 vA, vB, #+CCCC */
     movzbl  rINSTbl, %eax                   # eax <- 000000BA
     sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax %eax                      # eax <- vB
+    GET_VREG %eax, %eax                     # eax <- vB
     movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
     andb    $0xf, rINSTbl                  # rINST <- A
     andl    %ecx, %eax                                  # for example: addl %ecx, %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -5618,11 +5634,11 @@
     /* binop/lit16 vA, vB, #+CCCC */
     movzbl  rINSTbl, %eax                   # eax <- 000000BA
     sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax %eax                      # eax <- vB
+    GET_VREG %eax, %eax                     # eax <- vB
     movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
     andb    $0xf, rINSTbl                  # rINST <- A
     orl     %ecx, %eax                                  # for example: addl %ecx, %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -5643,11 +5659,11 @@
     /* binop/lit16 vA, vB, #+CCCC */
     movzbl  rINSTbl, %eax                   # eax <- 000000BA
     sarl    $4, %eax                       # eax <- B
-    GET_VREG %eax %eax                      # eax <- vB
+    GET_VREG %eax, %eax                     # eax <- vB
     movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
     andb    $0xf, rINSTbl                  # rINST <- A
     xorl    %ecx, %eax                                  # for example: addl %ecx, %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -5669,9 +5685,9 @@
     /* binop/lit8 vAA, vBB, #+CC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax %eax                      # eax <- rBB
+    GET_VREG %eax, %eax                     # eax <- rBB
     addl    %ecx, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -5693,9 +5709,9 @@
     /* binop/lit8 vAA, vBB, #+CC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax %eax                      # eax <- rBB
+    GET_VREG %eax, %eax                     # eax <- rBB
     subl    %eax, %ecx                                  # ex: addl %ecx,%eax
-    SET_VREG %ecx rINST
+    SET_VREG %ecx, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -5706,11 +5722,11 @@
     /* mul/lit8 vAA, vBB, #+CC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG  %eax  %eax                    # eax <- rBB
+    GET_VREG  %eax, %eax                    # eax <- rBB
     mov     rIBASE, LOCAL0(%esp)
     imull   %ecx, %eax                      # trashes rIBASE/edx
     mov     LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -5725,7 +5741,7 @@
     /* div/rem/lit8 vAA, vBB, #+CC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG  %eax %eax                     # eax <- rBB
+    GET_VREG  %eax, %eax                    # eax <- rBB
     testl   %ecx, %ecx
     je      common_errDivideByZero
     cmpl    $0x80000000, %eax
@@ -5733,14 +5749,14 @@
     cmpl    $-1, %ecx
     jne     .Lop_div_int_lit8_continue_div
     movl    $0x80000000, %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 .Lop_div_int_lit8_continue_div:
     mov     rIBASE, LOCAL0(%esp)
     cltd
     idivl   %ecx
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     mov     LOCAL0(%esp), rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -5757,7 +5773,7 @@
     /* div/rem/lit8 vAA, vBB, #+CC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG  %eax %eax                     # eax <- rBB
+    GET_VREG  %eax, %eax                    # eax <- rBB
     testl   %ecx, %ecx
     je      common_errDivideByZero
     cmpl    $0x80000000, %eax
@@ -5765,14 +5781,14 @@
     cmpl    $-1, %ecx
     jne     .Lop_rem_int_lit8_continue_div
     movl    $0, %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 .Lop_rem_int_lit8_continue_div:
     mov     rIBASE, LOCAL0(%esp)
     cltd
     idivl   %ecx
-    SET_VREG rIBASE rINST
+    SET_VREG rIBASE, rINST
     mov     LOCAL0(%esp), rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -5795,9 +5811,9 @@
     /* binop/lit8 vAA, vBB, #+CC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax %eax                      # eax <- rBB
+    GET_VREG %eax, %eax                     # eax <- rBB
     andl    %ecx, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -5819,9 +5835,9 @@
     /* binop/lit8 vAA, vBB, #+CC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax %eax                      # eax <- rBB
+    GET_VREG %eax, %eax                     # eax <- rBB
     orl     %ecx, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -5843,9 +5859,9 @@
     /* binop/lit8 vAA, vBB, #+CC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax %eax                      # eax <- rBB
+    GET_VREG %eax, %eax                     # eax <- rBB
     xorl    %ecx, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -5867,9 +5883,9 @@
     /* binop/lit8 vAA, vBB, #+CC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax %eax                      # eax <- rBB
+    GET_VREG %eax, %eax                     # eax <- rBB
     sall    %cl, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -5891,9 +5907,9 @@
     /* binop/lit8 vAA, vBB, #+CC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax %eax                      # eax <- rBB
+    GET_VREG %eax, %eax                     # eax <- rBB
     sarl    %cl, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -5915,9 +5931,9 @@
     /* binop/lit8 vAA, vBB, #+CC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax %eax                      # eax <- rBB
+    GET_VREG %eax, %eax                     # eax <- rBB
     shrl    %cl, %eax                                  # ex: addl %ecx,%eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -5929,13 +5945,13 @@
     /* op vA, vB, offset@CCCC */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # vB (object we're operating on)
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
     movzwl  2(rPC), %eax                    # eax <- field byte offset
     testl   %ecx, %ecx                      # is object null?
     je      common_errNullObject
     movl (%ecx,%eax,1), %eax
     andb    $0xf,rINSTbl                   # rINST <- A
-    SET_VREG %eax rINST                     # fp[A] <- value
+    SET_VREG %eax, rINST                    # fp[A] <- value
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -5945,13 +5961,13 @@
     /* iget-wide-quick vA, vB, offset@CCCC */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # vB (object we're operating on)
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
     movzwl  2(rPC), %eax                    # eax <- field byte offset
     testl   %ecx, %ecx                      # is object null?
     je      common_errNullObject
     movq    (%ecx,%eax,1), %xmm0
     andb    $0xf, rINSTbl                  # rINST <- A
-    SET_WIDE_FP_VREG %xmm0 rINST
+    SET_WIDE_FP_VREG %xmm0, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -5962,18 +5978,18 @@
     /* op vA, vB, offset@CCCC */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # vB (object we're operating on)
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
     movzwl  2(rPC), %eax                    # eax <- field byte offset
     movl    %ecx, OUT_ARG0(%esp)
     movl    %eax, OUT_ARG1(%esp)
     EXPORT_PC
-    call    artIGetObjectFromMterp          # (obj, offset)
+    call    SYMBOL(artIGetObjectFromMterp)  # (obj, offset)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException                  # bail out
     andb    $0xf,rINSTbl                   # rINST <- A
-    SET_VREG_OBJECT %eax rINST              # fp[A] <- value
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <- value
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 /* ------------------------------ */
@@ -5984,11 +6000,11 @@
     /* op vA, vB, offset@CCCC */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # vB (object we're operating on)
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
     testl   %ecx, %ecx                      # is object null?
     je      common_errNullObject
     andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST rINST                    # rINST <- v[A]
+    GET_VREG rINST, rINST                   # rINST <- v[A]
     movzwl  2(rPC), %eax                    # eax <- field byte offset
     movl    rINST, (%ecx,%eax,1)
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -6000,13 +6016,13 @@
     /* iput-wide-quick vA, vB, offset@CCCC */
     movzbl    rINSTbl, %ecx                 # ecx<- BA
     sarl      $4, %ecx                     # ecx<- B
-    GET_VREG  %ecx %ecx                     # vB (object we're operating on)
+    GET_VREG  %ecx, %ecx                    # vB (object we're operating on)
     testl     %ecx, %ecx                    # is object null?
     je        common_errNullObject
     movzwl    2(rPC), %eax                  # eax<- field byte offset
     leal      (%ecx,%eax,1), %ecx           # ecx<- Address of 64-bit target
     andb      $0xf, rINSTbl                # rINST<- A
-    GET_WIDE_FP_VREG %xmm0 rINST            # xmm0<- fp[A]/fp[A+1]
+    GET_WIDE_FP_VREG %xmm0, rINST           # xmm0<- fp[A]/fp[A+1]
     movq      %xmm0, (%ecx)                 # obj.field<- r0/r1
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
@@ -6020,7 +6036,7 @@
     movl    rPC, OUT_ARG1(%esp)
     REFRESH_INST 232
     movl    rINST, OUT_ARG2(%esp)
-    call    MterpIputObjectQuick
+    call    SYMBOL(MterpIputObjectQuick)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
@@ -6045,7 +6061,7 @@
     movl    rPC, OUT_ARG2(%esp)
     REFRESH_INST 233
     movl    rINST, OUT_ARG3(%esp)
-    call    MterpInvokeVirtualQuick
+    call    SYMBOL(MterpInvokeVirtualQuick)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
@@ -6071,7 +6087,7 @@
     movl    rPC, OUT_ARG2(%esp)
     REFRESH_INST 234
     movl    rINST, OUT_ARG3(%esp)
-    call    MterpInvokeVirtualQuickRange
+    call    SYMBOL(MterpInvokeVirtualQuickRange)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
@@ -6087,11 +6103,11 @@
     /* op vA, vB, offset@CCCC */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # vB (object we're operating on)
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
     testl   %ecx, %ecx                      # is object null?
     je      common_errNullObject
     andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST rINST                    # rINST <- v[A]
+    GET_VREG rINST, rINST                   # rINST <- v[A]
     movzwl  2(rPC), %eax                    # eax <- field byte offset
     movb    rINSTbl, (%ecx,%eax,1)
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -6106,11 +6122,11 @@
     /* op vA, vB, offset@CCCC */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # vB (object we're operating on)
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
     testl   %ecx, %ecx                      # is object null?
     je      common_errNullObject
     andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST rINST                    # rINST <- v[A]
+    GET_VREG rINST, rINST                   # rINST <- v[A]
     movzwl  2(rPC), %eax                    # eax <- field byte offset
     movb    rINSTbl, (%ecx,%eax,1)
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -6125,11 +6141,11 @@
     /* op vA, vB, offset@CCCC */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # vB (object we're operating on)
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
     testl   %ecx, %ecx                      # is object null?
     je      common_errNullObject
     andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST rINST                    # rINST <- v[A]
+    GET_VREG rINST, rINST                   # rINST <- v[A]
     movzwl  2(rPC), %eax                    # eax <- field byte offset
     movw    rINSTw, (%ecx,%eax,1)
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -6144,11 +6160,11 @@
     /* op vA, vB, offset@CCCC */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # vB (object we're operating on)
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
     testl   %ecx, %ecx                      # is object null?
     je      common_errNullObject
     andb    $0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST rINST                    # rINST <- v[A]
+    GET_VREG rINST, rINST                   # rINST <- v[A]
     movzwl  2(rPC), %eax                    # eax <- field byte offset
     movw    rINSTw, (%ecx,%eax,1)
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -6163,13 +6179,13 @@
     /* op vA, vB, offset@CCCC */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # vB (object we're operating on)
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
     movzwl  2(rPC), %eax                    # eax <- field byte offset
     testl   %ecx, %ecx                      # is object null?
     je      common_errNullObject
     movsbl (%ecx,%eax,1), %eax
     andb    $0xf,rINSTbl                   # rINST <- A
-    SET_VREG %eax rINST                     # fp[A] <- value
+    SET_VREG %eax, rINST                    # fp[A] <- value
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -6182,13 +6198,13 @@
     /* op vA, vB, offset@CCCC */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # vB (object we're operating on)
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
     movzwl  2(rPC), %eax                    # eax <- field byte offset
     testl   %ecx, %ecx                      # is object null?
     je      common_errNullObject
     movsbl (%ecx,%eax,1), %eax
     andb    $0xf,rINSTbl                   # rINST <- A
-    SET_VREG %eax rINST                     # fp[A] <- value
+    SET_VREG %eax, rINST                    # fp[A] <- value
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -6201,13 +6217,13 @@
     /* op vA, vB, offset@CCCC */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # vB (object we're operating on)
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
     movzwl  2(rPC), %eax                    # eax <- field byte offset
     testl   %ecx, %ecx                      # is object null?
     je      common_errNullObject
     movzwl (%ecx,%eax,1), %eax
     andb    $0xf,rINSTbl                   # rINST <- A
-    SET_VREG %eax rINST                     # fp[A] <- value
+    SET_VREG %eax, rINST                    # fp[A] <- value
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -6220,13 +6236,13 @@
     /* op vA, vB, offset@CCCC */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # vB (object we're operating on)
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
     movzwl  2(rPC), %eax                    # eax <- field byte offset
     testl   %ecx, %ecx                      # is object null?
     je      common_errNullObject
     movswl (%ecx,%eax,1), %eax
     andb    $0xf,rINSTbl                   # rINST <- A
-    SET_VREG %eax rINST                     # fp[A] <- value
+    SET_VREG %eax, rINST                    # fp[A] <- value
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 
@@ -6350,31 +6366,31 @@
 
 
     .balign 128
-    .size   artMterpAsmInstructionStart, .-artMterpAsmInstructionStart
-    .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
+    SIZE(SYMBOL(artMterpAsmInstructionStart),SYMBOL(artMterpAsmInstructionStart))
+    .global SYMBOL(artMterpAsmInstructionEnd)
+SYMBOL(artMterpAsmInstructionEnd):
 
 /*
  * ===========================================================================
  *  Sister implementations
  * ===========================================================================
  */
-    .global artMterpAsmSisterStart
-    .type   artMterpAsmSisterStart, %function
+    .global SYMBOL(artMterpAsmSisterStart)
+    FUNCTION_TYPE(SYMBOL(artMterpAsmSisterStart))
     .text
     .balign 4
-artMterpAsmSisterStart:
+SYMBOL(artMterpAsmSisterStart):
 
-    .size   artMterpAsmSisterStart, .-artMterpAsmSisterStart
-    .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
+    SIZE(SYMBOL(artMterpAsmSisterStart),SYMBOL(artMterpAsmSisterStart))
+    .global SYMBOL(artMterpAsmSisterEnd)
+SYMBOL(artMterpAsmSisterEnd):
 
 
-    .global artMterpAsmAltInstructionStart
-    .type   artMterpAsmAltInstructionStart, %function
+    .global SYMBOL(artMterpAsmAltInstructionStart)
+    FUNCTION_TYPE(SYMBOL(artMterpAsmAltInstructionStart))
     .text
 
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
+SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
 /* ------------------------------ */
     .balign 128
 .L_ALT_op_nop: /* 0x00 */
@@ -6396,7 +6412,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(0*128)
 
@@ -6421,7 +6437,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(1*128)
 
@@ -6446,7 +6462,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(2*128)
 
@@ -6471,7 +6487,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(3*128)
 
@@ -6496,7 +6512,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(4*128)
 
@@ -6521,7 +6537,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(5*128)
 
@@ -6546,7 +6562,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(6*128)
 
@@ -6571,7 +6587,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(7*128)
 
@@ -6596,7 +6612,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(8*128)
 
@@ -6621,7 +6637,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(9*128)
 
@@ -6646,7 +6662,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(10*128)
 
@@ -6671,7 +6687,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(11*128)
 
@@ -6696,7 +6712,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(12*128)
 
@@ -6721,7 +6737,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(13*128)
 
@@ -6746,7 +6762,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(14*128)
 
@@ -6771,7 +6787,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(15*128)
 
@@ -6796,7 +6812,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(16*128)
 
@@ -6821,7 +6837,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(17*128)
 
@@ -6846,7 +6862,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(18*128)
 
@@ -6871,7 +6887,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(19*128)
 
@@ -6896,7 +6912,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(20*128)
 
@@ -6921,7 +6937,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(21*128)
 
@@ -6946,7 +6962,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(22*128)
 
@@ -6971,7 +6987,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(23*128)
 
@@ -6996,7 +7012,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(24*128)
 
@@ -7021,7 +7037,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(25*128)
 
@@ -7046,7 +7062,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(26*128)
 
@@ -7071,7 +7087,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(27*128)
 
@@ -7096,7 +7112,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(28*128)
 
@@ -7121,7 +7137,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(29*128)
 
@@ -7146,7 +7162,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(30*128)
 
@@ -7171,7 +7187,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(31*128)
 
@@ -7196,7 +7212,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(32*128)
 
@@ -7221,7 +7237,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(33*128)
 
@@ -7246,7 +7262,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(34*128)
 
@@ -7271,7 +7287,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(35*128)
 
@@ -7296,7 +7312,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(36*128)
 
@@ -7321,7 +7337,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(37*128)
 
@@ -7346,7 +7362,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(38*128)
 
@@ -7371,7 +7387,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(39*128)
 
@@ -7396,7 +7412,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(40*128)
 
@@ -7421,7 +7437,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(41*128)
 
@@ -7446,7 +7462,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(42*128)
 
@@ -7471,7 +7487,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(43*128)
 
@@ -7496,7 +7512,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(44*128)
 
@@ -7521,7 +7537,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(45*128)
 
@@ -7546,7 +7562,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(46*128)
 
@@ -7571,7 +7587,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(47*128)
 
@@ -7596,7 +7612,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(48*128)
 
@@ -7621,7 +7637,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(49*128)
 
@@ -7646,7 +7662,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(50*128)
 
@@ -7671,7 +7687,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(51*128)
 
@@ -7696,7 +7712,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(52*128)
 
@@ -7721,7 +7737,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(53*128)
 
@@ -7746,7 +7762,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(54*128)
 
@@ -7771,7 +7787,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(55*128)
 
@@ -7796,7 +7812,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(56*128)
 
@@ -7821,7 +7837,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(57*128)
 
@@ -7846,7 +7862,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(58*128)
 
@@ -7871,7 +7887,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(59*128)
 
@@ -7896,7 +7912,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(60*128)
 
@@ -7921,7 +7937,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(61*128)
 
@@ -7946,7 +7962,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(62*128)
 
@@ -7971,7 +7987,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(63*128)
 
@@ -7996,7 +8012,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(64*128)
 
@@ -8021,7 +8037,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(65*128)
 
@@ -8046,7 +8062,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(66*128)
 
@@ -8071,7 +8087,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(67*128)
 
@@ -8096,7 +8112,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(68*128)
 
@@ -8121,7 +8137,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(69*128)
 
@@ -8146,7 +8162,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(70*128)
 
@@ -8171,7 +8187,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(71*128)
 
@@ -8196,7 +8212,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(72*128)
 
@@ -8221,7 +8237,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(73*128)
 
@@ -8246,7 +8262,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(74*128)
 
@@ -8271,7 +8287,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(75*128)
 
@@ -8296,7 +8312,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(76*128)
 
@@ -8321,7 +8337,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(77*128)
 
@@ -8346,7 +8362,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(78*128)
 
@@ -8371,7 +8387,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(79*128)
 
@@ -8396,7 +8412,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(80*128)
 
@@ -8421,7 +8437,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(81*128)
 
@@ -8446,7 +8462,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(82*128)
 
@@ -8471,7 +8487,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(83*128)
 
@@ -8496,7 +8512,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(84*128)
 
@@ -8521,7 +8537,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(85*128)
 
@@ -8546,7 +8562,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(86*128)
 
@@ -8571,7 +8587,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(87*128)
 
@@ -8596,7 +8612,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(88*128)
 
@@ -8621,7 +8637,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(89*128)
 
@@ -8646,7 +8662,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(90*128)
 
@@ -8671,7 +8687,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(91*128)
 
@@ -8696,7 +8712,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(92*128)
 
@@ -8721,7 +8737,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(93*128)
 
@@ -8746,7 +8762,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(94*128)
 
@@ -8771,7 +8787,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(95*128)
 
@@ -8796,7 +8812,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(96*128)
 
@@ -8821,7 +8837,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(97*128)
 
@@ -8846,7 +8862,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(98*128)
 
@@ -8871,7 +8887,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(99*128)
 
@@ -8896,7 +8912,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(100*128)
 
@@ -8921,7 +8937,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(101*128)
 
@@ -8946,7 +8962,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(102*128)
 
@@ -8971,7 +8987,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(103*128)
 
@@ -8996,7 +9012,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(104*128)
 
@@ -9021,7 +9037,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(105*128)
 
@@ -9046,7 +9062,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(106*128)
 
@@ -9071,7 +9087,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(107*128)
 
@@ -9096,7 +9112,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(108*128)
 
@@ -9121,7 +9137,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(109*128)
 
@@ -9146,7 +9162,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(110*128)
 
@@ -9171,7 +9187,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(111*128)
 
@@ -9196,7 +9212,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(112*128)
 
@@ -9221,7 +9237,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(113*128)
 
@@ -9246,7 +9262,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(114*128)
 
@@ -9271,7 +9287,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(115*128)
 
@@ -9296,7 +9312,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(116*128)
 
@@ -9321,7 +9337,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(117*128)
 
@@ -9346,7 +9362,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(118*128)
 
@@ -9371,7 +9387,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(119*128)
 
@@ -9396,7 +9412,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(120*128)
 
@@ -9421,7 +9437,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(121*128)
 
@@ -9446,7 +9462,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(122*128)
 
@@ -9471,7 +9487,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(123*128)
 
@@ -9496,7 +9512,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(124*128)
 
@@ -9521,7 +9537,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(125*128)
 
@@ -9546,7 +9562,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(126*128)
 
@@ -9571,7 +9587,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(127*128)
 
@@ -9596,7 +9612,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(128*128)
 
@@ -9621,7 +9637,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(129*128)
 
@@ -9646,7 +9662,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(130*128)
 
@@ -9671,7 +9687,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(131*128)
 
@@ -9696,7 +9712,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(132*128)
 
@@ -9721,7 +9737,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(133*128)
 
@@ -9746,7 +9762,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(134*128)
 
@@ -9771,7 +9787,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(135*128)
 
@@ -9796,7 +9812,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(136*128)
 
@@ -9821,7 +9837,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(137*128)
 
@@ -9846,7 +9862,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(138*128)
 
@@ -9871,7 +9887,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(139*128)
 
@@ -9896,7 +9912,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(140*128)
 
@@ -9921,7 +9937,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(141*128)
 
@@ -9946,7 +9962,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(142*128)
 
@@ -9971,7 +9987,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(143*128)
 
@@ -9996,7 +10012,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(144*128)
 
@@ -10021,7 +10037,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(145*128)
 
@@ -10046,7 +10062,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(146*128)
 
@@ -10071,7 +10087,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(147*128)
 
@@ -10096,7 +10112,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(148*128)
 
@@ -10121,7 +10137,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(149*128)
 
@@ -10146,7 +10162,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(150*128)
 
@@ -10171,7 +10187,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(151*128)
 
@@ -10196,7 +10212,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(152*128)
 
@@ -10221,7 +10237,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(153*128)
 
@@ -10246,7 +10262,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(154*128)
 
@@ -10271,7 +10287,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(155*128)
 
@@ -10296,7 +10312,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(156*128)
 
@@ -10321,7 +10337,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(157*128)
 
@@ -10346,7 +10362,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(158*128)
 
@@ -10371,7 +10387,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(159*128)
 
@@ -10396,7 +10412,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(160*128)
 
@@ -10421,7 +10437,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(161*128)
 
@@ -10446,7 +10462,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(162*128)
 
@@ -10471,7 +10487,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(163*128)
 
@@ -10496,7 +10512,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(164*128)
 
@@ -10521,7 +10537,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(165*128)
 
@@ -10546,7 +10562,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(166*128)
 
@@ -10571,7 +10587,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(167*128)
 
@@ -10596,7 +10612,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(168*128)
 
@@ -10621,7 +10637,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(169*128)
 
@@ -10646,7 +10662,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(170*128)
 
@@ -10671,7 +10687,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(171*128)
 
@@ -10696,7 +10712,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(172*128)
 
@@ -10721,7 +10737,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(173*128)
 
@@ -10746,7 +10762,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(174*128)
 
@@ -10771,7 +10787,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(175*128)
 
@@ -10796,7 +10812,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(176*128)
 
@@ -10821,7 +10837,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(177*128)
 
@@ -10846,7 +10862,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(178*128)
 
@@ -10871,7 +10887,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(179*128)
 
@@ -10896,7 +10912,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(180*128)
 
@@ -10921,7 +10937,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(181*128)
 
@@ -10946,7 +10962,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(182*128)
 
@@ -10971,7 +10987,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(183*128)
 
@@ -10996,7 +11012,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(184*128)
 
@@ -11021,7 +11037,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(185*128)
 
@@ -11046,7 +11062,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(186*128)
 
@@ -11071,7 +11087,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(187*128)
 
@@ -11096,7 +11112,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(188*128)
 
@@ -11121,7 +11137,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(189*128)
 
@@ -11146,7 +11162,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(190*128)
 
@@ -11171,7 +11187,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(191*128)
 
@@ -11196,7 +11212,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(192*128)
 
@@ -11221,7 +11237,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(193*128)
 
@@ -11246,7 +11262,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(194*128)
 
@@ -11271,7 +11287,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(195*128)
 
@@ -11296,7 +11312,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(196*128)
 
@@ -11321,7 +11337,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(197*128)
 
@@ -11346,7 +11362,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(198*128)
 
@@ -11371,7 +11387,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(199*128)
 
@@ -11396,7 +11412,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(200*128)
 
@@ -11421,7 +11437,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(201*128)
 
@@ -11446,7 +11462,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(202*128)
 
@@ -11471,7 +11487,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(203*128)
 
@@ -11496,7 +11512,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(204*128)
 
@@ -11521,7 +11537,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(205*128)
 
@@ -11546,7 +11562,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(206*128)
 
@@ -11571,7 +11587,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(207*128)
 
@@ -11596,7 +11612,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(208*128)
 
@@ -11621,7 +11637,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(209*128)
 
@@ -11646,7 +11662,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(210*128)
 
@@ -11671,7 +11687,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(211*128)
 
@@ -11696,7 +11712,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(212*128)
 
@@ -11721,7 +11737,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(213*128)
 
@@ -11746,7 +11762,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(214*128)
 
@@ -11771,7 +11787,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(215*128)
 
@@ -11796,7 +11812,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(216*128)
 
@@ -11821,7 +11837,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(217*128)
 
@@ -11846,7 +11862,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(218*128)
 
@@ -11871,7 +11887,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(219*128)
 
@@ -11896,7 +11912,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(220*128)
 
@@ -11921,7 +11937,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(221*128)
 
@@ -11946,7 +11962,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(222*128)
 
@@ -11971,7 +11987,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(223*128)
 
@@ -11996,7 +12012,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(224*128)
 
@@ -12021,7 +12037,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(225*128)
 
@@ -12046,7 +12062,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(226*128)
 
@@ -12071,7 +12087,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(227*128)
 
@@ -12096,7 +12112,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(228*128)
 
@@ -12121,7 +12137,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(229*128)
 
@@ -12146,7 +12162,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(230*128)
 
@@ -12171,7 +12187,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(231*128)
 
@@ -12196,7 +12212,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(232*128)
 
@@ -12221,7 +12237,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(233*128)
 
@@ -12246,7 +12262,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(234*128)
 
@@ -12271,7 +12287,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(235*128)
 
@@ -12296,7 +12312,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(236*128)
 
@@ -12321,7 +12337,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(237*128)
 
@@ -12346,7 +12362,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(238*128)
 
@@ -12371,7 +12387,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(239*128)
 
@@ -12396,7 +12412,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(240*128)
 
@@ -12421,7 +12437,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(241*128)
 
@@ -12446,7 +12462,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(242*128)
 
@@ -12471,7 +12487,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(243*128)
 
@@ -12496,7 +12512,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(244*128)
 
@@ -12521,7 +12537,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(245*128)
 
@@ -12546,7 +12562,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(246*128)
 
@@ -12571,7 +12587,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(247*128)
 
@@ -12596,7 +12612,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(248*128)
 
@@ -12621,7 +12637,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(249*128)
 
@@ -12646,7 +12662,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(250*128)
 
@@ -12671,7 +12687,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(251*128)
 
@@ -12696,7 +12712,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(252*128)
 
@@ -12721,7 +12737,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(253*128)
 
@@ -12746,7 +12762,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(254*128)
 
@@ -12771,14 +12787,14 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(255*128)
 
     .balign 128
-    .size   artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
-    .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
+    SIZE(SYMBOL(artMterpAsmAltInstructionStart),SYMBOL(artMterpAsmAltInstructionStart))
+    .global SYMBOL(artMterpAsmAltInstructionEnd)
+SYMBOL(artMterpAsmAltInstructionEnd):
 /* File: x86/footer.S */
 /*
  * ===========================================================================
@@ -12802,7 +12818,7 @@
     movl    %eax, OUT_ARG0(%esp)
     lea     OFF_FP_SHADOWFRAME(rFP), %ecx
     movl    %ecx, OUT_ARG1(%esp)
-    call    MterpLogDivideByZeroException
+    call    SYMBOL(MterpLogDivideByZeroException)
 #endif
     jmp     MterpCommonFallback
 
@@ -12813,7 +12829,7 @@
     movl    %eax, OUT_ARG0(%esp)
     lea     OFF_FP_SHADOWFRAME(rFP), %ecx
     movl    %ecx, OUT_ARG1(%esp)
-    call    MterpLogArrayIndexException
+    call    SYMBOL(MterpLogArrayIndexException)
 #endif
     jmp     MterpCommonFallback
 
@@ -12824,7 +12840,7 @@
     movl    %eax, OUT_ARG0(%esp)
     lea     OFF_FP_SHADOWFRAME(rFP), %ecx
     movl    %ecx, OUT_ARG1(%esp)
-    call    MterpLogNegativeArraySizeException
+    call    SYMBOL(MterpLogNegativeArraySizeException)
 #endif
     jmp     MterpCommonFallback
 
@@ -12835,7 +12851,7 @@
     movl    %eax, OUT_ARG0(%esp)
     lea     OFF_FP_SHADOWFRAME(rFP), %ecx
     movl    %ecx, OUT_ARG1(%esp)
-    call    MterpLogNoSuchMethodException
+    call    SYMBOL(MterpLogNoSuchMethodException)
 #endif
     jmp     MterpCommonFallback
 
@@ -12846,7 +12862,7 @@
     movl    %eax, OUT_ARG0(%esp)
     lea     OFF_FP_SHADOWFRAME(rFP), %ecx
     movl    %ecx, OUT_ARG1(%esp)
-    call    MterpLogNullObjectException
+    call    SYMBOL(MterpLogNullObjectException)
 #endif
     jmp     MterpCommonFallback
 
@@ -12857,7 +12873,7 @@
     movl    %eax, OUT_ARG0(%esp)
     lea     OFF_FP_SHADOWFRAME(rFP), %ecx
     movl    %ecx, OUT_ARG0(%esp)
-    call    MterpLogExceptionThrownException
+    call    SYMBOL(MterpLogExceptionThrownException)
 #endif
     jmp     MterpCommonFallback
 
@@ -12870,7 +12886,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     movl    THREAD_FLAGS_OFFSET(%eax), %eax
     movl    %eax, OUT_ARG2(%esp)
-    call    MterpLogSuspendFallback
+    call    SYMBOL(MterpLogSuspendFallback)
 #endif
     jmp     MterpCommonFallback
 
@@ -12895,7 +12911,7 @@
     movl    %eax, OUT_ARG0(%esp)
     lea     OFF_FP_SHADOWFRAME(rFP), %ecx
     movl    %ecx, OUT_ARG1(%esp)
-    call    MterpHandleException
+    call    SYMBOL(MterpHandleException)
     testl   %eax, %eax
     jz      MterpExceptionReturn
     REFRESH_IBASE
@@ -12919,7 +12935,7 @@
     testl   $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
-    call    MterpSuspendCheck
+    call    SYMBOL(MterpSuspendCheck)
     REFRESH_IBASE
 1:
     GOTO_NEXT
@@ -12934,7 +12950,7 @@
     movl    %eax, OUT_ARG0(%esp)
     lea     OFF_FP_SHADOWFRAME(rFP), %ecx
     movl    %ecx, OUT_ARG1(%esp)
-    call    MterpLogFallback
+    call    SYMBOL(MterpLogFallback)
 #endif
 MterpCommonFallback:
     xor     %eax, %eax
@@ -12965,5 +12981,5 @@
     ret
 
     .cfi_endproc
-    .size   ExecuteMterpImpl, .-ExecuteMterpImpl
+    SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
 
diff --git a/runtime/interpreter/mterp/x86/alt_stub.S b/runtime/interpreter/mterp/x86/alt_stub.S
index 6462fc5..5a91167 100644
--- a/runtime/interpreter/mterp/x86/alt_stub.S
+++ b/runtime/interpreter/mterp/x86/alt_stub.S
@@ -15,6 +15,6 @@
     movl    %ecx, OUT_ARG0(%esp)
     leal    OFF_FP_SHADOWFRAME(rFP), %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    MterpCheckBefore                # (self, shadow_frame)
+    call    SYMBOL(MterpCheckBefore)        # (self, shadow_frame)
     REFRESH_IBASE
     jmp     .L_op_nop+(${opnum}*${handler_size_bytes})
diff --git a/runtime/interpreter/mterp/x86/bincmp.S b/runtime/interpreter/mterp/x86/bincmp.S
index a9a8c3a..27cf6ea 100644
--- a/runtime/interpreter/mterp/x86/bincmp.S
+++ b/runtime/interpreter/mterp/x86/bincmp.S
@@ -8,7 +8,7 @@
     /* if-cmp vA, vB, +CCCC */
     movzx   rINSTbl, %ecx                   # ecx <- A+
     andb    $$0xf, %cl                      # ecx <- A
-    GET_VREG %eax %ecx                      # eax <- vA
+    GET_VREG %eax, %ecx                     # eax <- vA
     sarl    $$4, rINST                      # rINST <- B
     cmpl    VREG_ADDRESS(rINST), %eax       # compare (vA, vB)
     movl    $$2, %eax                       # assume not taken
diff --git a/runtime/interpreter/mterp/x86/bindiv.S b/runtime/interpreter/mterp/x86/bindiv.S
index 742f758..bb5b319 100644
--- a/runtime/interpreter/mterp/x86/bindiv.S
+++ b/runtime/interpreter/mterp/x86/bindiv.S
@@ -6,8 +6,8 @@
     /* div/rem vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB
-    GET_VREG %ecx %ecx                      # ecx <- vCC
+    GET_VREG %eax, %eax                     # eax <- vBB
+    GET_VREG %ecx, %ecx                     # ecx <- vCC
     mov     rIBASE, LOCAL0(%esp)
     testl   %ecx, %ecx
     je      common_errDivideByZero
@@ -43,6 +43,6 @@
     xorl    %edx, %edx                      # Clear %edx before divide
     div     %cx
 .L${opcode}_finish:
-    SET_VREG $result rINST
+    SET_VREG $result, rINST
     mov     LOCAL0(%esp), rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/bindiv2addr.S b/runtime/interpreter/mterp/x86/bindiv2addr.S
index ee7c523..e620996 100644
--- a/runtime/interpreter/mterp/x86/bindiv2addr.S
+++ b/runtime/interpreter/mterp/x86/bindiv2addr.S
@@ -7,9 +7,9 @@
     movzx   rINSTbl, %ecx                   # eax <- BA
     mov     rIBASE, LOCAL0(%esp)
     sarl    $$4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # eax <- vBB
+    GET_VREG %ecx, %ecx                     # eax <- vBB
     andb    $$0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax rINST                     # eax <- vBB
+    GET_VREG %eax, rINST                    # eax <- vBB
     testl   %ecx, %ecx
     je      common_errDivideByZero
     cmpl    $$-1, %ecx
@@ -17,13 +17,13 @@
     cmpl    $$0x80000000, %eax
     jne     .L${opcode}_continue_div2addr
     movl    $special, $result
-    SET_VREG $result rINST
+    SET_VREG $result, rINST
     mov     LOCAL0(%esp), rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 .L${opcode}_continue_div2addr:
     cltd
     idivl   %ecx
-    SET_VREG $result rINST
+    SET_VREG $result, rINST
     mov     LOCAL0(%esp), rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/bindivLit16.S b/runtime/interpreter/mterp/x86/bindivLit16.S
index a2c4334..be094ae 100644
--- a/runtime/interpreter/mterp/x86/bindivLit16.S
+++ b/runtime/interpreter/mterp/x86/bindivLit16.S
@@ -7,7 +7,7 @@
     /* Need A in rINST, ssssCCCC in ecx, vB in eax */
     movzbl  rINSTbl, %eax                   # eax <- 000000BA
     sarl    $$4, %eax                       # eax <- B
-    GET_VREG %eax %eax                      # eax <- vB
+    GET_VREG %eax, %eax                     # eax <- vB
     movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
     andb    $$0xf, rINSTbl                  # rINST <- A
     testl   %ecx, %ecx
@@ -17,13 +17,13 @@
     cmpl    $$0x80000000, %eax
     jne     .L${opcode}_continue_div
     movl    $special, %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 .L${opcode}_continue_div:
     mov     rIBASE, LOCAL0(%esp)
     cltd
     idivl   %ecx
-    SET_VREG $result rINST
+    SET_VREG $result, rINST
     mov     LOCAL0(%esp), rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/bindivLit8.S b/runtime/interpreter/mterp/x86/bindivLit8.S
index 61bee06..fddb545 100644
--- a/runtime/interpreter/mterp/x86/bindivLit8.S
+++ b/runtime/interpreter/mterp/x86/bindivLit8.S
@@ -6,7 +6,7 @@
     /* div/rem/lit8 vAA, vBB, #+CC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG  %eax %eax                     # eax <- rBB
+    GET_VREG  %eax, %eax                    # eax <- rBB
     testl   %ecx, %ecx
     je      common_errDivideByZero
     cmpl    $$0x80000000, %eax
@@ -14,13 +14,13 @@
     cmpl    $$-1, %ecx
     jne     .L${opcode}_continue_div
     movl    $special, %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 .L${opcode}_continue_div:
     mov     rIBASE, LOCAL0(%esp)
     cltd
     idivl   %ecx
-    SET_VREG $result rINST
+    SET_VREG $result, rINST
     mov     LOCAL0(%esp), rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binop.S b/runtime/interpreter/mterp/x86/binop.S
index 5383f25..d895235 100644
--- a/runtime/interpreter/mterp/x86/binop.S
+++ b/runtime/interpreter/mterp/x86/binop.S
@@ -11,7 +11,7 @@
     /* binop vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB
+    GET_VREG %eax, %eax                     # eax <- vBB
     $instr                                  # ex: addl    (rFP,%ecx,4),%eax
-    SET_VREG $result rINST
+    SET_VREG $result, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binop1.S b/runtime/interpreter/mterp/x86/binop1.S
index cd51d0c..5049bb3 100644
--- a/runtime/interpreter/mterp/x86/binop1.S
+++ b/runtime/interpreter/mterp/x86/binop1.S
@@ -6,8 +6,8 @@
     /* binop vAA, vBB, vCC */
     movzbl  2(rPC),%eax                     # eax <- BB
     movzbl  3(rPC),%ecx                     # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB
-    GET_VREG %ecx %ecx                      # eax <- vBB
+    GET_VREG %eax, %eax                     # eax <- vBB
+    GET_VREG %ecx, %ecx                     # ecx <- vCC
     $instr                                  # ex: addl    %ecx,%eax
-    SET_VREG $result rINST
+    SET_VREG $result, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binop2addr.S b/runtime/interpreter/mterp/x86/binop2addr.S
index abee4db..f126234 100644
--- a/runtime/interpreter/mterp/x86/binop2addr.S
+++ b/runtime/interpreter/mterp/x86/binop2addr.S
@@ -12,7 +12,7 @@
     /* binop/2addr vA, vB */
     movzx   rINSTbl, %ecx                   # ecx <- A+
     sarl    $$4, rINST                      # rINST <- B
-    GET_VREG %eax rINST                     # eax <- vB
+    GET_VREG %eax, rINST                    # eax <- vB
     andb    $$0xf, %cl                      # ecx <- A
     $instr                                  # for ex: addl   %eax,(rFP,%ecx,4)
     CLEAR_REF %ecx
diff --git a/runtime/interpreter/mterp/x86/binopLit16.S b/runtime/interpreter/mterp/x86/binopLit16.S
index 6c7fe61..2fd59de 100644
--- a/runtime/interpreter/mterp/x86/binopLit16.S
+++ b/runtime/interpreter/mterp/x86/binopLit16.S
@@ -11,9 +11,9 @@
     /* binop/lit16 vA, vB, #+CCCC */
     movzbl  rINSTbl, %eax                   # eax <- 000000BA
     sarl    $$4, %eax                       # eax <- B
-    GET_VREG %eax %eax                      # eax <- vB
+    GET_VREG %eax, %eax                     # eax <- vB
     movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
     andb    $$0xf, rINSTbl                  # rINST <- A
     $instr                                  # for example: addl %ecx, %eax
-    SET_VREG $result rINST
+    SET_VREG $result, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binopLit8.S b/runtime/interpreter/mterp/x86/binopLit8.S
index 924685d..67cead2 100644
--- a/runtime/interpreter/mterp/x86/binopLit8.S
+++ b/runtime/interpreter/mterp/x86/binopLit8.S
@@ -12,7 +12,7 @@
     /* binop/lit8 vAA, vBB, #+CC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG %eax %eax                      # eax <- rBB
+    GET_VREG %eax, %eax                     # eax <- rBB
     $instr                                  # ex: addl %ecx,%eax
-    SET_VREG $result rINST
+    SET_VREG $result, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binopWide.S b/runtime/interpreter/mterp/x86/binopWide.S
index 9f7106e..da1293d 100644
--- a/runtime/interpreter/mterp/x86/binopWide.S
+++ b/runtime/interpreter/mterp/x86/binopWide.S
@@ -2,14 +2,14 @@
  * Generic 64-bit binary operation.
  */
     /* binop vAA, vBB, vCC */
-    movzbl  2(rPC),%eax                     # eax <- BB
-    movzbl  3(rPC),%ecx                     # ecx <- CC
-    movl    rIBASE,LOCAL0(%esp)             # save rIBASE
-    GET_VREG rIBASE %eax                    # rIBASE <- v[BB+0]
-    GET_VREG_HIGH %eax %eax                 # eax <- v[BB+1]
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    movl    rIBASE, LOCAL0(%esp)            # save rIBASE
+    GET_VREG rIBASE, %eax                   # rIBASE <- v[BB+0]
+    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1]
     $instr1                                 # ex: addl   (rFP,%ecx,4),rIBASE
     $instr2                                 # ex: adcl   4(rFP,%ecx,4),%eax
-    SET_VREG rIBASE rINST                   # v[AA+0] <- rIBASE
-    movl    LOCAL0(%esp),rIBASE             # restore rIBASE
-    SET_VREG_HIGH %eax rINST                # v[AA+1] <- eax
+    SET_VREG rIBASE, rINST                  # v[AA+0] <- rIBASE
+    movl    LOCAL0(%esp), rIBASE            # restore rIBASE
+    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binopWide2addr.S b/runtime/interpreter/mterp/x86/binopWide2addr.S
index 7560af4..da816f4 100644
--- a/runtime/interpreter/mterp/x86/binopWide2addr.S
+++ b/runtime/interpreter/mterp/x86/binopWide2addr.S
@@ -2,11 +2,11 @@
  * Generic 64-bit binary operation.
  */
     /* binop/2addr vA, vB */
-    movzbl  rINSTbl,%ecx                    # ecx<- BA
-    sarl    $$4,%ecx                        # ecx<- B
-    GET_VREG %eax %ecx                      # eax<- v[B+0]
-    GET_VREG_HIGH %ecx %ecx                 # eax<- v[B+1]
-    andb    $$0xF,rINSTbl                   # rINST<- A
+    movzbl  rINSTbl, %ecx                   # ecx<- BA
+    sarl    $$4, %ecx                       # ecx<- B
+    GET_VREG %eax, %ecx                     # eax<- v[B+0]
+    GET_VREG_HIGH %ecx, %ecx                # ecx<- v[B+1]
+    andb    $$0xF, rINSTbl                  # rINST<- A
     $instr1                                 # ex: addl   %eax,(rFP,rINST,4)
     $instr2                                 # ex: adcl   %ecx,4(rFP,rINST,4)
     CLEAR_WIDE_REF rINST
diff --git a/runtime/interpreter/mterp/x86/entry.S b/runtime/interpreter/mterp/x86/entry.S
index a24ef70..b83f7e1 100644
--- a/runtime/interpreter/mterp/x86/entry.S
+++ b/runtime/interpreter/mterp/x86/entry.S
@@ -18,8 +18,8 @@
  */
 
     .text
-    .global ExecuteMterpImpl
-    .type   ExecuteMterpImpl, %function
+    .global SYMBOL(ExecuteMterpImpl)
+    FUNCTION_TYPE(ExecuteMterpImpl)
 
 /*
  * On entry:
@@ -30,7 +30,7 @@
  *
  */
 
-ExecuteMterpImpl:
+SYMBOL(ExecuteMterpImpl):
     .cfi_startproc
     /* Allocate frame */
     subl    $$FRAME_SIZE, %esp
diff --git a/runtime/interpreter/mterp/x86/footer.S b/runtime/interpreter/mterp/x86/footer.S
index a2a36c4..385e784 100644
--- a/runtime/interpreter/mterp/x86/footer.S
+++ b/runtime/interpreter/mterp/x86/footer.S
@@ -20,7 +20,7 @@
     movl    %eax, OUT_ARG0(%esp)
     lea     OFF_FP_SHADOWFRAME(rFP), %ecx
     movl    %ecx, OUT_ARG1(%esp)
-    call    MterpLogDivideByZeroException
+    call    SYMBOL(MterpLogDivideByZeroException)
 #endif
     jmp     MterpCommonFallback
 
@@ -31,7 +31,7 @@
     movl    %eax, OUT_ARG0(%esp)
     lea     OFF_FP_SHADOWFRAME(rFP), %ecx
     movl    %ecx, OUT_ARG1(%esp)
-    call    MterpLogArrayIndexException
+    call    SYMBOL(MterpLogArrayIndexException)
 #endif
     jmp     MterpCommonFallback
 
@@ -42,7 +42,7 @@
     movl    %eax, OUT_ARG0(%esp)
     lea     OFF_FP_SHADOWFRAME(rFP), %ecx
     movl    %ecx, OUT_ARG1(%esp)
-    call    MterpLogNegativeArraySizeException
+    call    SYMBOL(MterpLogNegativeArraySizeException)
 #endif
     jmp     MterpCommonFallback
 
@@ -53,7 +53,7 @@
     movl    %eax, OUT_ARG0(%esp)
     lea     OFF_FP_SHADOWFRAME(rFP), %ecx
     movl    %ecx, OUT_ARG1(%esp)
-    call    MterpLogNoSuchMethodException
+    call    SYMBOL(MterpLogNoSuchMethodException)
 #endif
     jmp     MterpCommonFallback
 
@@ -64,7 +64,7 @@
     movl    %eax, OUT_ARG0(%esp)
     lea     OFF_FP_SHADOWFRAME(rFP), %ecx
     movl    %ecx, OUT_ARG1(%esp)
-    call    MterpLogNullObjectException
+    call    SYMBOL(MterpLogNullObjectException)
 #endif
     jmp     MterpCommonFallback
 
@@ -75,7 +75,7 @@
     movl    %eax, OUT_ARG0(%esp)
     lea     OFF_FP_SHADOWFRAME(rFP), %ecx
     movl    %ecx, OUT_ARG0(%esp)
-    call    MterpLogExceptionThrownException
+    call    SYMBOL(MterpLogExceptionThrownException)
 #endif
     jmp     MterpCommonFallback
 
@@ -88,7 +88,7 @@
     movl    %ecx, OUT_ARG0(%esp)
     movl    THREAD_FLAGS_OFFSET(%eax), %eax
     movl    %eax, OUT_ARG2(%esp)
-    call    MterpLogSuspendFallback
+    call    SYMBOL(MterpLogSuspendFallback)
 #endif
     jmp     MterpCommonFallback
 
@@ -113,7 +113,7 @@
     movl    %eax, OUT_ARG0(%esp)
     lea     OFF_FP_SHADOWFRAME(rFP), %ecx
     movl    %ecx, OUT_ARG1(%esp)
-    call    MterpHandleException
+    call    SYMBOL(MterpHandleException)
     testl   %eax, %eax
     jz      MterpExceptionReturn
     REFRESH_IBASE
@@ -137,7 +137,7 @@
     testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
-    call    MterpSuspendCheck
+    call    SYMBOL(MterpSuspendCheck)
     REFRESH_IBASE
 1:
     GOTO_NEXT
@@ -152,7 +152,7 @@
     movl    %eax, OUT_ARG0(%esp)
     lea     OFF_FP_SHADOWFRAME(rFP), %ecx
     movl    %ecx, OUT_ARG1(%esp)
-    call    MterpLogFallback
+    call    SYMBOL(MterpLogFallback)
 #endif
 MterpCommonFallback:
     xor     %eax, %eax
@@ -183,4 +183,4 @@
     ret
 
     .cfi_endproc
-    .size   ExecuteMterpImpl, .-ExecuteMterpImpl
+    SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
diff --git a/runtime/interpreter/mterp/x86/fpcmp.S b/runtime/interpreter/mterp/x86/fpcmp.S
index 2b98667..5f9eef9 100644
--- a/runtime/interpreter/mterp/x86/fpcmp.S
+++ b/runtime/interpreter/mterp/x86/fpcmp.S
@@ -31,5 +31,5 @@
 .L${opcode}_less:
     decl    %eax
 .L${opcode}_finish:
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/header.S b/runtime/interpreter/mterp/x86/header.S
index 2481785..0977b90 100644
--- a/runtime/interpreter/mterp/x86/header.S
+++ b/runtime/interpreter/mterp/x86/header.S
@@ -89,6 +89,22 @@
  */
 #include "asm_support.h"
 
+/*
+ * Handle mac compiler specific
+ */
+#if defined(__APPLE__)
+    #define MACRO_LITERAL(value) $$(value)
+    #define FUNCTION_TYPE(name)
+    #define SIZE(start,end)
+    // Mac OS' symbols have an _ prefix.
+    #define SYMBOL(name) _ ## name
+#else
+    #define MACRO_LITERAL(value) $$value
+    #define FUNCTION_TYPE(name) .type name, @function
+    #define SIZE(start,end) .size start, .-end
+    #define SYMBOL(name) name
+#endif
+
 /* Frame size must be 16-byte aligned.
  * Remember about 4 bytes for return address
  */
@@ -192,7 +208,7 @@
  */
 .macro REFRESH_INST _opnum
     movb    rINSTbl, rINSTbh
-    movb    $$\_opnum, rINSTbl
+    movb    MACRO_LITERAL(\_opnum), rINSTbl
 .endm
 
 /*
@@ -208,7 +224,7 @@
 .macro GOTO_NEXT
     movzx   rINSTbl,%eax
     movzbl  rINSTbh,rINST
-    shll    $$${handler_size_bits}, %eax
+    shll    MACRO_LITERAL(${handler_size_bits}), %eax
     addl    rIBASE, %eax
     jmp     *%eax
 .endm
@@ -248,7 +264,7 @@
 
 .macro SET_VREG _reg _vreg
     movl    \_reg, (rFP,\_vreg,4)
-    movl    $$0, (rREFS,\_vreg,4)
+    movl    MACRO_LITERAL(0), (rREFS,\_vreg,4)
 .endm
 
 /* Write wide value from xmm. xmm is clobbered. */
@@ -269,14 +285,14 @@
 
 .macro SET_VREG_HIGH _reg _vreg
     movl    \_reg, 4(rFP,\_vreg,4)
-    movl    $$0, 4(rREFS,\_vreg,4)
+    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
 .endm
 
 .macro CLEAR_REF _vreg
-    movl    $$0,  (rREFS,\_vreg,4)
+    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
 .endm
 
 .macro CLEAR_WIDE_REF _vreg
-    movl    $$0,  (rREFS,\_vreg,4)
-    movl    $$0, 4(rREFS,\_vreg,4)
+    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
+    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
 .endm
diff --git a/runtime/interpreter/mterp/x86/invoke.S b/runtime/interpreter/mterp/x86/invoke.S
index 80f7822..054fbfd 100644
--- a/runtime/interpreter/mterp/x86/invoke.S
+++ b/runtime/interpreter/mterp/x86/invoke.S
@@ -13,7 +13,7 @@
     movl    rPC, OUT_ARG2(%esp)
     REFRESH_INST ${opnum}
     movl    rINST, OUT_ARG3(%esp)
-    call    $helper
+    call    SYMBOL($helper)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_aget.S b/runtime/interpreter/mterp/x86/op_aget.S
index 52b5236..338386f 100644
--- a/runtime/interpreter/mterp/x86/op_aget.S
+++ b/runtime/interpreter/mterp/x86/op_aget.S
@@ -8,12 +8,12 @@
     /* op vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecx <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
     testl   %eax, %eax                      # null array object?
     je      common_errNullObject            # bail if so
     cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
     jae     common_errArrayIndex            # index >= length, bail.
     $load   $data_offset(%eax,%ecx,$shift), %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_aget_object.S b/runtime/interpreter/mterp/x86/op_aget_object.S
index 61f3e91..cbfb50c 100644
--- a/runtime/interpreter/mterp/x86/op_aget_object.S
+++ b/runtime/interpreter/mterp/x86/op_aget_object.S
@@ -6,15 +6,15 @@
     /* op vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecs <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
     EXPORT_PC
     movl    %eax, OUT_ARG0(%esp)
     movl    %ecx, OUT_ARG1(%esp)
-    call    artAGetObjectFromMterp          # (array, index)
+    call    SYMBOL(artAGetObjectFromMterp)  # (array, index)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $$0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException
-    SET_VREG_OBJECT %eax rINST
+    SET_VREG_OBJECT %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_aget_wide.S b/runtime/interpreter/mterp/x86/op_aget_wide.S
index 663adc6..92c612a 100644
--- a/runtime/interpreter/mterp/x86/op_aget_wide.S
+++ b/runtime/interpreter/mterp/x86/op_aget_wide.S
@@ -4,13 +4,13 @@
     /* aget-wide vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecx <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
     testl   %eax, %eax                      # null array object?
     je      common_errNullObject            # bail if so
     cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
     jae     common_errArrayIndex            # index >= length, bail.
     leal    MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
     movq    (%eax), %xmm0                   # xmm0 <- vBB[vCC]
-    SET_WIDE_FP_VREG %xmm0 rINST            # vAA <- xmm0
+    SET_WIDE_FP_VREG %xmm0, rINST           # vAA <- xmm0
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_aput.S b/runtime/interpreter/mterp/x86/op_aput.S
index 2ea465d..9d8c52d 100644
--- a/runtime/interpreter/mterp/x86/op_aput.S
+++ b/runtime/interpreter/mterp/x86/op_aput.S
@@ -8,13 +8,13 @@
     /* op vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecx <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
     testl   %eax, %eax                      # null array object?
     je      common_errNullObject            # bail if so
     cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
     jae     common_errArrayIndex            # index >= length, bail.
     leal    $data_offset(%eax,%ecx,$shift), %eax
-    GET_VREG rINST rINST
+    GET_VREG rINST, rINST
     $store  $reg, (%eax)
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_aput_object.S b/runtime/interpreter/mterp/x86/op_aput_object.S
index 2af5acb..9cfc221 100644
--- a/runtime/interpreter/mterp/x86/op_aput_object.S
+++ b/runtime/interpreter/mterp/x86/op_aput_object.S
@@ -8,7 +8,7 @@
     movl    rPC, OUT_ARG1(%esp)
     REFRESH_INST ${opnum}
     movl    rINST, OUT_ARG2(%esp)
-    call    MterpAputObject            # (array, index)
+    call    SYMBOL(MterpAputObject)         # (array, index)
     REFRESH_IBASE
     testl   %eax, %eax
     jz      MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_aput_wide.S b/runtime/interpreter/mterp/x86/op_aput_wide.S
index 7a33371..43ef64a 100644
--- a/runtime/interpreter/mterp/x86/op_aput_wide.S
+++ b/runtime/interpreter/mterp/x86/op_aput_wide.S
@@ -5,13 +5,13 @@
     /* aput-wide vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB (array object)
-    GET_VREG %ecx %ecx                      # ecx <- vCC (requested index)
+    GET_VREG %eax, %eax                     # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
     testl   %eax, %eax                      # null array object?
     je      common_errNullObject            # bail if so
     cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
     jae     common_errArrayIndex            # index >= length, bail.
     leal    MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
-    GET_WIDE_FP_VREG %xmm0 rINST            # xmm0 <- vAA
+    GET_WIDE_FP_VREG %xmm0, rINST           # xmm0 <- vAA
     movq    %xmm0, (%eax)                   # vBB[vCC] <- xmm0
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_array_length.S b/runtime/interpreter/mterp/x86/op_array_length.S
index 3e42a7c..60ed80b 100644
--- a/runtime/interpreter/mterp/x86/op_array_length.S
+++ b/runtime/interpreter/mterp/x86/op_array_length.S
@@ -3,10 +3,10 @@
  */
     mov     rINST, %eax                     # eax <- BA
     sarl    $$4, rINST                      # rINST <- B
-    GET_VREG %ecx rINST                     # ecx <- vB (object ref)
+    GET_VREG %ecx, rINST                    # ecx <- vB (object ref)
     testl   %ecx, %ecx                      # is null?
     je      common_errNullObject
     andb    $$0xf, %al                      # eax <- A
     movl    MIRROR_ARRAY_LENGTH_OFFSET(%ecx), rINST
-    SET_VREG rINST %eax
+    SET_VREG rINST, %eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_check_cast.S b/runtime/interpreter/mterp/x86/op_check_cast.S
index 018432a..ae2ff9e 100644
--- a/runtime/interpreter/mterp/x86/op_check_cast.S
+++ b/runtime/interpreter/mterp/x86/op_check_cast.S
@@ -11,7 +11,7 @@
     movl    %eax, OUT_ARG2(%esp)
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)
-    call    MterpCheckCast                  # (index, &obj, method, self)
+    call    SYMBOL(MterpCheckCast)          # (index, &obj, method, self)
     REFRESH_IBASE
     testl   %eax, %eax
     jnz     MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_cmp_long.S b/runtime/interpreter/mterp/x86/op_cmp_long.S
index bd86738..1f729b0 100644
--- a/runtime/interpreter/mterp/x86/op_cmp_long.S
+++ b/runtime/interpreter/mterp/x86/op_cmp_long.S
@@ -5,17 +5,17 @@
     /* cmp-long vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG_HIGH %eax %eax                 # eax <- v[BB+1], BB is clobbered
+    GET_VREG_HIGH %eax, %eax                # eax <- v[BB+1], BB is clobbered
     cmpl    VREG_HIGH_ADDRESS(%ecx), %eax
     jl      .L${opcode}_smaller
     jg      .L${opcode}_bigger
     movzbl  2(rPC), %eax                    # eax <- BB, restore BB
-    GET_VREG %eax %eax                      # eax <- v[BB]
+    GET_VREG %eax, %eax                     # eax <- v[BB]
     sub     VREG_ADDRESS(%ecx), %eax
     ja      .L${opcode}_bigger
     jb      .L${opcode}_smaller
 .L${opcode}_finish:
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 .L${opcode}_bigger:
diff --git a/runtime/interpreter/mterp/x86/op_const.S b/runtime/interpreter/mterp/x86/op_const.S
index dc69530..544d63b 100644
--- a/runtime/interpreter/mterp/x86/op_const.S
+++ b/runtime/interpreter/mterp/x86/op_const.S
@@ -1,4 +1,4 @@
     /* const vAA, #+BBBBbbbb */
     movl    2(rPC), %eax                    # grab all 32 bits at once
-    SET_VREG %eax rINST                     # vAA<- eax
+    SET_VREG %eax, rINST                    # vAA<- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_const_16.S b/runtime/interpreter/mterp/x86/op_const_16.S
index f5707cf..97cd5fa 100644
--- a/runtime/interpreter/mterp/x86/op_const_16.S
+++ b/runtime/interpreter/mterp/x86/op_const_16.S
@@ -1,4 +1,4 @@
     /* const/16 vAA, #+BBBB */
     movswl  2(rPC), %ecx                    # ecx <- ssssBBBB
-    SET_VREG %ecx rINST                     # vAA <- ssssBBBB
+    SET_VREG %ecx, rINST                    # vAA <- ssssBBBB
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_const_4.S b/runtime/interpreter/mterp/x86/op_const_4.S
index c336411..a60ba96 100644
--- a/runtime/interpreter/mterp/x86/op_const_4.S
+++ b/runtime/interpreter/mterp/x86/op_const_4.S
@@ -3,5 +3,5 @@
     movl    $$0xf, rINST
     andl    %eax, rINST                     # rINST <- A
     sarl    $$4, %eax
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_const_class.S b/runtime/interpreter/mterp/x86/op_const_class.S
index eceb8bc..343e110 100644
--- a/runtime/interpreter/mterp/x86/op_const_class.S
+++ b/runtime/interpreter/mterp/x86/op_const_class.S
@@ -7,7 +7,7 @@
     movl    %eax, OUT_ARG2(%esp)
     movl    rSELF, %eax
     movl    %eax, OUT_ARG3(%esp)
-    call    MterpConstClass                 # (index, tgt_reg, shadow_frame, self)
+    call    SYMBOL(MterpConstClass)         # (index, tgt_reg, shadow_frame, self)
     REFRESH_IBASE
     testl   %eax, %eax
     jnz     MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_const_high16.S b/runtime/interpreter/mterp/x86/op_const_high16.S
index da78d1b..576967a 100644
--- a/runtime/interpreter/mterp/x86/op_const_high16.S
+++ b/runtime/interpreter/mterp/x86/op_const_high16.S
@@ -1,5 +1,5 @@
     /* const/high16 vAA, #+BBBB0000 */
     movzwl  2(rPC), %eax                    # eax <- 0000BBBB
     sall    $$16, %eax                      # eax <- BBBB0000
-    SET_VREG %eax rINST                     # vAA <- eax
+    SET_VREG %eax, rINST                    # vAA <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_const_string.S b/runtime/interpreter/mterp/x86/op_const_string.S
index 9acd6fe..bbac69c 100644
--- a/runtime/interpreter/mterp/x86/op_const_string.S
+++ b/runtime/interpreter/mterp/x86/op_const_string.S
@@ -7,7 +7,7 @@
     movl    %eax, OUT_ARG2(%esp)
     movl    rSELF, %eax
     movl    %eax, OUT_ARG3(%esp)
-    call    MterpConstString                # (index, tgt_reg, shadow_frame, self)
+    call    SYMBOL(MterpConstString)        # (index, tgt_reg, shadow_frame, self)
     REFRESH_IBASE
     testl   %eax, %eax
     jnz     MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_const_string_jumbo.S b/runtime/interpreter/mterp/x86/op_const_string_jumbo.S
index 5c728b2..4236807 100644
--- a/runtime/interpreter/mterp/x86/op_const_string_jumbo.S
+++ b/runtime/interpreter/mterp/x86/op_const_string_jumbo.S
@@ -7,7 +7,7 @@
     movl    %eax, OUT_ARG2(%esp)
     movl    rSELF, %eax
     movl    %eax, OUT_ARG3(%esp)
-    call    MterpConstString                # (index, tgt_reg, shadow_frame, self)
+    call    SYMBOL(MterpConstString)        # (index, tgt_reg, shadow_frame, self)
     REFRESH_IBASE
     testl   %eax, %eax
     jnz     MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_const_wide.S b/runtime/interpreter/mterp/x86/op_const_wide.S
index 745490e..3750728 100644
--- a/runtime/interpreter/mterp/x86/op_const_wide.S
+++ b/runtime/interpreter/mterp/x86/op_const_wide.S
@@ -2,6 +2,6 @@
     movl    2(rPC), %eax                    # eax <- lsw
     movzbl  rINSTbl, %ecx                   # ecx <- AA
     movl    6(rPC), rINST                   # rINST <- msw
-    SET_VREG %eax %ecx
-    SET_VREG_HIGH  rINST %ecx
+    SET_VREG %eax, %ecx
+    SET_VREG_HIGH  rINST, %ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
diff --git a/runtime/interpreter/mterp/x86/op_const_wide_16.S b/runtime/interpreter/mterp/x86/op_const_wide_16.S
index 8029cfe..1331c32 100644
--- a/runtime/interpreter/mterp/x86/op_const_wide_16.S
+++ b/runtime/interpreter/mterp/x86/op_const_wide_16.S
@@ -2,7 +2,7 @@
     movswl  2(rPC), %eax                    # eax <- ssssBBBB
     movl    rIBASE, %ecx                    # preserve rIBASE (cltd trashes it)
     cltd                                    # rIBASE:eax <- ssssssssssssBBBB
-    SET_VREG_HIGH rIBASE rINST              # store msw
-    SET_VREG %eax rINST                     # store lsw
+    SET_VREG_HIGH rIBASE, rINST             # store msw
+    SET_VREG %eax, rINST                    # store lsw
     movl    %ecx, rIBASE                    # restore rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_const_wide_32.S b/runtime/interpreter/mterp/x86/op_const_wide_32.S
index 3e23d3a..ed7d62b 100644
--- a/runtime/interpreter/mterp/x86/op_const_wide_32.S
+++ b/runtime/interpreter/mterp/x86/op_const_wide_32.S
@@ -2,7 +2,7 @@
     movl    2(rPC), %eax                    # eax <- BBBBbbbb
     movl    rIBASE, %ecx                    # preserve rIBASE (cltd trashes it)
     cltd                                    # rIBASE:eax <- ssssssssssssBBBB
-    SET_VREG_HIGH rIBASE rINST              # store msw
-    SET_VREG %eax rINST                     # store lsw
+    SET_VREG_HIGH rIBASE, rINST             # store msw
+    SET_VREG %eax, rINST                    # store lsw
     movl    %ecx, rIBASE                    # restore rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_const_wide_high16.S b/runtime/interpreter/mterp/x86/op_const_wide_high16.S
index d2a1119..11b9310 100644
--- a/runtime/interpreter/mterp/x86/op_const_wide_high16.S
+++ b/runtime/interpreter/mterp/x86/op_const_wide_high16.S
@@ -1,7 +1,7 @@
     /* const-wide/high16 vAA, #+BBBB000000000000 */
     movzwl  2(rPC), %eax                    # eax <- 0000BBBB
     sall    $$16, %eax                      # eax <- BBBB0000
-    SET_VREG_HIGH %eax rINST                # v[AA+1] <- eax
+    SET_VREG_HIGH %eax, rINST               # v[AA+1] <- eax
     xorl    %eax, %eax
-    SET_VREG %eax rINST                     # v[AA+0] <- eax
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_div_long.S b/runtime/interpreter/mterp/x86/op_div_long.S
index 5772826..e56a035 100644
--- a/runtime/interpreter/mterp/x86/op_div_long.S
+++ b/runtime/interpreter/mterp/x86/op_div_long.S
@@ -7,17 +7,17 @@
     mov     rIBASE, LOCAL0(%esp)            # save rIBASE/%edx
     mov     rINST, LOCAL1(%esp)             # save rINST/%ebx
     movzbl  3(rPC), %eax                    # eax <- CC
-    GET_VREG %ecx %eax
-    GET_VREG_HIGH %ebx %eax
+    GET_VREG %ecx, %eax
+    GET_VREG_HIGH %ebx, %eax
     movl    %ecx, %edx
     orl     %ebx, %ecx
     jz      common_errDivideByZero
     movzbl  2(rPC), %eax                    # eax <- BB
-    GET_VREG_HIGH %ecx %eax
-    GET_VREG %eax %eax
-    call    $routine
+    GET_VREG_HIGH %ecx, %eax
+    GET_VREG %eax, %eax
+    call    SYMBOL($routine)
     mov     LOCAL1(%esp), rINST             # restore rINST/%ebx
-    SET_VREG_HIGH rIBASE rINST
-    SET_VREG %eax rINST
+    SET_VREG_HIGH rIBASE, rINST
+    SET_VREG %eax, rINST
     mov     LOCAL0(%esp), rIBASE            # restore rIBASE/%edx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_div_long_2addr.S b/runtime/interpreter/mterp/x86/op_div_long_2addr.S
index 2696042..159cc44 100644
--- a/runtime/interpreter/mterp/x86/op_div_long_2addr.S
+++ b/runtime/interpreter/mterp/x86/op_div_long_2addr.S
@@ -10,16 +10,16 @@
     andb    $$0xf, rINSTbl                  # rINST <- A
     mov     rINST, LOCAL1(%esp)             # save rINST/%ebx
     movl    %ebx, %ecx
-    GET_VREG %edx %eax
-    GET_VREG_HIGH %ebx %eax
+    GET_VREG %edx, %eax
+    GET_VREG_HIGH %ebx, %eax
     movl    %edx, %eax
     orl     %ebx, %eax
     jz      common_errDivideByZero
-    GET_VREG %eax %ecx
-    GET_VREG_HIGH %ecx %ecx
-    call    $routine
+    GET_VREG %eax, %ecx
+    GET_VREG_HIGH %ecx, %ecx
+    call    SYMBOL($routine)
     mov     LOCAL1(%esp), rINST             # restore rINST/%ebx
-    SET_VREG_HIGH rIBASE rINST
-    SET_VREG %eax rINST
+    SET_VREG_HIGH rIBASE, rINST
+    SET_VREG %eax, rINST
     mov     LOCAL0(%esp), rIBASE            # restore rIBASE/%edx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_fill_array_data.S b/runtime/interpreter/mterp/x86/op_fill_array_data.S
index 0cb05f6..004aed9 100644
--- a/runtime/interpreter/mterp/x86/op_fill_array_data.S
+++ b/runtime/interpreter/mterp/x86/op_fill_array_data.S
@@ -2,10 +2,10 @@
     EXPORT_PC
     movl    2(rPC), %ecx                    # ecx <- BBBBbbbb
     leal    (rPC,%ecx,2), %ecx              # ecx <- PC + BBBBbbbb*2
-    GET_VREG %eax rINST                     # eax <- vAA (array object)
+    GET_VREG %eax, rINST                    # eax <- vAA (array object)
     movl    %eax, OUT_ARG0(%esp)
     movl    %ecx, OUT_ARG1(%esp)
-    call    MterpFillArrayData              # (obj, payload)
+    call    SYMBOL(MterpFillArrayData)      # (obj, payload)
     REFRESH_IBASE
     testl   %eax, %eax                      # 0 means an exception is thrown
     jz      MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_filled_new_array.S b/runtime/interpreter/mterp/x86/op_filled_new_array.S
index c08b09f..a2bac29 100644
--- a/runtime/interpreter/mterp/x86/op_filled_new_array.S
+++ b/runtime/interpreter/mterp/x86/op_filled_new_array.S
@@ -13,7 +13,7 @@
     movl    rPC, OUT_ARG1(%esp)
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG2(%esp)
-    call    $helper
+    call    SYMBOL($helper)
     REFRESH_IBASE
     testl   %eax, %eax                      # 0 means an exception is thrown
     jz      MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_iget.S b/runtime/interpreter/mterp/x86/op_iget.S
index 868ffd0..9932610 100644
--- a/runtime/interpreter/mterp/x86/op_iget.S
+++ b/runtime/interpreter/mterp/x86/op_iget.S
@@ -15,15 +15,15 @@
     movl    %eax, OUT_ARG2(%esp)            # referrer
     mov     rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)            # self
-    call    $helper
+    call    SYMBOL($helper)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $$0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException                  # bail out
     andb    $$0xf, rINSTbl                  # rINST <- A
     .if $is_object
-    SET_VREG_OBJECT %eax rINST              # fp[A] <-value
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <-value
     .else
-    SET_VREG %eax rINST                     # fp[A] <-value
+    SET_VREG %eax, rINST                    # fp[A] <-value
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iget_object_quick.S b/runtime/interpreter/mterp/x86/op_iget_object_quick.S
index b09772f..fe16694 100644
--- a/runtime/interpreter/mterp/x86/op_iget_object_quick.S
+++ b/runtime/interpreter/mterp/x86/op_iget_object_quick.S
@@ -2,16 +2,16 @@
     /* op vA, vB, offset@CCCC */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $$4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # vB (object we're operating on)
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
     movzwl  2(rPC), %eax                    # eax <- field byte offset
     movl    %ecx, OUT_ARG0(%esp)
     movl    %eax, OUT_ARG1(%esp)
     EXPORT_PC
-    call    artIGetObjectFromMterp          # (obj, offset)
+    call    SYMBOL(artIGetObjectFromMterp)  # (obj, offset)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $$0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException                  # bail out
     andb    $$0xf,rINSTbl                   # rINST <- A
-    SET_VREG_OBJECT %eax rINST              # fp[A] <- value
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <- value
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iget_quick.S b/runtime/interpreter/mterp/x86/op_iget_quick.S
index 372071c..1b7440f 100644
--- a/runtime/interpreter/mterp/x86/op_iget_quick.S
+++ b/runtime/interpreter/mterp/x86/op_iget_quick.S
@@ -3,11 +3,11 @@
     /* op vA, vB, offset@CCCC */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $$4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # vB (object we're operating on)
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
     movzwl  2(rPC), %eax                    # eax <- field byte offset
     testl   %ecx, %ecx                      # is object null?
     je      common_errNullObject
     ${load} (%ecx,%eax,1), %eax
     andb    $$0xf,rINSTbl                   # rINST <- A
-    SET_VREG %eax rINST                     # fp[A] <- value
+    SET_VREG %eax, rINST                    # fp[A] <- value
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iget_wide.S b/runtime/interpreter/mterp/x86/op_iget_wide.S
index 58e5a65..92126b4 100644
--- a/runtime/interpreter/mterp/x86/op_iget_wide.S
+++ b/runtime/interpreter/mterp/x86/op_iget_wide.S
@@ -14,12 +14,12 @@
     movl    %eax, OUT_ARG2(%esp)            # referrer
     mov     rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)            # self
-    call    artGet64InstanceFromCode
+    call    SYMBOL(artGet64InstanceFromCode)
     mov     rSELF, %ecx
     cmpl    $$0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException                  # bail out
     andb    $$0xf, rINSTbl                  # rINST <- A
-    SET_VREG %eax rINST
-    SET_VREG_HIGH %edx rINST
+    SET_VREG %eax, rINST
+    SET_VREG_HIGH %edx, rINST
     REFRESH_IBASE_FROM_SELF %ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iget_wide_quick.S b/runtime/interpreter/mterp/x86/op_iget_wide_quick.S
index 8be336b..7ce74cc 100644
--- a/runtime/interpreter/mterp/x86/op_iget_wide_quick.S
+++ b/runtime/interpreter/mterp/x86/op_iget_wide_quick.S
@@ -1,11 +1,11 @@
     /* iget-wide-quick vA, vB, offset@CCCC */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $$4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # vB (object we're operating on)
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
     movzwl  2(rPC), %eax                    # eax <- field byte offset
     testl   %ecx, %ecx                      # is object null?
     je      common_errNullObject
     movq    (%ecx,%eax,1), %xmm0
     andb    $$0xf, rINSTbl                  # rINST <- A
-    SET_WIDE_FP_VREG %xmm0 rINST
+    SET_WIDE_FP_VREG %xmm0, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_instance_of.S b/runtime/interpreter/mterp/x86/op_instance_of.S
index c9bfba5..fd5bf44 100644
--- a/runtime/interpreter/mterp/x86/op_instance_of.S
+++ b/runtime/interpreter/mterp/x86/op_instance_of.S
@@ -16,11 +16,11 @@
     movl    %eax, OUT_ARG2(%esp)
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)
-    call    MterpInstanceOf                 # (index, &obj, method, self)
+    call    SYMBOL(MterpInstanceOf)         # (index, &obj, method, self)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $$0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException
     andb    $$0xf, rINSTbl                  # rINSTbl <- A
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_int_to_long.S b/runtime/interpreter/mterp/x86/op_int_to_long.S
index 736ea69..6f9ea26 100644
--- a/runtime/interpreter/mterp/x86/op_int_to_long.S
+++ b/runtime/interpreter/mterp/x86/op_int_to_long.S
@@ -1,12 +1,12 @@
     /* int to long vA, vB */
     movzbl  rINSTbl, %eax                   # eax <- +A
     sarl    $$4, %eax                       # eax <- B
-    GET_VREG %eax %eax                      # eax <- vB
+    GET_VREG %eax, %eax                     # eax <- vB
     andb    $$0xf, rINSTbl                  # rINST <- A
     movl    rIBASE, %ecx                    # cltd trashes rIBASE/edx
     cltd                                    # rINST:eax<- sssssssBBBBBBBB
-    SET_VREG_HIGH rIBASE rINST              # v[A+1] <- rIBASE
-    SET_VREG %eax rINST                     # v[A+0] <- %eax
+    SET_VREG_HIGH rIBASE, rINST             # v[A+1] <- rIBASE
+    SET_VREG %eax, rINST                    # v[A+0] <- %eax
     movl    %ecx, rIBASE
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
diff --git a/runtime/interpreter/mterp/x86/op_iput.S b/runtime/interpreter/mterp/x86/op_iput.S
index f8a6549..13cfe5c 100644
--- a/runtime/interpreter/mterp/x86/op_iput.S
+++ b/runtime/interpreter/mterp/x86/op_iput.S
@@ -18,7 +18,7 @@
     movl    %eax, OUT_ARG2(%esp)            # fp[A]
     movl    OFF_FP_METHOD(rFP), %eax
     movl    %eax, OUT_ARG3(%esp)            # referrer
-    call    $handler
+    call    SYMBOL($handler)
     testl   %eax, %eax
     jnz     MterpPossibleException
     REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_iput_object.S b/runtime/interpreter/mterp/x86/op_iput_object.S
index 20d57aa..f63075c 100644
--- a/runtime/interpreter/mterp/x86/op_iput_object.S
+++ b/runtime/interpreter/mterp/x86/op_iput_object.S
@@ -6,7 +6,7 @@
     movl    rINST, OUT_ARG2(%esp)
     movl    rSELF, %eax
     movl    %eax, OUT_ARG3(%esp)
-    call    MterpIputObject
+    call    SYMBOL(MterpIputObject)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_iput_object_quick.S b/runtime/interpreter/mterp/x86/op_iput_object_quick.S
index 4c7f4bd..d54b1b7 100644
--- a/runtime/interpreter/mterp/x86/op_iput_object_quick.S
+++ b/runtime/interpreter/mterp/x86/op_iput_object_quick.S
@@ -4,7 +4,7 @@
     movl    rPC, OUT_ARG1(%esp)
     REFRESH_INST ${opnum}
     movl    rINST, OUT_ARG2(%esp)
-    call    MterpIputObjectQuick
+    call    SYMBOL(MterpIputObjectQuick)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_iput_quick.S b/runtime/interpreter/mterp/x86/op_iput_quick.S
index e2f7caf..b67cee0 100644
--- a/runtime/interpreter/mterp/x86/op_iput_quick.S
+++ b/runtime/interpreter/mterp/x86/op_iput_quick.S
@@ -3,11 +3,11 @@
     /* op vA, vB, offset@CCCC */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $$4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # vB (object we're operating on)
+    GET_VREG %ecx, %ecx                     # vB (object we're operating on)
     testl   %ecx, %ecx                      # is object null?
     je      common_errNullObject
     andb    $$0xf, rINSTbl                  # rINST <- A
-    GET_VREG rINST rINST                    # rINST <- v[A]
+    GET_VREG rINST, rINST                   # rINST <- v[A]
     movzwl  2(rPC), %eax                    # eax <- field byte offset
     ${store}    ${reg}, (%ecx,%eax,1)
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iput_wide.S b/runtime/interpreter/mterp/x86/op_iput_wide.S
index 92cb770..573e14d 100644
--- a/runtime/interpreter/mterp/x86/op_iput_wide.S
+++ b/runtime/interpreter/mterp/x86/op_iput_wide.S
@@ -12,7 +12,7 @@
     movl    %eax, OUT_ARG2(%esp)            # &fp[A]
     movl    OFF_FP_METHOD(rFP), %eax
     movl    %eax, OUT_ARG3(%esp)            # referrer
-    call    artSet64InstanceFromMterp
+    call    SYMBOL(artSet64InstanceFromMterp)
     testl   %eax, %eax
     jnz     MterpPossibleException
     REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_iput_wide_quick.S b/runtime/interpreter/mterp/x86/op_iput_wide_quick.S
index 72285c5..17de6f8 100644
--- a/runtime/interpreter/mterp/x86/op_iput_wide_quick.S
+++ b/runtime/interpreter/mterp/x86/op_iput_wide_quick.S
@@ -1,12 +1,12 @@
     /* iput-wide-quick vA, vB, offset@CCCC */
     movzbl    rINSTbl, %ecx                 # ecx<- BA
     sarl      $$4, %ecx                     # ecx<- B
-    GET_VREG  %ecx %ecx                     # vB (object we're operating on)
+    GET_VREG  %ecx, %ecx                    # vB (object we're operating on)
     testl     %ecx, %ecx                    # is object null?
     je        common_errNullObject
     movzwl    2(rPC), %eax                  # eax<- field byte offset
     leal      (%ecx,%eax,1), %ecx           # ecx<- Address of 64-bit target
     andb      $$0xf, rINSTbl                # rINST<- A
-    GET_WIDE_FP_VREG %xmm0 rINST            # xmm0<- fp[A]/fp[A+1]
+    GET_WIDE_FP_VREG %xmm0, rINST           # xmm0<- fp[A]/fp[A+1]
     movq      %xmm0, (%ecx)                 # obj.field<- r0/r1
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_monitor_enter.S b/runtime/interpreter/mterp/x86/op_monitor_enter.S
index 8236fb3..9e885bd 100644
--- a/runtime/interpreter/mterp/x86/op_monitor_enter.S
+++ b/runtime/interpreter/mterp/x86/op_monitor_enter.S
@@ -3,11 +3,11 @@
  */
     /* monitor-enter vAA */
     EXPORT_PC
-    GET_VREG %ecx rINST
+    GET_VREG %ecx, rINST
     movl    %ecx, OUT_ARG0(%esp)
     movl    rSELF, %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    artLockObjectFromCode           # (object, self)
+    call    SYMBOL(artLockObjectFromCode)   # (object, self)
     REFRESH_IBASE
     testl   %eax, %eax
     jnz     MterpException
diff --git a/runtime/interpreter/mterp/x86/op_monitor_exit.S b/runtime/interpreter/mterp/x86/op_monitor_exit.S
index 56d4eb3..0904800 100644
--- a/runtime/interpreter/mterp/x86/op_monitor_exit.S
+++ b/runtime/interpreter/mterp/x86/op_monitor_exit.S
@@ -7,11 +7,11 @@
  */
     /* monitor-exit vAA */
     EXPORT_PC
-    GET_VREG %ecx rINST
+    GET_VREG %ecx, rINST
     movl    %ecx, OUT_ARG0(%esp)
     movl    rSELF, %eax
     movl    %eax, OUT_ARG1(%esp)
-    call    artUnlockObjectFromCode         # (object, self)
+    call    SYMBOL(artUnlockObjectFromCode) # (object, self)
     REFRESH_IBASE
     testl   %eax, %eax
     jnz     MterpException
diff --git a/runtime/interpreter/mterp/x86/op_move.S b/runtime/interpreter/mterp/x86/op_move.S
index 0a531be..ea173b9 100644
--- a/runtime/interpreter/mterp/x86/op_move.S
+++ b/runtime/interpreter/mterp/x86/op_move.S
@@ -4,10 +4,10 @@
     movzbl  rINSTbl, %eax                   # eax <- BA
     andb    $$0xf, %al                      # eax <- A
     shrl    $$4, rINST                      # rINST <- B
-    GET_VREG rINST rINST
+    GET_VREG rINST, rINST
     .if $is_object
-    SET_VREG_OBJECT rINST %eax              # fp[A] <- fp[B]
+    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
     .else
-    SET_VREG rINST %eax                     # fp[A] <- fp[B]
+    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_16.S b/runtime/interpreter/mterp/x86/op_move_16.S
index 0773f41..454deb5 100644
--- a/runtime/interpreter/mterp/x86/op_move_16.S
+++ b/runtime/interpreter/mterp/x86/op_move_16.S
@@ -3,10 +3,10 @@
     /* op vAAAA, vBBBB */
     movzwl  4(rPC), %ecx                    # ecx <- BBBB
     movzwl  2(rPC), %eax                    # eax <- AAAA
-    GET_VREG rINST %ecx
+    GET_VREG rINST, %ecx
     .if $is_object
-    SET_VREG_OBJECT rINST %eax              # fp[A] <- fp[B]
+    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
     .else
-    SET_VREG rINST %eax                     # fp[A] <- fp[B]
+    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_move_exception.S b/runtime/interpreter/mterp/x86/op_move_exception.S
index e37cdfa..d8dc74f 100644
--- a/runtime/interpreter/mterp/x86/op_move_exception.S
+++ b/runtime/interpreter/mterp/x86/op_move_exception.S
@@ -1,6 +1,6 @@
     /* move-exception vAA */
     movl    rSELF, %ecx
     movl    THREAD_EXCEPTION_OFFSET(%ecx), %eax
-    SET_VREG_OBJECT %eax rINST              # fp[AA] <- exception object
+    SET_VREG_OBJECT %eax, rINST             # fp[AA] <- exception object
     movl    $$0, THREAD_EXCEPTION_OFFSET(%ecx)
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_from16.S b/runtime/interpreter/mterp/x86/op_move_from16.S
index 623a4d3..e869855 100644
--- a/runtime/interpreter/mterp/x86/op_move_from16.S
+++ b/runtime/interpreter/mterp/x86/op_move_from16.S
@@ -3,10 +3,10 @@
     /* op vAA, vBBBB */
     movzx   rINSTbl, %eax                   # eax <- AA
     movw    2(rPC), rINSTw                  # rINSTw <- BBBB
-    GET_VREG rINST rINST                    # rINST <- fp[BBBB]
+    GET_VREG rINST, rINST                   # rINST <- fp[BBBB]
     .if $is_object
-    SET_VREG_OBJECT rINST %eax              # fp[A] <- fp[B]
+    SET_VREG_OBJECT rINST, %eax             # fp[A] <- fp[B]
     .else
-    SET_VREG rINST %eax                     # fp[A] <- fp[B]
+    SET_VREG rINST, %eax                    # fp[A] <- fp[B]
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_move_result.S b/runtime/interpreter/mterp/x86/op_move_result.S
index 414f2cb..f6f2129 100644
--- a/runtime/interpreter/mterp/x86/op_move_result.S
+++ b/runtime/interpreter/mterp/x86/op_move_result.S
@@ -4,8 +4,8 @@
     movl    OFF_FP_RESULT_REGISTER(rFP), %eax    # get pointer to result JType.
     movl    (%eax), %eax                    # r0 <- result.i.
     .if $is_object
-    SET_VREG_OBJECT %eax rINST              # fp[A] <- fp[B]
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <- fp[B]
     .else
-    SET_VREG %eax rINST                     # fp[A] <- fp[B]
+    SET_VREG %eax, rINST                    # fp[A] <- fp[B]
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_result_wide.S b/runtime/interpreter/mterp/x86/op_move_result_wide.S
index 0c1683b..7818cce 100644
--- a/runtime/interpreter/mterp/x86/op_move_result_wide.S
+++ b/runtime/interpreter/mterp/x86/op_move_result_wide.S
@@ -2,6 +2,6 @@
     movl    OFF_FP_RESULT_REGISTER(rFP), %eax    # get pointer to result JType.
     movl    4(%eax), %ecx                   # Get high
     movl    (%eax), %eax                    # Get low
-    SET_VREG %eax rINST                     # v[AA+0] <- eax
-    SET_VREG_HIGH %ecx rINST                # v[AA+1] <- ecx
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
+    SET_VREG_HIGH %ecx, rINST               # v[AA+1] <- ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_wide.S b/runtime/interpreter/mterp/x86/op_move_wide.S
index 9c0e985..79ce7b7 100644
--- a/runtime/interpreter/mterp/x86/op_move_wide.S
+++ b/runtime/interpreter/mterp/x86/op_move_wide.S
@@ -3,6 +3,6 @@
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $$4, rINST                      # rINST <- B
     andb    $$0xf, %cl                      # ecx <- A
-    GET_WIDE_FP_VREG %xmm0 rINST            # xmm0 <- v[B]
-    SET_WIDE_FP_VREG %xmm0 %ecx             # v[A] <- xmm0
+    GET_WIDE_FP_VREG %xmm0, rINST           # xmm0 <- v[B]
+    SET_WIDE_FP_VREG %xmm0, %ecx            # v[A] <- xmm0
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_wide_16.S b/runtime/interpreter/mterp/x86/op_move_wide_16.S
index 7522c27..a6b8596 100644
--- a/runtime/interpreter/mterp/x86/op_move_wide_16.S
+++ b/runtime/interpreter/mterp/x86/op_move_wide_16.S
@@ -2,6 +2,6 @@
     /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
     movzwl  4(rPC), %ecx                    # ecx<- BBBB
     movzwl  2(rPC), %eax                    # eax<- AAAA
-    GET_WIDE_FP_VREG %xmm0 %ecx             # xmm0 <- v[B]
-    SET_WIDE_FP_VREG %xmm0 %eax             # v[A] <- xmm0
+    GET_WIDE_FP_VREG %xmm0, %ecx            # xmm0 <- v[B]
+    SET_WIDE_FP_VREG %xmm0, %eax            # v[A] <- xmm0
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_move_wide_from16.S b/runtime/interpreter/mterp/x86/op_move_wide_from16.S
index 5ad2cb4..ec344de 100644
--- a/runtime/interpreter/mterp/x86/op_move_wide_from16.S
+++ b/runtime/interpreter/mterp/x86/op_move_wide_from16.S
@@ -2,6 +2,6 @@
     /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
     movzwl  2(rPC), %ecx                    # ecx <- BBBB
     movzbl  rINSTbl, %eax                   # eax <- AAAA
-    GET_WIDE_FP_VREG %xmm0 %ecx             # xmm0 <- v[B]
-    SET_WIDE_FP_VREG %xmm0 %eax             # v[A] <- xmm0
+    GET_WIDE_FP_VREG %xmm0, %ecx            # xmm0 <- v[B]
+    SET_WIDE_FP_VREG %xmm0, %eax            # v[A] <- xmm0
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_mul_int.S b/runtime/interpreter/mterp/x86/op_mul_int.S
index a367ab7..77f4659 100644
--- a/runtime/interpreter/mterp/x86/op_mul_int.S
+++ b/runtime/interpreter/mterp/x86/op_mul_int.S
@@ -4,9 +4,9 @@
     /* mul vAA, vBB, vCC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
-    GET_VREG %eax %eax                      # eax <- vBB
+    GET_VREG %eax, %eax                     # eax <- vBB
     mov     rIBASE, LOCAL0(%esp)
     imull   (rFP,%ecx,4), %eax              # trashes rIBASE/edx
     mov     LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_mul_int_2addr.S b/runtime/interpreter/mterp/x86/op_mul_int_2addr.S
index 6005075..f92a28e 100644
--- a/runtime/interpreter/mterp/x86/op_mul_int_2addr.S
+++ b/runtime/interpreter/mterp/x86/op_mul_int_2addr.S
@@ -1,10 +1,10 @@
     /* mul vA, vB */
     movzx   rINSTbl, %ecx                   # ecx <- A+
     sarl    $$4, rINST                      # rINST <- B
-    GET_VREG %eax rINST                     # eax <- vB
+    GET_VREG %eax, rINST                    # eax <- vB
     andb    $$0xf, %cl                      # ecx <- A
     mov     rIBASE, LOCAL0(%esp)
     imull   (rFP,%ecx,4), %eax              # trashes rIBASE/edx
     mov     LOCAL0(%esp), rIBASE
-    SET_VREG %eax %ecx
+    SET_VREG %eax, %ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_mul_int_lit16.S b/runtime/interpreter/mterp/x86/op_mul_int_lit16.S
index 1c0fde3..31ab613 100644
--- a/runtime/interpreter/mterp/x86/op_mul_int_lit16.S
+++ b/runtime/interpreter/mterp/x86/op_mul_int_lit16.S
@@ -2,11 +2,11 @@
     /* Need A in rINST, ssssCCCC in ecx, vB in eax */
     movzbl  rINSTbl, %eax                   # eax <- 000000BA
     sarl    $$4, %eax                       # eax <- B
-    GET_VREG %eax %eax                      # eax <- vB
+    GET_VREG %eax, %eax                     # eax <- vB
     movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
     andb    $$0xf, rINSTbl                  # rINST <- A
     mov     rIBASE, LOCAL0(%esp)
     imull   %ecx, %eax                      # trashes rIBASE/edx
     mov     LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_mul_int_lit8.S b/runtime/interpreter/mterp/x86/op_mul_int_lit8.S
index 4d7a22d..6637aa7 100644
--- a/runtime/interpreter/mterp/x86/op_mul_int_lit8.S
+++ b/runtime/interpreter/mterp/x86/op_mul_int_lit8.S
@@ -1,9 +1,9 @@
     /* mul/lit8 vAA, vBB, #+CC */
     movzbl  2(rPC), %eax                    # eax <- BB
     movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
-    GET_VREG  %eax  %eax                    # eax <- rBB
+    GET_VREG  %eax, %eax                    # eax <- rBB
     mov     rIBASE, LOCAL0(%esp)
     imull   %ecx, %eax                      # trashes rIBASE/edx
     mov     LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST
+    SET_VREG %eax, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_mul_long.S b/runtime/interpreter/mterp/x86/op_mul_long.S
index 3746e41..f35ca13 100644
--- a/runtime/interpreter/mterp/x86/op_mul_long.S
+++ b/runtime/interpreter/mterp/x86/op_mul_long.S
@@ -27,7 +27,7 @@
     mov     LOCAL0(%esp), rPC               # restore Interpreter PC
     mov     LOCAL1(%esp), rFP               # restore FP
     leal    (%ecx,rIBASE), rIBASE           # full result now in rIBASE:%eax
-    SET_VREG_HIGH rIBASE rINST              # v[B+1] <- rIBASE
+    SET_VREG_HIGH rIBASE, rINST             # v[B+1] <- rIBASE
     mov     LOCAL2(%esp), rIBASE            # restore IBASE
-    SET_VREG %eax rINST                     # v[B] <- eax
+    SET_VREG %eax, rINST                    # v[B] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_neg_long.S b/runtime/interpreter/mterp/x86/op_neg_long.S
index 7cc17f0..30da247 100644
--- a/runtime/interpreter/mterp/x86/op_neg_long.S
+++ b/runtime/interpreter/mterp/x86/op_neg_long.S
@@ -2,12 +2,12 @@
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $$4, %ecx                       # ecx <- B
     andb    $$0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax %ecx                      # eax <- v[B+0]
-    GET_VREG_HIGH %ecx %ecx                 # ecx <- v[B+1]
+    GET_VREG %eax, %ecx                     # eax <- v[B+0]
+    GET_VREG_HIGH %ecx, %ecx                # ecx <- v[B+1]
     negl    %eax
     adcl    $$0, %ecx
     negl    %ecx
-    SET_VREG %eax rINST                     # v[A+0] <- eax
-    SET_VREG_HIGH %ecx rINST                # v[A+1] <- ecx
+    SET_VREG %eax, rINST                    # v[A+0] <- eax
+    SET_VREG_HIGH %ecx, rINST               # v[A+1] <- ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
diff --git a/runtime/interpreter/mterp/x86/op_new_array.S b/runtime/interpreter/mterp/x86/op_new_array.S
index 6852183..2490477 100644
--- a/runtime/interpreter/mterp/x86/op_new_array.S
+++ b/runtime/interpreter/mterp/x86/op_new_array.S
@@ -14,7 +14,7 @@
     movl    rINST, OUT_ARG2(%esp)
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)
-    call    MterpNewArray
+    call    SYMBOL(MterpNewArray)
     REFRESH_IBASE
     testl   %eax, %eax                      # 0 means an exception is thrown
     jz      MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_new_instance.S b/runtime/interpreter/mterp/x86/op_new_instance.S
index a3632e8..712a5eb 100644
--- a/runtime/interpreter/mterp/x86/op_new_instance.S
+++ b/runtime/interpreter/mterp/x86/op_new_instance.S
@@ -9,7 +9,7 @@
     movl    %ecx, OUT_ARG1(%esp)
     REFRESH_INST ${opnum}
     movl    rINST, OUT_ARG2(%esp)
-    call    MterpNewInstance
+    call    SYMBOL(MterpNewInstance)
     REFRESH_IBASE
     testl   %eax, %eax                 # 0 means an exception is thrown
     jz      MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_not_long.S b/runtime/interpreter/mterp/x86/op_not_long.S
index 55666a1..8f706e1 100644
--- a/runtime/interpreter/mterp/x86/op_not_long.S
+++ b/runtime/interpreter/mterp/x86/op_not_long.S
@@ -2,10 +2,10 @@
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     sarl    $$4, %ecx                       # ecx <- B
     andb    $$0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax %ecx                      # eax <- v[B+0]
-    GET_VREG_HIGH %ecx %ecx                 # ecx <- v[B+1]
+    GET_VREG %eax, %ecx                     # eax <- v[B+0]
+    GET_VREG_HIGH %ecx, %ecx                # ecx <- v[B+1]
     notl    %eax
     notl    %ecx
-    SET_VREG %eax rINST                     # v[A+0] <- eax
-    SET_VREG_HIGH %ecx rINST                # v[A+1] <- ecx
+    SET_VREG %eax, rINST                    # v[A+0] <- eax
+    SET_VREG_HIGH %ecx, rINST               # v[A+1] <- ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_packed_switch.S b/runtime/interpreter/mterp/x86/op_packed_switch.S
index 4e39a48..230b58e 100644
--- a/runtime/interpreter/mterp/x86/op_packed_switch.S
+++ b/runtime/interpreter/mterp/x86/op_packed_switch.S
@@ -10,11 +10,11 @@
  */
     /* op vAA, +BBBB */
     movl    2(rPC), %ecx                    # ecx <- BBBBbbbb
-    GET_VREG %eax rINST                     # eax <- vAA
+    GET_VREG %eax, rINST                    # eax <- vAA
     leal    (rPC,%ecx,2), %ecx              # ecx <- PC + BBBBbbbb*2
     movl    %eax, OUT_ARG1(%esp)            # ARG1 <- vAA
     movl    %ecx, OUT_ARG0(%esp)            # ARG0 <- switchData
-    call    $func
+    call    SYMBOL($func)
     addl    %eax, %eax
     leal    (rPC, %eax), rPC
     FETCH_INST
diff --git a/runtime/interpreter/mterp/x86/op_return.S b/runtime/interpreter/mterp/x86/op_return.S
index 183b3bf..8e3cfad 100644
--- a/runtime/interpreter/mterp/x86/op_return.S
+++ b/runtime/interpreter/mterp/x86/op_return.S
@@ -5,13 +5,13 @@
  */
     /* op vAA */
     .extern MterpThreadFenceForConstructor
-    call    MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
     testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
-    call    MterpSuspendCheck
+    call    SYMBOL(MterpSuspendCheck)
 1:
-    GET_VREG %eax rINST                     # eax <- vAA
+    GET_VREG %eax, rINST                    # eax <- vAA
     xorl    %ecx, %ecx
     jmp     MterpReturn
diff --git a/runtime/interpreter/mterp/x86/op_return_void.S b/runtime/interpreter/mterp/x86/op_return_void.S
index f3e24c7..a14a4f6 100644
--- a/runtime/interpreter/mterp/x86/op_return_void.S
+++ b/runtime/interpreter/mterp/x86/op_return_void.S
@@ -1,10 +1,10 @@
     .extern MterpThreadFenceForConstructor
-    call    MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
     testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
-    call    MterpSuspendCheck
+    call    SYMBOL(MterpSuspendCheck)
 1:
     xorl    %eax, %eax
     xorl    %ecx, %ecx
diff --git a/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S b/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S
index add4e20..1d0e933 100644
--- a/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S
+++ b/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S
@@ -2,7 +2,7 @@
     testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
-    call    MterpSuspendCheck
+    call    SYMBOL(MterpSuspendCheck)
 1:
     xorl    %eax, %eax
     xorl    %ecx, %ecx
diff --git a/runtime/interpreter/mterp/x86/op_return_wide.S b/runtime/interpreter/mterp/x86/op_return_wide.S
index 34a3380..7d1850a 100644
--- a/runtime/interpreter/mterp/x86/op_return_wide.S
+++ b/runtime/interpreter/mterp/x86/op_return_wide.S
@@ -3,13 +3,13 @@
  */
     /* return-wide vAA */
     .extern MterpThreadFenceForConstructor
-    call    MterpThreadFenceForConstructor
+    call    SYMBOL(MterpThreadFenceForConstructor)
     movl    rSELF, %eax
     testl   $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
     jz      1f
     movl    %eax, OUT_ARG0(%esp)
-    call    MterpSuspendCheck
+    call    SYMBOL(MterpSuspendCheck)
 1:
-    GET_VREG %eax rINST                     # eax <- v[AA+0]
-    GET_VREG_HIGH %ecx rINST                # ecx <- v[AA+1]
+    GET_VREG %eax, rINST                    # eax <- v[AA+0]
+    GET_VREG_HIGH %ecx, rINST               # ecx <- v[AA+1]
     jmp     MterpReturn
diff --git a/runtime/interpreter/mterp/x86/op_sget.S b/runtime/interpreter/mterp/x86/op_sget.S
index ed5aedf..ec96458 100644
--- a/runtime/interpreter/mterp/x86/op_sget.S
+++ b/runtime/interpreter/mterp/x86/op_sget.S
@@ -13,14 +13,14 @@
     movl    %eax, OUT_ARG1(%esp)            # referrer
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG2(%esp)            # self
-    call    $helper
+    call    SYMBOL($helper)
     movl    rSELF, %ecx
     REFRESH_IBASE_FROM_SELF %ecx
     cmpl    $$0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException
     .if $is_object
-    SET_VREG_OBJECT %eax rINST              # fp[A] <- value
+    SET_VREG_OBJECT %eax, rINST             # fp[A] <- value
     .else
-    SET_VREG %eax rINST                     # fp[A] <- value
+    SET_VREG %eax, rINST                    # fp[A] <- value
     .endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_sget_wide.S b/runtime/interpreter/mterp/x86/op_sget_wide.S
index 76b993b..833f266 100644
--- a/runtime/interpreter/mterp/x86/op_sget_wide.S
+++ b/runtime/interpreter/mterp/x86/op_sget_wide.S
@@ -11,11 +11,11 @@
     movl    %eax, OUT_ARG1(%esp)            # referrer
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG2(%esp)            # self
-    call    artGet64StaticFromCode
+    call    SYMBOL(artGet64StaticFromCode)
     movl    rSELF, %ecx
     cmpl    $$0, THREAD_EXCEPTION_OFFSET(%ecx)
     jnz     MterpException
-    SET_VREG %eax rINST                     # fp[A]<- low part
-    SET_VREG_HIGH %edx rINST                # fp[A+1]<- high part
+    SET_VREG %eax, rINST                    # fp[A]<- low part
+    SET_VREG_HIGH %edx, rINST               # fp[A+1]<- high part
     REFRESH_IBASE_FROM_SELF %ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_shl_long.S b/runtime/interpreter/mterp/x86/op_shl_long.S
index 56d13e3..aa58a93 100644
--- a/runtime/interpreter/mterp/x86/op_shl_long.S
+++ b/runtime/interpreter/mterp/x86/op_shl_long.S
@@ -13,9 +13,9 @@
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
     movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE %eax               # ecx <- v[BB+1]
-    GET_VREG %ecx %ecx                      # ecx <- vCC
-    GET_VREG %eax %eax                      # eax <- v[BB+0]
+    GET_VREG_HIGH rIBASE, %eax              # rIBASE <- v[BB+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vCC
+    GET_VREG %eax, %eax                     # eax <- v[BB+0]
     shldl   %eax,rIBASE
     sall    %cl, %eax
     testb   $$32, %cl
@@ -23,7 +23,7 @@
     movl    %eax, rIBASE
     xorl    %eax, %eax
 2:
-    SET_VREG_HIGH rIBASE rINST              # v[AA+1] <- rIBASE
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
     movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST                     # v[AA+0] <- %eax
+    SET_VREG %eax, rINST                    # v[AA+0] <- %eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_shl_long_2addr.S b/runtime/interpreter/mterp/x86/op_shl_long_2addr.S
index 5da873f..6bbf49c 100644
--- a/runtime/interpreter/mterp/x86/op_shl_long_2addr.S
+++ b/runtime/interpreter/mterp/x86/op_shl_long_2addr.S
@@ -8,11 +8,11 @@
     /* rINSTw gets AA */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     andb    $$0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax rINST                     # eax <- v[AA+0]
+    GET_VREG %eax, rINST                    # eax <- v[AA+0]
     sarl    $$4, %ecx                       # ecx <- B
     movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE rINST              # rIBASE <- v[AA+1]
-    GET_VREG %ecx %ecx                      # ecx <- vBB
+    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[AA+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vBB
     shldl   %eax, rIBASE
     sall    %cl, %eax
     testb   $$32, %cl
@@ -20,7 +20,7 @@
     movl    %eax, rIBASE
     xorl    %eax, %eax
 2:
-    SET_VREG_HIGH rIBASE rINST              # v[AA+1] <- rIBASE
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
     movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST                     # v[AA+0] <- eax
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_shr_long.S b/runtime/interpreter/mterp/x86/op_shr_long.S
index 4490a9a..68aa0ee 100644
--- a/runtime/interpreter/mterp/x86/op_shr_long.S
+++ b/runtime/interpreter/mterp/x86/op_shr_long.S
@@ -13,9 +13,9 @@
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
     movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE %eax               # rIBASE<- v[BB+1]
-    GET_VREG %ecx %ecx                      # ecx <- vCC
-    GET_VREG %eax %eax                      # eax <- v[BB+0]
+    GET_VREG_HIGH rIBASE, %eax              # rIBASE<- v[BB+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vCC
+    GET_VREG %eax, %eax                     # eax <- v[BB+0]
     shrdl   rIBASE, %eax
     sarl    %cl, rIBASE
     testb   $$32, %cl
@@ -23,7 +23,7 @@
     movl    rIBASE, %eax
     sarl    $$31, rIBASE
 2:
-    SET_VREG_HIGH rIBASE rINST              # v[AA+1] <- rIBASE
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
     movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST                     # v[AA+0] <- eax
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_shr_long_2addr.S b/runtime/interpreter/mterp/x86/op_shr_long_2addr.S
index 57494f9..148bd1b 100644
--- a/runtime/interpreter/mterp/x86/op_shr_long_2addr.S
+++ b/runtime/interpreter/mterp/x86/op_shr_long_2addr.S
@@ -8,11 +8,11 @@
     /* rINSTw gets AA */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     andb    $$0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax rINST                     # eax <- v[AA+0]
+    GET_VREG %eax, rINST                    # eax <- v[AA+0]
     sarl    $$4, %ecx                       # ecx <- B
     movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE rINST              # rIBASE <- v[AA+1]
-    GET_VREG %ecx %ecx                      # ecx <- vBB
+    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[AA+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vBB
     shrdl   rIBASE, %eax
     sarl    %cl, rIBASE
     testb   $$32, %cl
@@ -20,7 +20,7 @@
     movl    rIBASE, %eax
     sarl    $$31, rIBASE
 2:
-    SET_VREG_HIGH rIBASE rINST              # v[AA+1] <- rIBASE
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
     movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST                     # v[AA+0] <- eax
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_sput.S b/runtime/interpreter/mterp/x86/op_sput.S
index 04a8f23..a199281 100644
--- a/runtime/interpreter/mterp/x86/op_sput.S
+++ b/runtime/interpreter/mterp/x86/op_sput.S
@@ -9,13 +9,13 @@
     EXPORT_PC
     movzwl  2(rPC), %eax
     movl    %eax, OUT_ARG0(%esp)            # field ref BBBB
-    GET_VREG rINST rINST
+    GET_VREG rINST, rINST
     movl    rINST, OUT_ARG1(%esp)           # fp[AA]
     movl    OFF_FP_METHOD(rFP), %eax
     movl    %eax, OUT_ARG2(%esp)            # referrer
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)            # self
-    call    $helper
+    call    SYMBOL($helper)
     testl   %eax, %eax
     jnz     MterpException
     REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_sput_object.S b/runtime/interpreter/mterp/x86/op_sput_object.S
index 0480e00..e3e57fc 100644
--- a/runtime/interpreter/mterp/x86/op_sput_object.S
+++ b/runtime/interpreter/mterp/x86/op_sput_object.S
@@ -6,7 +6,7 @@
     movl    rINST, OUT_ARG2(%esp)
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)
-    call    MterpSputObject
+    call    SYMBOL(MterpSputObject)
     testl   %eax, %eax
     jz      MterpException
     REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_sput_wide.S b/runtime/interpreter/mterp/x86/op_sput_wide.S
index d58d5af..7544838 100644
--- a/runtime/interpreter/mterp/x86/op_sput_wide.S
+++ b/runtime/interpreter/mterp/x86/op_sput_wide.S
@@ -13,7 +13,7 @@
     movl    %eax, OUT_ARG2(%esp)            # &fp[AA]
     movl    rSELF, %ecx
     movl    %ecx, OUT_ARG3(%esp)            # self
-    call    artSet64IndirectStaticFromMterp
+    call    SYMBOL(artSet64IndirectStaticFromMterp)
     testl   %eax, %eax
     jnz     MterpException
     REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_throw.S b/runtime/interpreter/mterp/x86/op_throw.S
index 15b20b5..a6e6b1e 100644
--- a/runtime/interpreter/mterp/x86/op_throw.S
+++ b/runtime/interpreter/mterp/x86/op_throw.S
@@ -3,7 +3,7 @@
  */
     /* throw vAA */
     EXPORT_PC
-    GET_VREG %eax rINST                     # eax<- vAA (exception object)
+    GET_VREG %eax, rINST                    # eax<- vAA (exception object)
     testl   %eax, %eax
     jz      common_errNullObject
     movl    rSELF,%ecx
diff --git a/runtime/interpreter/mterp/x86/op_ushr_long.S b/runtime/interpreter/mterp/x86/op_ushr_long.S
index 287946e..9527c9c 100644
--- a/runtime/interpreter/mterp/x86/op_ushr_long.S
+++ b/runtime/interpreter/mterp/x86/op_ushr_long.S
@@ -13,9 +13,9 @@
     movzbl  2(rPC), %eax                    # eax <- BB
     movzbl  3(rPC), %ecx                    # ecx <- CC
     movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE %eax               # rIBASE <- v[BB+1]
-    GET_VREG %ecx %ecx                      # ecx <- vCC
-    GET_VREG %eax %eax                      # eax <- v[BB+0]
+    GET_VREG_HIGH rIBASE, %eax              # rIBASE <- v[BB+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vCC
+    GET_VREG %eax, %eax                     # eax <- v[BB+0]
     shrdl   rIBASE, %eax
     shrl    %cl, rIBASE
     testb   $$32, %cl
@@ -23,7 +23,7 @@
     movl    rIBASE, %eax
     xorl    rIBASE, rIBASE
 2:
-    SET_VREG_HIGH rIBASE rINST              # v[AA+1] <- rIBASE
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
     movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST                     # v[BB+0] <- eax
+    SET_VREG %eax, rINST                    # v[BB+0] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_ushr_long_2addr.S b/runtime/interpreter/mterp/x86/op_ushr_long_2addr.S
index 39c2724..72fcc36 100644
--- a/runtime/interpreter/mterp/x86/op_ushr_long_2addr.S
+++ b/runtime/interpreter/mterp/x86/op_ushr_long_2addr.S
@@ -8,11 +8,11 @@
     /* rINSTw gets AA */
     movzbl  rINSTbl, %ecx                   # ecx <- BA
     andb    $$0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax rINST                     # eax <- v[AA+0]
+    GET_VREG %eax, rINST                    # eax <- v[AA+0]
     sarl    $$4, %ecx                       # ecx <- B
     movl    rIBASE, LOCAL0(%esp)
-    GET_VREG_HIGH rIBASE rINST              # rIBASE <- v[AA+1]
-    GET_VREG %ecx %ecx                      # ecx <- vBB
+    GET_VREG_HIGH rIBASE, rINST             # rIBASE <- v[AA+1]
+    GET_VREG %ecx, %ecx                     # ecx <- vBB
     shrdl   rIBASE, %eax
     shrl    %cl, rIBASE
     testb   $$32, %cl
@@ -20,7 +20,7 @@
     movl    rIBASE, %eax
     xorl    rIBASE, rIBASE
 2:
-    SET_VREG_HIGH rIBASE rINST              # v[AA+1] <- rIBASE
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
     movl    LOCAL0(%esp), rIBASE
-    SET_VREG %eax rINST                     # v[AA+0] <- eax
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/shop2addr.S b/runtime/interpreter/mterp/x86/shop2addr.S
index 94d3545..96c9954 100644
--- a/runtime/interpreter/mterp/x86/shop2addr.S
+++ b/runtime/interpreter/mterp/x86/shop2addr.S
@@ -5,9 +5,9 @@
     /* shift/2addr vA, vB */
     movzx   rINSTbl, %ecx                   # eax <- BA
     sarl    $$4, %ecx                       # ecx <- B
-    GET_VREG %ecx %ecx                      # eax <- vBB
+    GET_VREG %ecx, %ecx                     # ecx <- vBB
     andb    $$0xf, rINSTbl                  # rINST <- A
-    GET_VREG %eax rINST                     # eax <- vAA
+    GET_VREG %eax, rINST                    # eax <- vAA
     $instr                                  # ex: sarl %cl, %eax
-    SET_VREG $result rINST
+    SET_VREG $result, rINST
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/unop.S b/runtime/interpreter/mterp/x86/unop.S
index 00d3e15..db09fc0 100644
--- a/runtime/interpreter/mterp/x86/unop.S
+++ b/runtime/interpreter/mterp/x86/unop.S
@@ -6,8 +6,8 @@
     /* unop vA, vB */
     movzbl  rINSTbl,%ecx                    # ecx <- A+
     sarl    $$4,rINST                       # rINST <- B
-    GET_VREG %eax rINST                     # eax <- vB
+    GET_VREG %eax, rINST                    # eax <- vB
     andb    $$0xf,%cl                       # ecx <- A
     $instr
-    SET_VREG %eax %ecx
+    SET_VREG %eax, %ecx
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index fa5c41d..e38a684 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -25,15 +25,19 @@
 #include "jit_code_cache.h"
 #include "jit_instrumentation.h"
 #include "oat_file_manager.h"
+#include "oat_quick_method_header.h"
 #include "offline_profiling_info.h"
 #include "profile_saver.h"
 #include "runtime.h"
 #include "runtime_options.h"
+#include "stack_map.h"
 #include "utils.h"
 
 namespace art {
 namespace jit {
 
+static constexpr bool kEnableOnStackReplacement = false;
+
 JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
   auto* jit_options = new JitOptions;
   jit_options->use_jit_ = options.GetOrDefault(RuntimeArgumentMap::UseJIT);
@@ -43,6 +47,8 @@
       options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheMaxCapacity);
   jit_options->compile_threshold_ =
       options.GetOrDefault(RuntimeArgumentMap::JITCompileThreshold);
+  // TODO(ngeoffray): Make this a proper option.
+  jit_options->osr_threshold_ = jit_options->compile_threshold_ * 2;
   jit_options->warmup_threshold_ =
       options.GetOrDefault(RuntimeArgumentMap::JITWarmupThreshold);
   jit_options->dump_info_on_shutdown_ =
@@ -121,7 +127,7 @@
     *error_msg = "JIT couldn't find jit_unload entry point";
     return false;
   }
-  jit_compile_method_ = reinterpret_cast<bool (*)(void*, ArtMethod*, Thread*)>(
+  jit_compile_method_ = reinterpret_cast<bool (*)(void*, ArtMethod*, Thread*, bool)>(
       dlsym(jit_library_handle_, "jit_compile_method"));
   if (jit_compile_method_ == nullptr) {
     dlclose(jit_library_handle_);
@@ -156,7 +162,7 @@
   return true;
 }
 
-bool Jit::CompileMethod(ArtMethod* method, Thread* self) {
+bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
   DCHECK(!method->IsRuntimeMethod());
   // Don't compile the method if it has breakpoints.
   if (Dbg::IsDebuggerActive() && Dbg::MethodHasAnyBreakpoints(method)) {
@@ -171,10 +177,11 @@
     return false;
   }
 
-  if (!code_cache_->NotifyCompilationOf(method, self)) {
+  if (!code_cache_->NotifyCompilationOf(method, self, osr)) {
+    VLOG(jit) << "JIT not compiling " << PrettyMethod(method) << " due to code cache";
     return false;
   }
-  bool success = jit_compile_method_(jit_compiler_handle_, method, self);
+  bool success = jit_compile_method_(jit_compiler_handle_, method, self, osr);
   code_cache_->DoneCompiling(method, self);
   return success;
 }
@@ -224,9 +231,11 @@
   }
 }
 
-void Jit::CreateInstrumentationCache(size_t compile_threshold, size_t warmup_threshold) {
+void Jit::CreateInstrumentationCache(size_t compile_threshold,
+                                     size_t warmup_threshold,
+                                     size_t osr_threshold) {
   instrumentation_cache_.reset(
-      new jit::JitInstrumentationCache(compile_threshold, warmup_threshold));
+      new jit::JitInstrumentationCache(compile_threshold, warmup_threshold, osr_threshold));
 }
 
 void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
@@ -255,5 +264,128 @@
   }
 }
 
+extern "C" void art_quick_osr_stub(void** stack,
+                                   uint32_t stack_size_in_bytes,
+                                   const uint8_t* native_pc,
+                                   JValue* result,
+                                   const char* shorty,
+                                   Thread* self);
+
+bool Jit::MaybeDoOnStackReplacement(Thread* thread,
+                                    ArtMethod* method,
+                                    uint32_t dex_pc,
+                                    int32_t dex_pc_offset,
+                                    JValue* result) {
+  if (!kEnableOnStackReplacement) {
+    return false;
+  }
+
+  Jit* jit = Runtime::Current()->GetJit();
+  if (jit == nullptr) {
+    return false;
+  }
+
+  if (kRuntimeISA == kMips || kRuntimeISA == kMips64) {
+    VLOG(jit) << "OSR not supported on this platform";
+    return false;
+  }
+
+  // Get the actual Java method if this method is from a proxy class. The compiler
+  // and the JIT code cache do not expect methods from proxy classes.
+  method = method->GetInterfaceMethodIfProxy(sizeof(void*));
+
+  // Cheap check if the method has been compiled already. That's an indicator that we should
+  // osr into it.
+  if (!jit->GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
+    return false;
+  }
+
+  const OatQuickMethodHeader* osr_method = jit->GetCodeCache()->LookupOsrMethodHeader(method);
+  if (osr_method == nullptr) {
+    // No osr method yet, just return to the interpreter.
+    return false;
+  }
+
+  const size_t number_of_vregs = method->GetCodeItem()->registers_size_;
+  CodeInfo code_info = osr_method->GetOptimizedCodeInfo();
+  StackMapEncoding encoding = code_info.ExtractEncoding();
+
+  // Find stack map starting at the target dex_pc.
+  StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset, encoding);
+  if (!stack_map.IsValid()) {
+    // There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
+    // hope that the next branch has one.
+    return false;
+  }
+
+  // We found a stack map, now fill the frame with dex register values from the interpreter's
+  // shadow frame.
+  DexRegisterMap vreg_map =
+      code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
+
+  ShadowFrame* shadow_frame = thread->PopShadowFrame();
+
+  size_t frame_size = osr_method->GetFrameSizeInBytes();
+  void** memory = reinterpret_cast<void**>(malloc(frame_size));
+  memset(memory, 0, frame_size);
+
+  // Art ABI: ArtMethod is at the bottom of the stack.
+  memory[0] = method;
+
+  if (!vreg_map.IsValid()) {
+    // If we don't have a dex register map, then there are no live dex registers at
+    // this dex pc.
+  } else {
+    for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
+      DexRegisterLocation::Kind location =
+          vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+      if (location == DexRegisterLocation::Kind::kNone) {
+        // Dex register is dead or uninitialized.
+        continue;
+      }
+
+      if (location == DexRegisterLocation::Kind::kConstant) {
+        // We skip constants because the compiled code knows how to handle them.
+        continue;
+      }
+
+      DCHECK(location == DexRegisterLocation::Kind::kInStack);
+
+      int32_t vreg_value = shadow_frame->GetVReg(vreg);
+      int32_t slot_offset = vreg_map.GetStackOffsetInBytes(vreg,
+                                                           number_of_vregs,
+                                                           code_info,
+                                                           encoding);
+      DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
+      DCHECK_GT(slot_offset, 0);
+      (reinterpret_cast<int32_t*>(memory))[slot_offset / sizeof(int32_t)] = vreg_value;
+    }
+  }
+
+  const uint8_t* native_pc = stack_map.GetNativePcOffset(encoding) + osr_method->GetEntryPoint();
+  VLOG(jit) << "Jumping to "
+            << PrettyMethod(method)
+            << "@"
+            << std::hex << reinterpret_cast<uintptr_t>(native_pc);
+  {
+    ManagedStack fragment;
+    thread->PushManagedStackFragment(&fragment);
+    (*art_quick_osr_stub)(memory,
+                          frame_size,
+                          native_pc,
+                          result,
+                          method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(),
+                          thread);
+    if (UNLIKELY(thread->GetException() == Thread::GetDeoptimizationException())) {
+      thread->DeoptimizeWithDeoptimizationException(result);
+    }
+    thread->PopManagedStackFragment(fragment);
+  }
+  free(memory);
+  thread->PushShadowFrame(shadow_frame);
+  VLOG(jit) << "Done running OSR code for " << PrettyMethod(method);
+  return true;
+}
+
 }  // namespace jit
 }  // namespace art
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index a80f51f..042da92 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -49,9 +49,11 @@
 
   virtual ~Jit();
   static Jit* Create(JitOptions* options, std::string* error_msg);
-  bool CompileMethod(ArtMethod* method, Thread* self)
+  bool CompileMethod(ArtMethod* method, Thread* self, bool osr)
       SHARED_REQUIRES(Locks::mutator_lock_);
-  void CreateInstrumentationCache(size_t compile_threshold, size_t warmup_threshold);
+  void CreateInstrumentationCache(size_t compile_threshold,
+                                  size_t warmup_threshold,
+                                  size_t osr_threshold);
   void CreateThreadPool();
   CompilerCallbacks* GetCompilerCallbacks() {
     return compiler_callbacks_;
@@ -88,6 +90,17 @@
 
   bool JitAtFirstUse();
 
+  // If an OSR compiled version is available for `method`,
+  // and `dex_pc + dex_pc_offset` is an entry point of that compiled
+  // version, this method will jump to the compiled code, let it run,
+  // and return true afterwards. Return false otherwise.
+  static bool MaybeDoOnStackReplacement(Thread* thread,
+                                        ArtMethod* method,
+                                        uint32_t dex_pc,
+                                        int32_t dex_pc_offset,
+                                        JValue* result)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
  private:
   Jit();
   bool LoadCompiler(std::string* error_msg);
@@ -97,7 +110,7 @@
   void* jit_compiler_handle_;
   void* (*jit_load_)(CompilerCallbacks**, bool*);
   void (*jit_unload_)(void*);
-  bool (*jit_compile_method_)(void*, ArtMethod*, Thread*);
+  bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool);
   void (*jit_types_loaded_)(void*, mirror::Class**, size_t count);
 
   // Performance monitoring.
@@ -123,6 +136,9 @@
   size_t GetWarmupThreshold() const {
     return warmup_threshold_;
   }
+  size_t GetOsrThreshold() const {
+    return osr_threshold_;
+  }
   size_t GetCodeCacheInitialCapacity() const {
     return code_cache_initial_capacity_;
   }
@@ -155,6 +171,7 @@
   size_t code_cache_max_capacity_;
   size_t compile_threshold_;
   size_t warmup_threshold_;
+  size_t osr_threshold_;
   bool dump_info_on_shutdown_;
   bool save_profiling_info_;
 
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index f325949..464c441 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -184,7 +184,8 @@
                                   size_t core_spill_mask,
                                   size_t fp_spill_mask,
                                   const uint8_t* code,
-                                  size_t code_size) {
+                                  size_t code_size,
+                                  bool osr) {
   uint8_t* result = CommitCodeInternal(self,
                                        method,
                                        mapping_table,
@@ -194,7 +195,8 @@
                                        core_spill_mask,
                                        fp_spill_mask,
                                        code,
-                                       code_size);
+                                       code_size,
+                                       osr);
   if (result == nullptr) {
     // Retry.
     GarbageCollectCache(self);
@@ -207,7 +209,8 @@
                                 core_spill_mask,
                                 fp_spill_mask,
                                 code,
-                                code_size);
+                                code_size,
+                                osr);
   }
   return result;
 }
@@ -287,7 +290,8 @@
                                           size_t core_spill_mask,
                                           size_t fp_spill_mask,
                                           const uint8_t* code,
-                                          size_t code_size) {
+                                          size_t code_size,
+                                          bool osr) {
   size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
   // Ensure the header ends up at expected instruction alignment.
   size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
@@ -329,8 +333,12 @@
   {
     MutexLock mu(self, lock_);
     method_code_map_.Put(code_ptr, method);
-    Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
-        method, method_header->GetEntryPoint());
+    if (osr) {
+      osr_code_map_.Put(method, code_ptr);
+    } else {
+      Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
+          method, method_header->GetEntryPoint());
+    }
     if (collection_in_progress_) {
       // We need to update the live bitmap if there is a GC to ensure it sees this new
       // code.
@@ -338,7 +346,7 @@
     }
     last_update_time_ns_.StoreRelease(NanoTime());
     VLOG(jit)
-        << "JIT added "
+        << "JIT added (osr = " << std::boolalpha << osr << std::noboolalpha << ") "
         << PrettyMethod(method) << "@" << method
         << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
         << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
@@ -569,6 +577,10 @@
         info->GetMethod()->SetProfilingInfo(nullptr);
       }
     }
+
+    // Empty osr method map, as osr compile code will be deleted (except the ones
+    // on thread stacks).
+    osr_code_map_.clear();
   }
 
   // Run a checkpoint on all threads to mark the JIT compiled code they are running.
@@ -662,6 +674,15 @@
   return method_header;
 }
 
+OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) {
+  MutexLock mu(Thread::Current(), lock_);
+  auto it = osr_code_map_.find(method);
+  if (it == osr_code_map_.end()) {
+    return nullptr;
+  }
+  return OatQuickMethodHeader::FromCodePointer(it->second);
+}
+
 ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                               ArtMethod* method,
                                               const std::vector<uint32_t>& entries,
@@ -733,12 +754,15 @@
   return last_update_time_ns_.LoadAcquire();
 }
 
-bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self) {
-  if (ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
+bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr) {
+  if (!osr && ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
     return false;
   }
 
   MutexLock mu(self, lock_);
+  if (osr && (osr_code_map_.find(method) != osr_code_map_.end())) {
+    return false;
+  }
   ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
   if (info == nullptr || info->IsMethodBeingCompiled()) {
     return false;
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 69fc553..048f8d0 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -71,7 +71,7 @@
   // Number of compilations done throughout the lifetime of the JIT.
   size_t NumberOfCompilations() REQUIRES(!lock_);
 
-  bool NotifyCompilationOf(ArtMethod* method, Thread* self)
+  bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
       SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
@@ -89,7 +89,8 @@
                       size_t core_spill_mask,
                       size_t fp_spill_mask,
                       const uint8_t* code,
-                      size_t code_size)
+                      size_t code_size,
+                      bool osr)
       SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
@@ -131,6 +132,10 @@
       REQUIRES(!lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
+      REQUIRES(!lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   // Remove all methods in our cache that were allocated by 'alloc'.
   void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
       REQUIRES(!lock_)
@@ -187,7 +192,8 @@
                               size_t core_spill_mask,
                               size_t fp_spill_mask,
                               const uint8_t* code,
-                              size_t code_size)
+                              size_t code_size,
+                              bool osr)
       REQUIRES(!lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -237,8 +243,10 @@
   void* data_mspace_ GUARDED_BY(lock_);
   // Bitmap for collecting code and data.
   std::unique_ptr<CodeCacheBitmap> live_bitmap_;
-  // This map holds compiled code associated to the ArtMethod.
+  // Holds compiled code associated to the ArtMethod.
   SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
+  // Holds osr compiled code associated to the ArtMethod.
+  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
   // ProfilingInfo objects we have allocated.
   std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);
 
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index d597b36..a4e40ad 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -29,7 +29,8 @@
  public:
   enum TaskKind {
     kAllocateProfile,
-    kCompile
+    kCompile,
+    kCompileOsr
   };
 
   JitCompileTask(ArtMethod* method, TaskKind kind) : method_(method), kind_(kind) {
@@ -48,9 +49,14 @@
     ScopedObjectAccess soa(self);
     if (kind_ == kCompile) {
       VLOG(jit) << "JitCompileTask compiling method " << PrettyMethod(method_);
-      if (!Runtime::Current()->GetJit()->CompileMethod(method_, self)) {
+      if (!Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ false)) {
         VLOG(jit) << "Failed to compile method " << PrettyMethod(method_);
       }
+    } else if (kind_ == kCompileOsr) {
+      VLOG(jit) << "JitCompileTask compiling method osr " << PrettyMethod(method_);
+      if (!Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ true)) {
+        VLOG(jit) << "Failed to compile method osr " << PrettyMethod(method_);
+      }
     } else {
       DCHECK(kind_ == kAllocateProfile);
       if (ProfilingInfo::Create(self, method_, /* retry_allocation */ true)) {
@@ -72,9 +78,11 @@
 };
 
 JitInstrumentationCache::JitInstrumentationCache(size_t hot_method_threshold,
-                                                 size_t warm_method_threshold)
+                                                 size_t warm_method_threshold,
+                                                 size_t osr_method_threshold)
     : hot_method_threshold_(hot_method_threshold),
       warm_method_threshold_(warm_method_threshold),
+      osr_method_threshold_(osr_method_threshold),
       listener_(this) {
 }
 
@@ -151,6 +159,11 @@
     DCHECK(thread_pool_ != nullptr);
     thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile));
   }
+
+  if (sample_count == osr_method_threshold_) {
+    DCHECK(thread_pool_ != nullptr);
+    thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr));
+  }
 }
 
 JitInstrumentationListener::JitInstrumentationListener(JitInstrumentationCache* cache)
diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h
index 06559ad..d1c5c44 100644
--- a/runtime/jit/jit_instrumentation.h
+++ b/runtime/jit/jit_instrumentation.h
@@ -96,7 +96,9 @@
 // Keeps track of which methods are hot.
 class JitInstrumentationCache {
  public:
-  JitInstrumentationCache(size_t hot_method_threshold, size_t warm_method_threshold);
+  JitInstrumentationCache(size_t hot_method_threshold,
+                          size_t warm_method_threshold,
+                          size_t osr_method_threshold);
   void AddSamples(Thread* self, ArtMethod* method, size_t samples)
       SHARED_REQUIRES(Locks::mutator_lock_);
   void CreateThreadPool();
@@ -112,6 +114,7 @@
  private:
   size_t hot_method_threshold_;
   size_t warm_method_threshold_;
+  size_t osr_method_threshold_;
   JitInstrumentationListener listener_;
   std::unique_ptr<ThreadPool> thread_pool_;
 
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 18c52e4..c908b39 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -877,4 +877,22 @@
   return os;
 }
 
+void MemMap::TryReadable() {
+  if (base_begin_ == nullptr && base_size_ == 0) {
+    return;
+  }
+  CHECK_NE(prot_ & PROT_READ, 0);
+  volatile uint8_t* begin = reinterpret_cast<volatile uint8_t*>(base_begin_);
+  volatile uint8_t* end = begin + base_size_;
+  DCHECK(IsAligned<kPageSize>(begin));
+  DCHECK(IsAligned<kPageSize>(end));
+  // Read the first byte of each page. Use volatile to prevent the compiler from optimizing away the
+  // reads.
+  for (volatile uint8_t* ptr = begin; ptr < end; ptr += kPageSize) {
+    // This read could fault if protection wasn't set correctly.
+    uint8_t value = *ptr;
+    UNUSED(value);
+  }
+}
+
 }  // namespace art
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index ebd550a..3eaf576 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -184,6 +184,11 @@
   static void Init() REQUIRES(!Locks::mem_maps_lock_);
   static void Shutdown() REQUIRES(!Locks::mem_maps_lock_);
 
+  // If the map is PROT_READ, try to read each page of the map to check it is in fact readable (not
+  // faulting). This is used to diagnose a bug b/19894268 where mprotect doesn't seem to be working
+  // intermittently.
+  void TryReadable();
+
  private:
   MemMap(const std::string& name,
          uint8_t* begin,
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index b97d994..cdc6204 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -800,11 +800,11 @@
   return nullptr;
 }
 
-void Class::SetPreverifiedFlagOnAllMethods(size_t pointer_size) {
+void Class::SetSkipAccessChecksFlagOnAllMethods(size_t pointer_size) {
   DCHECK(IsVerified());
   for (auto& m : GetMethods(pointer_size)) {
     if (!m.IsNative() && m.IsInvokable()) {
-      m.SetPreverified();
+      m.SetSkipAccessChecks();
     }
   }
 }
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 1dae194..388a231 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -287,14 +287,18 @@
     return (GetAccessFlags() & kAccSynthetic) != 0;
   }
 
-  // Returns true if the class can avoid access checks.
-  bool IsPreverified() SHARED_REQUIRES(Locks::mutator_lock_) {
-    return (GetAccessFlags() & kAccPreverified) != 0;
+  // Return whether the class had run the verifier at least once.
+  // This does not necessarily mean that access checks are avoidable,
+  // since the class methods might still need to be run with access checks.
+  bool WasVerificationAttempted() SHARED_REQUIRES(Locks::mutator_lock_) {
+    return (GetAccessFlags() & kAccSkipAccessChecks) != 0;
   }
 
-  void SetPreverified() SHARED_REQUIRES(Locks::mutator_lock_) {
+  // Mark the class as having gone through a verification attempt.
+  // Mutually exclusive from whether or not each method is allowed to skip access checks.
+  void SetVerificationAttempted() SHARED_REQUIRES(Locks::mutator_lock_) {
     uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
-    SetAccessFlags(flags | kAccPreverified);
+    SetAccessFlags(flags | kAccVerificationAttempted);
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -1136,8 +1140,8 @@
   void VisitNativeRoots(Visitor& visitor, size_t pointer_size)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // When class is verified, set the kAccPreverified flag on each method.
-  void SetPreverifiedFlagOnAllMethods(size_t pointer_size)
+  // When class is verified, set the kAccSkipAccessChecks flag on each method.
+  void SetSkipAccessChecksFlagOnAllMethods(size_t pointer_size)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Get the descriptor of the class. In a few cases a std::string is required, rather than
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index 9946eab..ed4c5fc 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -42,14 +42,16 @@
 
 static constexpr uint32_t kAccJavaFlagsMask = 0xffff;  // bits set from Java sources (low 16)
 
-static constexpr uint32_t kAccConstructor =          0x00010000;  // method (dex only) <(cl)init>
-static constexpr uint32_t kAccDeclaredSynchronized = 0x00020000;  // method (dex only)
-static constexpr uint32_t kAccClassIsProxy =         0x00040000;  // class  (dex only)
-static constexpr uint32_t kAccPreverified =          0x00080000;  // class (runtime),
-                                                                  // method (dex only)
-static constexpr uint32_t kAccFastNative =           0x00080000;  // method (dex only)
-static constexpr uint32_t kAccMiranda =              0x00200000;  // method (dex only)
-static constexpr uint32_t kAccDefault =              0x00400000;  // method (runtime)
+static constexpr uint32_t kAccConstructor =           0x00010000;  // method (dex only) <(cl)init>
+static constexpr uint32_t kAccDeclaredSynchronized =  0x00020000;  // method (dex only)
+static constexpr uint32_t kAccClassIsProxy =          0x00040000;  // class  (dex only)
+// Used by a method to denote that its execution does not need to go through slow path interpreter.
+static constexpr uint32_t kAccSkipAccessChecks =      0x00080000;  // method (dex only)
+// Used by a class to denote that the verifier has attempted to check it at least once.
+static constexpr uint32_t kAccVerificationAttempted = 0x00080000;  // class (runtime)
+static constexpr uint32_t kAccFastNative =            0x00080000;  // method (dex only)
+static constexpr uint32_t kAccMiranda =               0x00200000;  // method (dex only)
+static constexpr uint32_t kAccDefault =               0x00400000;  // method (runtime)
 // This is set by the class linker during LinkInterfaceMethods. Prior to that point we do not know
 // if any particular method needs to be a default conflict. Used to figure out at runtime if
 // invoking this method will throw an exception.
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index a80585a..b5d859b 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -388,7 +388,7 @@
   auto h_args = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(args));
   Handle<mirror::Class> h_klass = hs.NewHandle(DecodeClass(soa, javaThis));
   ArtMethod* result = nullptr;
-  for (auto& m : h_klass->GetVirtualMethods(sizeof(void*))) {
+  for (auto& m : h_klass->GetDeclaredVirtualMethods(sizeof(void*))) {
     auto* np_method = m.GetInterfaceMethodIfProxy(sizeof(void*));
     // May cause thread suspension.
     mirror::String* np_name = np_method->GetNameAsString(soa.Self());
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index de90f0a..e76e443 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -24,6 +24,7 @@
 #include "base/stl_util.h"
 #include "class_linker.h"
 #include "dex_file-inl.h"
+#include "gc/scoped_gc_critical_section.h"
 #include "gc/space/image_space.h"
 #include "handle_scope-inl.h"
 #include "mirror/class_loader.h"
@@ -379,6 +380,9 @@
           // spaces array.
           {
             ScopedThreadSuspension sts(self, kSuspended);
+            gc::ScopedGCCriticalSection gcs(self,
+                                            gc::kGcCauseAddRemoveAppImageSpace,
+                                            gc::kCollectorTypeAddRemoveAppImageSpace);
             ScopedSuspendAll ssa("Add image space");
             runtime->GetHeap()->AddSpace(image_space.get());
           }
@@ -393,6 +397,9 @@
             dex_files.clear();
             {
               ScopedThreadSuspension sts(self, kSuspended);
+              gc::ScopedGCCriticalSection gcs(self,
+                                              gc::kGcCauseAddRemoveAppImageSpace,
+                                              gc::kCollectorTypeAddRemoveAppImageSpace);
               ScopedSuspendAll ssa("Remove image space");
               runtime->GetHeap()->RemoveSpace(image_space.get());
             }
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 5643739..2b7eca2 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -108,7 +108,7 @@
   }
 
   template <bool kCheckFrameSize = true>
-  uint32_t GetFrameSizeInBytes() {
+  uint32_t GetFrameSizeInBytes() const {
     uint32_t result = frame_info_.FrameSizeInBytes();
     if (kCheckFrameSize) {
       DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
diff --git a/runtime/openjdkjvm/OpenjdkJvm.cc b/runtime/openjdkjvm/OpenjdkJvm.cc
index ab0d934..725067a 100644
--- a/runtime/openjdkjvm/OpenjdkJvm.cc
+++ b/runtime/openjdkjvm/OpenjdkJvm.cc
@@ -66,17 +66,13 @@
 #undef LOG_TAG
 #define LOG_TAG "artopenjdk"
 
-using art::DEBUG;
 using art::WARNING;
-using art::VERBOSE;
 using art::INFO;
 using art::ERROR;
 using art::FATAL;
 
 /* posix open() with extensions; used by e.g. ZipFile */
 JNIEXPORT jint JVM_Open(const char* fname, jint flags, jint mode) {
-    LOG(DEBUG) << "JVM_Open fname='" << fname << "', flags=" << flags << ", mode=" << mode;
-
     /*
      * The call is expected to handle JVM_O_DELETE, which causes the file
      * to be removed after it is opened.  Also, some code seems to
@@ -86,7 +82,6 @@
     int fd = TEMP_FAILURE_RETRY(open(fname, flags & ~JVM_O_DELETE, mode));
     if (fd < 0) {
         int err = errno;
-        LOG(DEBUG) << "open(" << fname << ") failed: " << strerror(errno);
         if (err == EEXIST) {
             return JVM_EEXIST;
         } else {
@@ -95,39 +90,32 @@
     }
 
     if (flags & JVM_O_DELETE) {
-        LOG(DEBUG) << "Deleting '" << fname << "' after open\n";
         if (unlink(fname) != 0) {
             LOG(WARNING) << "Post-open deletion of '" << fname << "' failed: " << strerror(errno);
         }
-        /* ignore */
     }
 
-    LOG(VERBOSE) << "open(" << fname << ") --> " << fd;
     return fd;
 }
 
 /* posix close() */
 JNIEXPORT jint JVM_Close(jint fd) {
-    LOG(DEBUG) << "JVM_Close fd=" << fd;
     // don't want TEMP_FAILURE_RETRY here -- file is closed even if EINTR
     return close(fd);
 }
 
 /* posix read() */
 JNIEXPORT jint JVM_Read(jint fd, char* buf, jint nbytes) {
-    LOG(DEBUG) << "JVM_Read fd=" << fd << ", buf='" << buf << "', nbytes=" << nbytes;
     return TEMP_FAILURE_RETRY(read(fd, buf, nbytes));
 }
 
 /* posix write(); is used to write messages to stderr */
 JNIEXPORT jint JVM_Write(jint fd, char* buf, jint nbytes) {
-    LOG(DEBUG) << "JVM_Write fd=" << fd << ", buf='" << buf << "', nbytes=" << nbytes;
     return TEMP_FAILURE_RETRY(write(fd, buf, nbytes));
 }
 
 /* posix lseek() */
 JNIEXPORT jlong JVM_Lseek(jint fd, jlong offset, jint whence) {
-    LOG(DEBUG) << "JVM_Lseek fd=" << fd << ", offset=" << offset << ", whence=" << whence;
     return TEMP_FAILURE_RETRY(lseek(fd, offset, whence));
 }
 
@@ -136,42 +124,41 @@
  * mutexes.  They're used by ZipFile.
  */
 JNIEXPORT void* JVM_RawMonitorCreate(void) {
-    LOG(DEBUG) << "JVM_RawMonitorCreate";
-    pthread_mutex_t* newMutex =
+    pthread_mutex_t* mutex =
         reinterpret_cast<pthread_mutex_t*>(malloc(sizeof(pthread_mutex_t)));
-    pthread_mutex_init(newMutex, NULL);
-    return newMutex;
+    CHECK(mutex != nullptr);
+    CHECK_PTHREAD_CALL(pthread_mutex_init, (mutex, nullptr), "JVM_RawMonitorCreate");
+    return mutex;
 }
 
 JNIEXPORT void JVM_RawMonitorDestroy(void* mon) {
-    LOG(DEBUG) << "JVM_RawMonitorDestroy mon=" << mon;
-    pthread_mutex_destroy(reinterpret_cast<pthread_mutex_t*>(mon));
+    CHECK_PTHREAD_CALL(pthread_mutex_destroy,
+                       (reinterpret_cast<pthread_mutex_t*>(mon)),
+                       "JVM_RawMonitorDestroy");
+    free(mon);
 }
 
 JNIEXPORT jint JVM_RawMonitorEnter(void* mon) {
-    LOG(DEBUG) << "JVM_RawMonitorEnter mon=" << mon;
     return pthread_mutex_lock(reinterpret_cast<pthread_mutex_t*>(mon));
 }
 
 JNIEXPORT void JVM_RawMonitorExit(void* mon) {
-    LOG(DEBUG) << "JVM_RawMonitorExit mon=" << mon;
-    pthread_mutex_unlock(reinterpret_cast<pthread_mutex_t*>(mon));
+    CHECK_PTHREAD_CALL(pthread_mutex_unlock,
+                       (reinterpret_cast<pthread_mutex_t*>(mon)),
+                       "JVM_RawMonitorExit");
 }
 
 JNIEXPORT char* JVM_NativePath(char* path) {
-    LOG(DEBUG) << "JVM_NativePath path='" << path << "'";
     return path;
 }
 
 JNIEXPORT jint JVM_GetLastErrorString(char* buf, int len) {
 #if defined(__GLIBC__) || defined(__BIONIC__)
-  int err = errno;    // grab before JVM_TRACE can trash it
-  LOG(DEBUG) << "JVM_GetLastErrorString buf=" << buf << ", len=" << len;
-
   if (len == 0) {
     return 0;
   }
 
+  const int err = errno;
   char* result = strerror_r(err, buf, len);
   if (result != buf) {
     strncpy(buf, result, len);
@@ -203,27 +190,22 @@
 
 /* posix fsync() */
 JNIEXPORT jint JVM_Sync(jint fd) {
-    LOG(DEBUG) << "JVM_Sync fd=" << fd;
     return TEMP_FAILURE_RETRY(fsync(fd));
 }
 
 JNIEXPORT void* JVM_FindLibraryEntry(void* handle, const char* name) {
-    LOG(DEBUG) << "JVM_FindLibraryEntry handle=" << handle << " name=" << name;
     return dlsym(handle, name);
 }
 
-JNIEXPORT jlong JVM_CurrentTimeMillis(JNIEnv* env, jclass clazz ATTRIBUTE_UNUSED) {
-    LOG(DEBUG) << "JVM_CurrentTimeMillis env=" << env;
+JNIEXPORT jlong JVM_CurrentTimeMillis(JNIEnv* env ATTRIBUTE_UNUSED,
+                                      jclass clazz ATTRIBUTE_UNUSED) {
     struct timeval tv;
-
     gettimeofday(&tv, (struct timezone *) NULL);
     jlong when = tv.tv_sec * 1000LL + tv.tv_usec / 1000;
     return when;
 }
 
 JNIEXPORT jint JVM_Socket(jint domain, jint type, jint protocol) {
-    LOG(DEBUG) << "JVM_Socket domain=" << domain << ", type=" << type << ", protocol=" << protocol;
-
     return TEMP_FAILURE_RETRY(socket(domain, type, protocol));
 }
 
@@ -247,21 +229,15 @@
 
 JNIEXPORT jint JVM_SetSockOpt(jint fd, int level, int optname,
     const char* optval, int optlen) {
-  LOG(DEBUG) << "JVM_SetSockOpt fd=" << fd << ", level=" << level << ", optname=" << optname
-             << ", optval=" << optval << ", optlen=" << optlen;
   return TEMP_FAILURE_RETRY(setsockopt(fd, level, optname, optval, optlen));
 }
 
 JNIEXPORT jint JVM_SocketShutdown(jint fd, jint howto) {
-  LOG(DEBUG) << "JVM_SocketShutdown fd=" << fd << ", howto=" << howto;
   return TEMP_FAILURE_RETRY(shutdown(fd, howto));
 }
 
 JNIEXPORT jint JVM_GetSockOpt(jint fd, int level, int optname, char* optval,
   int* optlen) {
-  LOG(DEBUG) << "JVM_GetSockOpt fd=" << fd << ", level=" << level << ", optname=" << optname
-             << ", optval=" << optval << ", optlen=" << optlen;
-
   socklen_t len = *optlen;
   int cc = TEMP_FAILURE_RETRY(getsockopt(fd, level, optname, optval, &len));
   *optlen = len;
@@ -269,8 +245,6 @@
 }
 
 JNIEXPORT jint JVM_GetSockName(jint fd, struct sockaddr* addr, int* addrlen) {
-  LOG(DEBUG) << "JVM_GetSockName fd=" << fd << ", addr=" << addr << ", addrlen=" << addrlen;
-
   socklen_t len = *addrlen;
   int cc = TEMP_FAILURE_RETRY(getsockname(fd, addr, &len));
   *addrlen = len;
@@ -278,10 +252,7 @@
 }
 
 JNIEXPORT jint JVM_SocketAvailable(jint fd, jint* result) {
-  LOG(DEBUG) << "JVM_SocketAvailable fd=" << fd << ", result=" << result;
-
   if (TEMP_FAILURE_RETRY(ioctl(fd, FIONREAD, result)) < 0) {
-      LOG(DEBUG) << "ioctl(" << fd << ", FIONREAD) failed: " << strerror(errno);
       return JNI_FALSE;
   }
 
@@ -289,39 +260,27 @@
 }
 
 JNIEXPORT jint JVM_Send(jint fd, char* buf, jint nBytes, jint flags) {
-  LOG(DEBUG) << "JVM_Send fd=" << fd << ", buf=" << buf << ", nBytes="
-             << nBytes << ", flags=" << flags;
-
   return TEMP_FAILURE_RETRY(send(fd, buf, nBytes, flags));
 }
 
 JNIEXPORT jint JVM_SocketClose(jint fd) {
-  LOG(DEBUG) << "JVM_SocketClose fd=" << fd;
-
-    // don't want TEMP_FAILURE_RETRY here -- file is closed even if EINTR
+  // Don't want TEMP_FAILURE_RETRY here -- file is closed even if EINTR.
   return close(fd);
 }
 
 JNIEXPORT jint JVM_Listen(jint fd, jint count) {
-  LOG(DEBUG) << "JVM_Listen fd=" << fd << ", count=" << count;
-
   return TEMP_FAILURE_RETRY(listen(fd, count));
 }
 
 JNIEXPORT jint JVM_Connect(jint fd, struct sockaddr* addr, jint addrlen) {
-  LOG(DEBUG) << "JVM_Connect fd=" << fd << ", addr=" << addr << ", addrlen=" << addrlen;
-
   return TEMP_FAILURE_RETRY(connect(fd, addr, addrlen));
 }
 
 JNIEXPORT int JVM_GetHostName(char* name, int namelen) {
-  LOG(DEBUG) << "JVM_GetHostName name=" << name << ", namelen=" << namelen;
-
   return TEMP_FAILURE_RETRY(gethostname(name, namelen));
 }
 
 JNIEXPORT jstring JVM_InternString(JNIEnv* env, jstring jstr) {
-  LOG(DEBUG) << "JVM_InternString env=" << env << ", jstr=" << jstr;
   art::ScopedFastNativeObjectAccess soa(env);
   art::mirror::String* s = soa.Decode<art::mirror::String*>(jstr);
   art::mirror::String* result = s->Intern();
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index aa64ee3..2ea4b14 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -277,6 +277,8 @@
           .WithType<ExperimentalFlags>()
           .AppendValues()
           .IntoKey(M::Experimental)
+      .Define("-Xforce-nb-testing")
+          .IntoKey(M::ForceNativeBridge)
       .Ignore({
           "-ea", "-da", "-enableassertions", "-disableassertions", "--runtime-arg", "-esa",
           "-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:_",
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index 6b84c8f..9b10f2e 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -22,6 +22,7 @@
 #include "dex_file-inl.h"
 #include "dex_instruction.h"
 #include "dex_instruction-inl.h"
+#include "dex_instruction_utils.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache-inl.h"
 #include "verifier/method_verifier-inl.h"
@@ -33,6 +34,366 @@
 
 namespace art {
 
+namespace {  // anonymous namespace
+
+// Helper class for matching a pattern.
+class Matcher {
+ public:
+  // Match function type.
+  typedef bool MatchFn(Matcher* matcher);
+
+  template <size_t size>
+  static bool Match(const DexFile::CodeItem* code_item, MatchFn* const (&pattern)[size]);
+
+  // Match and advance.
+
+  static bool Mark(Matcher* matcher);
+
+  template <bool (Matcher::*Fn)()>
+  static bool Required(Matcher* matcher);
+
+  template <bool (Matcher::*Fn)()>
+  static bool Repeated(Matcher* matcher);  // On match, returns to the mark.
+
+  // Match an individual instruction.
+
+  template <Instruction::Code opcode> bool Opcode();
+  bool Const0();
+  bool IPutOnThis();
+
+ private:
+  explicit Matcher(const DexFile::CodeItem* code_item)
+      : code_item_(code_item),
+        instruction_(Instruction::At(code_item->insns_)),
+        pos_(0u),
+        mark_(0u) { }
+
+  static bool DoMatch(const DexFile::CodeItem* code_item, MatchFn* const* pattern, size_t size);
+
+  const DexFile::CodeItem* const code_item_;
+  const Instruction* instruction_;
+  size_t pos_;
+  size_t mark_;
+};
+
+template <size_t size>
+bool Matcher::Match(const DexFile::CodeItem* code_item, MatchFn* const (&pattern)[size]) {
+  return DoMatch(code_item, pattern, size);
+}
+
+bool Matcher::Mark(Matcher* matcher) {
+  matcher->pos_ += 1u;  // Advance to the next match function before marking.
+  matcher->mark_ = matcher->pos_;
+  return true;
+}
+
+template <bool (Matcher::*Fn)()>
+bool Matcher::Required(Matcher* matcher) {
+  if (!(matcher->*Fn)()) {
+    return false;
+  }
+  matcher->pos_ += 1u;
+  matcher->instruction_ = matcher->instruction_->Next();
+  return true;
+}
+
+template <bool (Matcher::*Fn)()>
+bool Matcher::Repeated(Matcher* matcher) {
+  if (!(matcher->*Fn)()) {
+    // Didn't match optional instruction, try the next match function.
+    matcher->pos_ += 1u;
+    return true;
+  }
+  matcher->pos_ = matcher->mark_;
+  matcher->instruction_ = matcher->instruction_->Next();
+  return true;
+}
+
+template <Instruction::Code opcode>
+bool Matcher::Opcode() {
+  return instruction_->Opcode() == opcode;
+}
+
+// Match const 0.
+bool Matcher::Const0() {
+  return IsInstructionDirectConst(instruction_->Opcode()) &&
+      (instruction_->Opcode() == Instruction::CONST_WIDE ? instruction_->VRegB_51l() == 0
+                                                         : instruction_->VRegB() == 0);
+}
+
+bool Matcher::IPutOnThis() {
+  DCHECK_NE(code_item_->ins_size_, 0u);
+  return IsInstructionIPut(instruction_->Opcode()) &&
+      instruction_->VRegB_22c() == code_item_->registers_size_ - code_item_->ins_size_;
+}
+
+bool Matcher::DoMatch(const DexFile::CodeItem* code_item, MatchFn* const* pattern, size_t size) {
+  Matcher matcher(code_item);
+  while (matcher.pos_ != size) {
+    if (!pattern[matcher.pos_](&matcher)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// Used for a single invoke in a constructor. In that situation, the method verifier makes
+// sure we invoke a constructor either in the same class or superclass with at least "this".
+ArtMethod* GetTargetConstructor(ArtMethod* method, const Instruction* invoke_direct)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  DCHECK_EQ(invoke_direct->Opcode(), Instruction::INVOKE_DIRECT);
+  DCHECK_EQ(invoke_direct->VRegC_35c(),
+            method->GetCodeItem()->registers_size_ - method->GetCodeItem()->ins_size_);
+  uint32_t method_index = invoke_direct->VRegB_35c();
+  size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+  ArtMethod* target_method =
+      method->GetDexCache()->GetResolvedMethod(method_index, pointer_size);
+  if (kIsDebugBuild && target_method != nullptr) {
+    CHECK(!target_method->IsStatic());
+    CHECK(target_method->IsConstructor());
+    CHECK(target_method->GetDeclaringClass() == method->GetDeclaringClass() ||
+          target_method->GetDeclaringClass() == method->GetDeclaringClass()->GetSuperClass());
+  }
+  return target_method;
+}
+
+// Return the forwarded arguments and check that all remaining arguments are zero.
+// If the check fails, return static_cast<size_t>(-1).
+size_t CountForwardedConstructorArguments(const DexFile::CodeItem* code_item,
+                                          const Instruction* invoke_direct,
+                                          uint16_t zero_vreg_mask) {
+  DCHECK_EQ(invoke_direct->Opcode(), Instruction::INVOKE_DIRECT);
+  size_t number_of_args = invoke_direct->VRegA_35c();
+  DCHECK_NE(number_of_args, 0u);
+  uint32_t args[Instruction::kMaxVarArgRegs];
+  invoke_direct->GetVarArgs(args);
+  uint16_t this_vreg = args[0];
+  DCHECK_EQ(this_vreg, code_item->registers_size_ - code_item->ins_size_);  // Checked by verifier.
+  size_t forwarded = 1u;
+  while (forwarded < number_of_args &&
+      args[forwarded] == this_vreg + forwarded &&
+      (zero_vreg_mask & (1u << args[forwarded])) == 0) {
+    ++forwarded;
+  }
+  for (size_t i = forwarded; i != number_of_args; ++i) {
+    if ((zero_vreg_mask & (1u << args[i])) == 0) {
+      return static_cast<size_t>(-1);
+    }
+  }
+  return forwarded;
+}
+
+uint16_t GetZeroVRegMask(const Instruction* const0) {
+  DCHECK(IsInstructionDirectConst(const0->Opcode()));
+  DCHECK((const0->Opcode() == Instruction::CONST_WIDE) ? const0->VRegB_51l() == 0u
+                                                       : const0->VRegB() == 0);
+  uint16_t base_mask = IsInstructionConstWide(const0->Opcode()) ? 3u : 1u;
+  return base_mask << const0->VRegA();
+}
+
+// We limit the number of IPUTs storing parameters. There can be any number
+// of IPUTs that store the value 0 as they are useless in a constructor as
+// the object always starts zero-initialized. We also eliminate all but the
+// last store to any field as they are not observable; not even if the field
+// is volatile as no reference to the object can escape from a constructor
+// with this pattern.
+static constexpr size_t kMaxConstructorIPuts = 3u;
+
+struct ConstructorIPutData {
+  ConstructorIPutData() : field_index(DexFile::kDexNoIndex16), arg(0u) { }
+
+  uint16_t field_index;
+  uint16_t arg;
+};
+
+bool RecordConstructorIPut(ArtMethod* method,
+                           const Instruction* new_iput,
+                           uint16_t this_vreg,
+                           uint16_t zero_vreg_mask,
+                           /*inout*/ ConstructorIPutData (&iputs)[kMaxConstructorIPuts])
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  DCHECK(IsInstructionIPut(new_iput->Opcode()));
+  uint32_t field_index = new_iput->VRegC_22c();
+  size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+  mirror::DexCache* dex_cache = method->GetDexCache();
+  ArtField* field = dex_cache->GetResolvedField(field_index, pointer_size);
+  if (UNLIKELY(field == nullptr)) {
+    return false;
+  }
+  // Remove previous IPUT to the same field, if any. Different field indexes may refer
+  // to the same field, so we need to compare resolved fields from the dex cache.
+  for (size_t old_pos = 0; old_pos != arraysize(iputs); ++old_pos) {
+    if (iputs[old_pos].field_index == DexFile::kDexNoIndex16) {
+      break;
+    }
+    ArtField* f = dex_cache->GetResolvedField(iputs[old_pos].field_index, pointer_size);
+    DCHECK(f != nullptr);
+    if (f == field) {
+      auto back_it = std::copy(iputs + old_pos + 1, iputs + arraysize(iputs), iputs + old_pos);
+      *back_it = ConstructorIPutData();
+      break;
+    }
+  }
+  // If the stored value isn't zero, record the IPUT.
+  if ((zero_vreg_mask & (1u << new_iput->VRegA_22c())) == 0u) {
+    size_t new_pos = 0;
+    while (new_pos != arraysize(iputs) && iputs[new_pos].field_index != DexFile::kDexNoIndex16) {
+      ++new_pos;
+    }
+    if (new_pos == arraysize(iputs)) {
+      return false;  // Exceeded capacity of the output array.
+    }
+    iputs[new_pos].field_index = field_index;
+    iputs[new_pos].arg = new_iput->VRegA_22c() - this_vreg;
+  }
+  return true;
+}
+
+bool DoAnalyseConstructor(const DexFile::CodeItem* code_item,
+                          ArtMethod* method,
+                          /*inout*/ ConstructorIPutData (&iputs)[kMaxConstructorIPuts])
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  // On entry we should not have any IPUTs yet.
+  DCHECK_EQ(0, std::count_if(
+      iputs,
+      iputs + arraysize(iputs),
+      [](const ConstructorIPutData& iput_data) {
+        return iput_data.field_index != DexFile::kDexNoIndex16;
+      }));
+
+  // Limit the maximum number of code units we're willing to match.
+  static constexpr size_t kMaxCodeUnits = 16u;
+
+  // Limit the number of registers that the constructor may use to 16.
+  // Given that IPUTs must use low 16 registers and we do not match MOVEs,
+  // this is a reasonable limitation.
+  static constexpr size_t kMaxVRegs = 16u;
+
+  // We try to match a constructor that calls another constructor (either in
+  // superclass or in the same class) with the same parameters, or with some
+  // parameters truncated (allowed only for calls to superclass constructor)
+  // or with extra parameters with value 0 (with any type, including null).
+  // This call can be followed by optional IPUTs on "this" storing either one
+  // of the parameters or 0 and the code must then finish with RETURN_VOID.
+  // The called constructor must be either java.lang.Object.<init>() or it
+  // must also match the same pattern.
+  static Matcher::MatchFn* const kConstructorPattern[] = {
+      &Matcher::Mark,
+      &Matcher::Repeated<&Matcher::Const0>,
+      &Matcher::Required<&Matcher::Opcode<Instruction::INVOKE_DIRECT>>,
+      &Matcher::Mark,
+      &Matcher::Repeated<&Matcher::Const0>,
+      &Matcher::Repeated<&Matcher::IPutOnThis>,
+      &Matcher::Required<&Matcher::Opcode<Instruction::RETURN_VOID>>,
+  };
+
+  DCHECK(method != nullptr);
+  DCHECK(!method->IsStatic());
+  DCHECK(method->IsConstructor());
+  DCHECK(code_item != nullptr);
+  if (!method->GetDeclaringClass()->IsVerified() ||
+      code_item->insns_size_in_code_units_ > kMaxCodeUnits ||
+      code_item->registers_size_ > kMaxVRegs ||
+      !Matcher::Match(code_item, kConstructorPattern)) {
+    return false;
+  }
+
+  // Verify the invoke, prevent a few odd cases and collect IPUTs.
+  uint16_t this_vreg = code_item->registers_size_ - code_item->ins_size_;
+  uint16_t zero_vreg_mask = 0u;
+  for (const Instruction* instruction = Instruction::At(code_item->insns_);
+      instruction->Opcode() != Instruction::RETURN_VOID;
+      instruction = instruction->Next()) {
+    if (instruction->Opcode() == Instruction::INVOKE_DIRECT) {
+      ArtMethod* target_method = GetTargetConstructor(method, instruction);
+      if (target_method == nullptr) {
+        return false;
+      }
+      // We allow forwarding constructors only if they pass more arguments
+      // to prevent infinite recursion.
+      if (target_method->GetDeclaringClass() == method->GetDeclaringClass() &&
+          instruction->VRegA_35c() <= code_item->ins_size_) {
+        return false;
+      }
+      size_t forwarded = CountForwardedConstructorArguments(code_item, instruction, zero_vreg_mask);
+      if (forwarded == static_cast<size_t>(-1)) {
+        return false;
+      }
+      if (target_method->GetDeclaringClass()->IsObjectClass()) {
+        DCHECK_EQ(Instruction::At(target_method->GetCodeItem()->insns_)->Opcode(),
+                  Instruction::RETURN_VOID);
+      } else {
+        const DexFile::CodeItem* target_code_item = target_method->GetCodeItem();
+        if (target_code_item == nullptr) {
+          return false;  // Native constructor?
+        }
+        if (!DoAnalyseConstructor(target_code_item, target_method, iputs)) {
+          return false;
+        }
+        // Prune IPUTs with zero input.
+        auto kept_end = std::remove_if(
+            iputs,
+            iputs + arraysize(iputs),
+            [forwarded](const ConstructorIPutData& iput_data) {
+              return iput_data.arg >= forwarded;
+            });
+        std::fill(kept_end, iputs + arraysize(iputs), ConstructorIPutData());
+        // If we have any IPUTs from the call, check that the target method is in the same
+        // dex file (compare DexCache references), otherwise field_indexes would be bogus.
+        if (iputs[0].field_index != DexFile::kDexNoIndex16 &&
+            target_method->GetDexCache() != method->GetDexCache()) {
+          return false;
+        }
+      }
+    } else if (IsInstructionDirectConst(instruction->Opcode())) {
+      zero_vreg_mask |= GetZeroVRegMask(instruction);
+      if ((zero_vreg_mask & (1u << this_vreg)) != 0u) {
+        return false;  // Overwriting `this` is unsupported.
+      }
+    } else {
+      DCHECK(IsInstructionIPut(instruction->Opcode()));
+      DCHECK_EQ(instruction->VRegB_22c(), this_vreg);
+      if (!RecordConstructorIPut(method, instruction, this_vreg, zero_vreg_mask, iputs)) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+}  // anonymous namespace
+
+bool AnalyseConstructor(const DexFile::CodeItem* code_item,
+                        ArtMethod* method,
+                        InlineMethod* result)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  ConstructorIPutData iputs[kMaxConstructorIPuts];
+  if (!DoAnalyseConstructor(code_item, method, iputs)) {
+    return false;
+  }
+  static_assert(kMaxConstructorIPuts == 3, "Unexpected limit");  // Code below depends on this.
+  DCHECK(iputs[0].field_index != DexFile::kDexNoIndex16 ||
+         iputs[1].field_index == DexFile::kDexNoIndex16);
+  DCHECK(iputs[1].field_index != DexFile::kDexNoIndex16 ||
+         iputs[2].field_index == DexFile::kDexNoIndex16);
+
+#define STORE_IPUT(n)                                                         \
+  do {                                                                        \
+    result->d.constructor_data.iput##n##_field_index = iputs[n].field_index;  \
+    result->d.constructor_data.iput##n##_arg = iputs[n].arg;                  \
+  } while (false)
+
+  STORE_IPUT(0);
+  STORE_IPUT(1);
+  STORE_IPUT(2);
+#undef STORE_IPUT
+
+  result->opcode = kInlineOpConstructor;
+  result->flags = kInlineSpecial;
+  result->d.constructor_data.reserved = 0u;
+  return true;
+}
+
 static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET), "iget type");
 static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_WIDE), "iget_wide type");
 static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_OBJECT),
@@ -123,7 +484,19 @@
     case Instruction::CONST_16:
     case Instruction::CONST_HIGH16:
       // TODO: Support wide constants (RETURN_WIDE).
-      return AnalyseConstMethod(code_item, result);
+      if (AnalyseConstMethod(code_item, result)) {
+        return true;
+      }
+      FALLTHROUGH_INTENDED;
+    case Instruction::CONST_WIDE:
+    case Instruction::CONST_WIDE_16:
+    case Instruction::CONST_WIDE_32:
+    case Instruction::CONST_WIDE_HIGH16:
+    case Instruction::INVOKE_DIRECT:
+      if (method != nullptr && !method->IsStatic() && method->IsConstructor()) {
+        return AnalyseConstructor(code_item, method, result);
+      }
+      return false;
     case Instruction::IGET:
     case Instruction::IGET_OBJECT:
     case Instruction::IGET_BOOLEAN:
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 046d225..0b09a70 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -107,6 +107,7 @@
   kInlineOpNonWideConst,
   kInlineOpIGet,
   kInlineOpIPut,
+  kInlineOpConstructor,
   kInlineStringInit,
 };
 std::ostream& operator<<(std::ostream& os, const InlineMethodOpcode& rhs);
@@ -168,6 +169,19 @@
 static_assert(sizeof(InlineReturnArgData) == sizeof(uint64_t),
               "Invalid size of InlineReturnArgData");
 
+struct InlineConstructorData {
+  // There can be up to 3 IPUTs, unused fields are marked with kNoDexIndex16.
+  uint16_t iput0_field_index;
+  uint16_t iput1_field_index;
+  uint16_t iput2_field_index;
+  uint16_t iput0_arg : 4;
+  uint16_t iput1_arg : 4;
+  uint16_t iput2_arg : 4;
+  uint16_t reserved : 4;
+};
+static_assert(sizeof(InlineConstructorData) == sizeof(uint64_t),
+              "Invalid size of InlineConstructorData");
+
 struct InlineMethod {
   InlineMethodOpcode opcode;
   InlineMethodFlags flags;
@@ -175,6 +189,7 @@
     uint64_t data;
     InlineIGetIPutData ifield_data;
     InlineReturnArgData return_data;
+    InlineConstructorData constructor_data;
   } d;
 };
 
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 0c06ca6..1b59c6f 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -602,9 +602,12 @@
     if (is_native_bridge_loaded_) {
       PreInitializeNativeBridge(".");
     }
+    NativeBridgeAction action = force_native_bridge_
+        ? NativeBridgeAction::kInitialize
+        : NativeBridgeAction::kUnload;
     InitNonZygoteOrPostFork(self->GetJniEnv(),
                             /* is_system_server */ false,
-                            NativeBridgeAction::kInitialize,
+                            action,
                             GetInstructionSetString(kRuntimeISA));
   }
 
@@ -939,6 +942,7 @@
   allow_dex_file_fallback_ = !runtime_options.Exists(Opt::NoDexFileFallback);
 
   no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
+  force_native_bridge_ = runtime_options.Exists(Opt::ForceNativeBridge);
 
   Split(runtime_options.GetOrDefault(Opt::CpuAbiList), ',', &cpu_abilist_);
 
@@ -1883,7 +1887,8 @@
   if (jit_.get() != nullptr) {
     compiler_callbacks_ = jit_->GetCompilerCallbacks();
     jit_->CreateInstrumentationCache(jit_options_->GetCompileThreshold(),
-                                     jit_options_->GetWarmupThreshold());
+                                     jit_options_->GetWarmupThreshold(),
+                                     jit_options_->GetOsrThreshold());
     jit_->CreateThreadPool();
 
     // Notify native debugger about the classes already loaded before the creation of the jit.
@@ -1914,7 +1919,8 @@
 }
 
 bool Runtime::IsVerificationEnabled() const {
-  return verify_ == verifier::VerifyMode::kEnable;
+  return verify_ == verifier::VerifyMode::kEnable ||
+      verify_ == verifier::VerifyMode::kSoftFail;
 }
 
 bool Runtime::IsVerificationSoftFail() const {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index c8c2ee5..bec26f8 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -774,6 +774,9 @@
   // building a statically link version of dex2oat.
   bool no_sig_chain_;
 
+  // Force the use of native bridge even if the app ISA matches the runtime ISA.
+  bool force_native_bridge_;
+
   // Whether or not a native bridge has been loaded.
   //
   // The native bridge allows running native code compiled for a foreign ISA. The way it works is,
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 122dcb1..8237b06 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -114,6 +114,9 @@
       switch (signal_code) {
         case SEGV_MAPERR: return "SEGV_MAPERR";
         case SEGV_ACCERR: return "SEGV_ACCERR";
+#if defined(SEGV_BNDERR)
+        case SEGV_BNDERR: return "SEGV_BNDERR";
+#endif
       }
       break;
     case SIGTRAP:
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 308f3ba..097bccb 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -92,6 +92,7 @@
 
 RUNTIME_OPTIONS_KEY (Unit,                DisableExplicitGC)
 RUNTIME_OPTIONS_KEY (Unit,                NoSigChain)
+RUNTIME_OPTIONS_KEY (Unit,                ForceNativeBridge)
 RUNTIME_OPTIONS_KEY (LogVerbosity,        Verbose)
 RUNTIME_OPTIONS_KEY (unsigned int,        LockProfThreshold)
 RUNTIME_OPTIONS_KEY (std::string,         StackTraceFile)
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 84185ce..97eb805 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -1195,6 +1195,35 @@
     return StackMap();
   }
 
+  StackMap GetOsrStackMapForDexPc(uint32_t dex_pc, const StackMapEncoding& encoding) const {
+    size_t e = GetNumberOfStackMaps();
+    if (e == 0) {
+      // There cannot be OSR stack map if there is no stack map.
+      return StackMap();
+    }
+    // Walk over all stack maps. If two consecutive stack maps are identical, then we
+    // have found a stack map suitable for OSR.
+    for (size_t i = 0; i < e - 1; ++i) {
+      StackMap stack_map = GetStackMapAt(i, encoding);
+      if (stack_map.GetDexPc(encoding) == dex_pc) {
+        StackMap other = GetStackMapAt(i + 1, encoding);
+        if (other.GetDexPc(encoding) == dex_pc &&
+            other.GetNativePcOffset(encoding) == stack_map.GetNativePcOffset(encoding)) {
+          DCHECK_EQ(other.GetDexRegisterMapOffset(encoding),
+                    stack_map.GetDexRegisterMapOffset(encoding));
+          DCHECK(!stack_map.HasInlineInfo(encoding));
+          if (i < e - 2) {
+            // Make sure there are not three identical stack maps following each other.
+            DCHECK_NE(stack_map.GetNativePcOffset(encoding),
+                      GetStackMapAt(i + 2, encoding).GetNativePcOffset(encoding));
+          }
+          return stack_map;
+        }
+      }
+    }
+    return StackMap();
+  }
+
   StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset,
                                         const StackMapEncoding& encoding) const {
     // TODO: Safepoint stack maps are sorted by native_pc_offset but catch stack
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 2abcd67..7a45594 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1599,7 +1599,7 @@
   tls32_.state_and_flags.as_struct.state = kNative;
   memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
   std::fill(tlsPtr_.rosalloc_runs,
-            tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBrackets,
+            tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread,
             gc::allocator::RosAlloc::GetDedicatedFullRun());
   for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
     tlsPtr_.checkpoint_functions[i] = nullptr;
@@ -3012,4 +3012,25 @@
   return count;
 }
 
+
+void Thread::DeoptimizeWithDeoptimizationException(JValue* result) {
+  DCHECK_EQ(GetException(), Thread::GetDeoptimizationException());
+  ClearException();
+  ShadowFrame* shadow_frame =
+      PopStackedShadowFrame(StackedShadowFrameType::kDeoptimizationShadowFrame);
+  mirror::Throwable* pending_exception = nullptr;
+  bool from_code = false;
+  PopDeoptimizationContext(result, &pending_exception, &from_code);
+  CHECK(!from_code) << "Deoptimizing from code should be done with single frame deoptimization";
+  SetTopOfStack(nullptr);
+  SetTopOfShadowStack(shadow_frame);
+
+  // Restore the exception that was pending before deoptimization then interpret the
+  // deoptimized frames.
+  if (pending_exception != nullptr) {
+    SetException(pending_exception);
+  }
+  interpreter::EnterInterpreterFromDeoptimize(this, shadow_frame, from_code, result);
+}
+
 }  // namespace art
diff --git a/runtime/thread.h b/runtime/thread.h
index d7887ca..3a5d72e 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -113,7 +113,8 @@
   kSingleFrameDeoptimizationShadowFrame
 };
 
-static constexpr size_t kNumRosAllocThreadLocalSizeBrackets = 34;
+// This should match RosAlloc::kNumThreadLocalSizeBrackets.
+static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
 
 // Thread's stack layout for implicit stack overflow checks:
 //
@@ -552,6 +553,9 @@
         OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
   }
 
+  // Deoptimize the Java stack.
+  void DeoptimizeWithDeoptimizationException(JValue* result) SHARED_REQUIRES(Locks::mutator_lock_);
+
  private:
   template<size_t pointer_size>
   static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
@@ -1421,7 +1425,7 @@
     void* mterp_alt_ibase;
 
     // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
-    void* rosalloc_runs[kNumRosAllocThreadLocalSizeBrackets];
+    void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];
 
     // Thread-local allocation stack data/routines.
     StackReference<mirror::Object>* thread_local_alloc_stack_top;
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 56154c6..c7ac172 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -58,6 +58,10 @@
 // On VLOG(verifier), should we dump the whole state when we run into a hard failure?
 static constexpr bool kDumpRegLinesOnHardFailureIfVLOG = true;
 
+// We print a warning blurb about "dx --no-optimize" when we find monitor-locking issues. Make
+// sure we only print this once.
+static bool gPrintedDxMonitorText = false;
+
 PcToRegisterLineTable::PcToRegisterLineTable(ScopedArenaAllocator& arena)
     : register_lines_(arena.Adapter(kArenaAllocVerifier)) {}
 
@@ -166,23 +170,38 @@
   return kDirect ? it->HasNextDirectMethod() : it->HasNextVirtualMethod();
 }
 
+static MethodVerifier::FailureKind FailureKindMax(MethodVerifier::FailureKind fk1,
+                                                  MethodVerifier::FailureKind fk2) {
+  static_assert(MethodVerifier::FailureKind::kNoFailure <
+                    MethodVerifier::FailureKind::kSoftFailure
+                && MethodVerifier::FailureKind::kSoftFailure <
+                       MethodVerifier::FailureKind::kHardFailure,
+                "Unexpected FailureKind order");
+  return std::max(fk1, fk2);
+}
+
+void MethodVerifier::FailureData::Merge(const MethodVerifier::FailureData& fd) {
+  kind = FailureKindMax(kind, fd.kind);
+  types |= fd.types;
+}
+
 template <bool kDirect>
-void MethodVerifier::VerifyMethods(Thread* self,
-                                   ClassLinker* linker,
-                                   const DexFile* dex_file,
-                                   const DexFile::ClassDef* class_def,
-                                   ClassDataItemIterator* it,
-                                   Handle<mirror::DexCache> dex_cache,
-                                   Handle<mirror::ClassLoader> class_loader,
-                                   CompilerCallbacks* callbacks,
-                                   bool allow_soft_failures,
-                                   bool log_hard_failures,
-                                   bool need_precise_constants,
-                                   bool* hard_fail,
-                                   size_t* error_count,
-                                   std::string* error_string) {
+MethodVerifier::FailureData MethodVerifier::VerifyMethods(Thread* self,
+                                                          ClassLinker* linker,
+                                                          const DexFile* dex_file,
+                                                          const DexFile::ClassDef* class_def,
+                                                          ClassDataItemIterator* it,
+                                                          Handle<mirror::DexCache> dex_cache,
+                                                          Handle<mirror::ClassLoader> class_loader,
+                                                          CompilerCallbacks* callbacks,
+                                                          bool allow_soft_failures,
+                                                          bool log_hard_failures,
+                                                          bool need_precise_constants,
+                                                          std::string* error_string) {
   DCHECK(it != nullptr);
 
+  MethodVerifier::FailureData failure_data;
+
   int64_t previous_method_idx = -1;
   while (HasNextMethod<kDirect>(it)) {
     self->AllowThreadSuspension();
@@ -206,7 +225,7 @@
     }
     StackHandleScope<1> hs(self);
     std::string hard_failure_msg;
-    MethodVerifier::FailureKind result = VerifyMethod(self,
+    MethodVerifier::FailureData result = VerifyMethod(self,
                                                       method_idx,
                                                       dex_file,
                                                       dex_cache,
@@ -220,24 +239,24 @@
                                                       log_hard_failures,
                                                       need_precise_constants,
                                                       &hard_failure_msg);
-    if (result != kNoFailure) {
-      if (result == kHardFailure) {
-        if (*error_count > 0) {
-          *error_string += "\n";
-        }
-        if (!*hard_fail) {
-          *error_string += "Verifier rejected class ";
-          *error_string += PrettyDescriptor(dex_file->GetClassDescriptor(*class_def));
-          *error_string += ":";
-        }
-        *error_string += " ";
-        *error_string += hard_failure_msg;
-        *hard_fail = true;
+    if (result.kind == kHardFailure) {
+      if (failure_data.kind == kHardFailure) {
+        // If we logged an error before, we need a newline.
+        *error_string += "\n";
+      } else {
+        // If we didn't log a hard failure before, print the header of the message.
+        *error_string += "Verifier rejected class ";
+        *error_string += PrettyDescriptor(dex_file->GetClassDescriptor(*class_def));
+        *error_string += ":";
       }
-      *error_count = *error_count + 1;
+      *error_string += " ";
+      *error_string += hard_failure_msg;
     }
+    failure_data.Merge(result);
     it->Next();
   }
+
+  return failure_data;
 }
 
 MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
@@ -268,44 +287,53 @@
   while (it.HasNextStaticField() || it.HasNextInstanceField()) {
     it.Next();
   }
-  size_t error_count = 0;
-  bool hard_fail = false;
   ClassLinker* linker = Runtime::Current()->GetClassLinker();
   // Direct methods.
-  VerifyMethods<true>(self,
-                      linker,
-                      dex_file,
-                      class_def,
-                      &it,
-                      dex_cache,
-                      class_loader,
-                      callbacks,
-                      allow_soft_failures,
-                      log_hard_failures,
-                      false /* need precise constants */,
-                      &hard_fail,
-                      &error_count,
-                      error);
+  MethodVerifier::FailureData data1 = VerifyMethods<true>(self,
+                                                          linker,
+                                                          dex_file,
+                                                          class_def,
+                                                          &it,
+                                                          dex_cache,
+                                                          class_loader,
+                                                          callbacks,
+                                                          allow_soft_failures,
+                                                          log_hard_failures,
+                                                          false /* need precise constants */,
+                                                          error);
   // Virtual methods.
-  VerifyMethods<false>(self,
-                      linker,
-                      dex_file,
-                      class_def,
-                      &it,
-                      dex_cache,
-                      class_loader,
-                      callbacks,
-                      allow_soft_failures,
-                      log_hard_failures,
-                      false /* need precise constants */,
-                      &hard_fail,
-                      &error_count,
-                      error);
+  MethodVerifier::FailureData data2 = VerifyMethods<false>(self,
+                                                           linker,
+                                                           dex_file,
+                                                           class_def,
+                                                           &it,
+                                                           dex_cache,
+                                                           class_loader,
+                                                           callbacks,
+                                                           allow_soft_failures,
+                                                           log_hard_failures,
+                                                           false /* need precise constants */,
+                                                           error);
 
-  if (error_count == 0) {
+  data1.Merge(data2);
+
+  if (data1.kind == kNoFailure) {
     return kNoFailure;
   } else {
-    return hard_fail ? kHardFailure : kSoftFailure;
+    if ((data1.types & VERIFY_ERROR_LOCKING) != 0) {
+      // Print a warning about expected slow-down. Use a string temporary to print one contiguous
+      // warning.
+      std::string tmp =
+          StringPrintf("Class %s failed lock verification and will run slower.",
+                       PrettyDescriptor(dex_file->GetClassDescriptor(*class_def)).c_str());
+      if (!gPrintedDxMonitorText) {
+        tmp = tmp + "\nCommon causes for lock verification issues are non-optimized dex code\n"
+                    "and incorrect proguard optimizations.";
+        gPrintedDxMonitorText = true;
+      }
+      LOG(WARNING) << tmp;
+    }
+    return data1.kind;
   }
 }
 
@@ -320,7 +348,7 @@
   return registers_size * insns_size > 4*1024*1024;
 }
 
-MethodVerifier::FailureKind MethodVerifier::VerifyMethod(Thread* self,
+MethodVerifier::FailureData MethodVerifier::VerifyMethod(Thread* self,
                                                          uint32_t method_idx,
                                                          const DexFile* dex_file,
                                                          Handle<mirror::DexCache> dex_cache,
@@ -334,7 +362,7 @@
                                                          bool log_hard_failures,
                                                          bool need_precise_constants,
                                                          std::string* hard_failure_msg) {
-  MethodVerifier::FailureKind result = kNoFailure;
+  MethodVerifier::FailureData result;
   uint64_t start_ns = kTimeVerifyMethod ? NanoTime() : 0;
 
   MethodVerifier verifier(self, dex_file, dex_cache, class_loader, class_def, code_item,
@@ -355,7 +383,7 @@
         verifier.DumpFailures(VLOG_STREAM(verifier) << "Soft verification failures in "
                                                     << PrettyMethod(method_idx, *dex_file) << "\n");
       }
-      result = kSoftFailure;
+      result.kind = kSoftFailure;
     }
   } else {
     // Bad method data.
@@ -364,7 +392,7 @@
     if (UNLIKELY(verifier.have_pending_experimental_failure_)) {
       // Failed due to being forced into interpreter. This is ok because
       // we just want to skip verification.
-      result = kSoftFailure;
+      result.kind = kSoftFailure;
     } else {
       CHECK(verifier.have_pending_hard_failure_);
       if (VLOG_IS_ON(verifier) || log_hard_failures) {
@@ -376,7 +404,7 @@
         *hard_failure_msg =
             verifier.failure_messages_[verifier.failure_messages_.size() - 1]->str();
       }
-      result = kHardFailure;
+      result.kind = kHardFailure;
 
       if (callbacks != nullptr) {
         // Let the interested party know that we failed the class.
@@ -397,6 +425,7 @@
                    << (IsLargeMethod(code_item) ? " (large method)" : "");
     }
   }
+  result.types = verifier.encountered_failure_types_;
   return result;
 }
 
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 613d5af..c7d1e6b 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -25,6 +25,7 @@
 #include "base/macros.h"
 #include "base/scoped_arena_containers.h"
 #include "base/stl_util.h"
+#include "base/value_object.h"
 #include "dex_file.h"
 #include "handle.h"
 #include "instruction_flags.h"
@@ -344,23 +345,31 @@
   // Adds the given string to the end of the last failure message.
   void AppendToLastFailMessage(std::string);
 
+  // Verification result for method(s). Includes a (maximum) failure kind, and (the union of)
+  // all failure types.
+  struct FailureData : ValueObject {
+    FailureKind kind = kNoFailure;
+    uint32_t types = 0U;
+
+    // Merge src into this. Uses the most severe failure kind, and the union of types.
+    void Merge(const FailureData& src);
+  };
+
   // Verify all direct or virtual methods of a class. The method assumes that the iterator is
   // positioned correctly, and the iterator will be updated.
   template <bool kDirect>
-  static void VerifyMethods(Thread* self,
-                            ClassLinker* linker,
-                            const DexFile* dex_file,
-                            const DexFile::ClassDef* class_def,
-                            ClassDataItemIterator* it,
-                            Handle<mirror::DexCache> dex_cache,
-                            Handle<mirror::ClassLoader> class_loader,
-                            CompilerCallbacks* callbacks,
-                            bool allow_soft_failures,
-                            bool log_hard_failures,
-                            bool need_precise_constants,
-                            bool* hard_fail,
-                            size_t* error_count,
-                            std::string* error_string)
+  static FailureData VerifyMethods(Thread* self,
+                                   ClassLinker* linker,
+                                   const DexFile* dex_file,
+                                   const DexFile::ClassDef* class_def,
+                                   ClassDataItemIterator* it,
+                                   Handle<mirror::DexCache> dex_cache,
+                                   Handle<mirror::ClassLoader> class_loader,
+                                   CompilerCallbacks* callbacks,
+                                   bool allow_soft_failures,
+                                   bool log_hard_failures,
+                                   bool need_precise_constants,
+                                   std::string* error_string)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   /*
@@ -374,7 +383,7 @@
    *  (3) Iterate through the method, checking type safety and looking
    *      for code flow problems.
    */
-  static FailureKind VerifyMethod(Thread* self, uint32_t method_idx,
+  static FailureData VerifyMethod(Thread* self, uint32_t method_idx,
                                   const DexFile* dex_file,
                                   Handle<mirror::DexCache> dex_cache,
                                   Handle<mirror::ClassLoader> class_loader,
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 08f85b3..330c06a 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -178,9 +178,9 @@
   if (MonitorStackDepth() != 0) {
     verifier->Fail(VERIFY_ERROR_LOCKING);
     if (kDumpLockFailures) {
-      LOG(WARNING) << "expected empty monitor stack in "
-                   << PrettyMethod(verifier->GetMethodReference().dex_method_index,
-                                   *verifier->GetMethodReference().dex_file);
+      VLOG(verifier) << "expected empty monitor stack in "
+                     << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+                                     *verifier->GetMethodReference().dex_file);
     }
   }
 }
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 37343b5..b7cde99 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -348,9 +348,9 @@
   } else if (monitors_.size() >= 32) {
     verifier->Fail(VERIFY_ERROR_LOCKING);
     if (kDumpLockFailures) {
-      LOG(WARNING) << "monitor-enter stack overflow while verifying "
-                   << PrettyMethod(verifier->GetMethodReference().dex_method_index,
-                                   *verifier->GetMethodReference().dex_file);
+      VLOG(verifier) << "monitor-enter stack overflow while verifying "
+                     << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+                                     *verifier->GetMethodReference().dex_file);
     }
   } else {
     if (SetRegToLockDepth(reg_idx, monitors_.size())) {
@@ -364,9 +364,9 @@
     } else {
       verifier->Fail(VERIFY_ERROR_LOCKING);
       if (kDumpLockFailures) {
-        LOG(WARNING) << "unexpected monitor-enter on register v" <<  reg_idx << " in "
-                     << PrettyMethod(verifier->GetMethodReference().dex_method_index,
-                                     *verifier->GetMethodReference().dex_file);
+        VLOG(verifier) << "unexpected monitor-enter on register v" <<  reg_idx << " in "
+                       << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+                                       *verifier->GetMethodReference().dex_file);
       }
     }
   }
@@ -379,9 +379,9 @@
   } else if (monitors_.empty()) {
     verifier->Fail(VERIFY_ERROR_LOCKING);
     if (kDumpLockFailures) {
-      LOG(WARNING) << "monitor-exit stack underflow while verifying "
-                   << PrettyMethod(verifier->GetMethodReference().dex_method_index,
-                                   *verifier->GetMethodReference().dex_file);
+      VLOG(verifier) << "monitor-exit stack underflow while verifying "
+                     << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+                                     *verifier->GetMethodReference().dex_file);
     }
   } else {
     monitors_.pop_back();
@@ -400,9 +400,9 @@
     if (!success) {
       verifier->Fail(VERIFY_ERROR_LOCKING);
       if (kDumpLockFailures) {
-        LOG(WARNING) << "monitor-exit not unlocking the top of the monitor stack while verifying "
-                     << PrettyMethod(verifier->GetMethodReference().dex_method_index,
-                                     *verifier->GetMethodReference().dex_file);
+        VLOG(verifier) << "monitor-exit not unlocking the top of the monitor stack while verifying "
+                       << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+                                       *verifier->GetMethodReference().dex_file);
       }
     } else {
       // Record the register was unlocked. This clears all aliases, thus it will also clear the
@@ -453,10 +453,10 @@
     if (monitors_.size() != incoming_line->monitors_.size()) {
       verifier->Fail(VERIFY_ERROR_LOCKING);
       if (kDumpLockFailures) {
-        LOG(WARNING) << "mismatched stack depths (depth=" << MonitorStackDepth()
-                     << ", incoming depth=" << incoming_line->MonitorStackDepth() << ") in "
-                     << PrettyMethod(verifier->GetMethodReference().dex_method_index,
-                                     *verifier->GetMethodReference().dex_file);
+        VLOG(verifier) << "mismatched stack depths (depth=" << MonitorStackDepth()
+                       << ", incoming depth=" << incoming_line->MonitorStackDepth() << ") in "
+                       << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+                                       *verifier->GetMethodReference().dex_file);
       }
     } else if (reg_to_lock_depths_ != incoming_line->reg_to_lock_depths_) {
       for (uint32_t idx = 0; idx < num_regs_; idx++) {
@@ -488,10 +488,10 @@
                                        reg_to_lock_depths_)) {
             verifier->Fail(VERIFY_ERROR_LOCKING);
             if (kDumpLockFailures) {
-              LOG(WARNING) << "mismatched stack depths for register v" << idx
-                           << ": " << depths  << " != " << incoming_depths << " in "
-                           << PrettyMethod(verifier->GetMethodReference().dex_method_index,
-                                           *verifier->GetMethodReference().dex_file);
+              VLOG(verifier) << "mismatched stack depths for register v" << idx
+                             << ": " << depths  << " != " << incoming_depths << " in "
+                             << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+                                             *verifier->GetMethodReference().dex_file);
             }
             break;
           }
@@ -530,11 +530,11 @@
               // No aliases for both current and incoming, we'll lose information.
               verifier->Fail(VERIFY_ERROR_LOCKING);
               if (kDumpLockFailures) {
-                LOG(WARNING) << "mismatched lock levels for register v" << idx << ": "
-                    << std::hex << locked_levels << std::dec  << " != "
-                    << std::hex << incoming_locked_levels << std::dec << " in "
-                    << PrettyMethod(verifier->GetMethodReference().dex_method_index,
-                                    *verifier->GetMethodReference().dex_file);
+                VLOG(verifier) << "mismatched lock levels for register v" << idx << ": "
+                               << std::hex << locked_levels << std::dec  << " != "
+                               << std::hex << incoming_locked_levels << std::dec << " in "
+                               << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+                                               *verifier->GetMethodReference().dex_file);
               }
               break;
             }
diff --git a/test/048-reflect-v8/expected.txt b/test/048-reflect-v8/expected.txt
index 3109ecc..54aede9 100644
--- a/test/048-reflect-v8/expected.txt
+++ b/test/048-reflect-v8/expected.txt
@@ -6,6 +6,15 @@
 IsDefaultTest$ImplementsWithDefault is default = yes
 IsDefaultTest$ImplementsWithRegular is default = no
 ==============================
+Are These Methods found by getDeclaredMethod:
+==============================
+No error thrown for class interface DefaultDeclared$DefaultInterface
+No error thrown for class interface DefaultDeclared$RegularInterface
+NoSuchMethodException thrown for class class DefaultDeclared$ImplementsWithDefault
+No error thrown for class class DefaultDeclared$ImplementsWithDeclared
+No error thrown for class class DefaultDeclared$ImplementsWithRegular
+NoSuchMethodException thrown for class class DefaultDeclared$UnimplementedWithRegular
+==============================
 Class annotations by type:
 ==============================
 Annotations by type, defined by class SingleUser with annotation Calendar: @Calendar(dayOfMonth=unspecified_month, dayOfWeek=single, hour=23)
diff --git a/test/048-reflect-v8/src/DefaultDeclared.java b/test/048-reflect-v8/src/DefaultDeclared.java
new file mode 100644
index 0000000..16e8a24
--- /dev/null
+++ b/test/048-reflect-v8/src/DefaultDeclared.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class DefaultDeclared {
+  interface DefaultInterface {
+    default void sayHi() {
+      System.out.println("hi default");
+    }
+  }
+
+  interface RegularInterface {
+    void sayHi();
+  }
+
+  class ImplementsWithDefault implements DefaultInterface {}
+
+  class ImplementsWithDeclared implements DefaultInterface {
+    public void sayHi() {
+      System.out.println("hello specific from default");
+    }
+  }
+
+  abstract class UnimplementedWithRegular implements RegularInterface { }
+
+  class ImplementsWithRegular implements RegularInterface {
+    public void sayHi() {
+      System.out.println("hello specific");
+    }
+  }
+
+  private static void printGetMethod(Class<?> klass) {
+    Method m;
+    try {
+      m = klass.getDeclaredMethod("sayHi");
+      System.out.println("No error thrown for class " + klass.toString());
+    } catch (NoSuchMethodException e) {
+      System.out.println("NoSuchMethodException thrown for class " + klass.toString());
+    } catch (Throwable t) {
+      System.out.println("Unknown error thrown for class " + klass.toString());
+      t.printStackTrace();
+    }
+  }
+
+  public static void test() {
+    System.out.println("==============================");
+    System.out.println("Are These Methods found by getDeclaredMethod:");
+    System.out.println("==============================");
+
+    printGetMethod(DefaultInterface.class);
+    printGetMethod(RegularInterface.class);
+    printGetMethod(ImplementsWithDefault.class);
+    printGetMethod(ImplementsWithDeclared.class);
+    printGetMethod(ImplementsWithRegular.class);
+    printGetMethod(UnimplementedWithRegular.class);
+  }
+}
diff --git a/test/048-reflect-v8/src/Main.java b/test/048-reflect-v8/src/Main.java
index f2b8287..b270e68 100644
--- a/test/048-reflect-v8/src/Main.java
+++ b/test/048-reflect-v8/src/Main.java
@@ -17,6 +17,7 @@
 public class Main {
   public static void main(String[] args) {
     IsDefaultTest.test();
+    DefaultDeclared.test();
     AnnotationTest.testAnnotationsByType();
     AnnotationTest.testDeclaredAnnotation();
     AnnotationTest.testDeclaredAnnotationsByType();
diff --git a/test/115-native-bridge/run b/test/115-native-bridge/run
index ea2045b..aeb5721 100644
--- a/test/115-native-bridge/run
+++ b/test/115-native-bridge/run
@@ -28,4 +28,4 @@
 LEFT=$(echo ${ARGS} | sed -r 's/-Djava.library.path.*//')
 RIGHT=$(echo ${ARGS} | sed -r 's/.*Djava.library.path[^ ]* //')
 MODARGS="${LEFT} -Djava.library.path=`pwd` ${RIGHT}"
-exec ${RUN} --runtime-option -XX:NativeBridge=libnativebridgetest.so ${MODARGS} NativeBridgeMain
+exec ${RUN} --runtime-option -Xforce-nb-testing --runtime-option -XX:NativeBridge=libnativebridgetest.so ${MODARGS} NativeBridgeMain
diff --git a/test/458-checker-instruction-simplification/src/Main.java b/test/458-checker-instruction-simplification/src/Main.java
index 3c8abeb..2f80470 100644
--- a/test/458-checker-instruction-simplification/src/Main.java
+++ b/test/458-checker-instruction-simplification/src/Main.java
@@ -1404,7 +1404,7 @@
   /// CHECK-START: int Main.floatConditionNotEqualOne(float) ssa_builder (after)
   /// CHECK:                            LessThanOrEqual
 
-  /// CHECK-START: int Main.floatConditionNotEqualOne(float) register (before)
+  /// CHECK-START: int Main.floatConditionNotEqualOne(float) instruction_simplifier_before_codegen (after)
   /// CHECK-DAG:      <<Arg:f\d+>>      ParameterValue
   /// CHECK-DAG:      <<Const13:i\d+>>  IntConstant 13
   /// CHECK-DAG:      <<Const54:i\d+>>  IntConstant 54
@@ -1420,7 +1420,7 @@
   /// CHECK-START: int Main.doubleConditionEqualZero(double) ssa_builder (after)
   /// CHECK:                            LessThanOrEqual
 
-  /// CHECK-START: int Main.doubleConditionEqualZero(double) register (before)
+  /// CHECK-START: int Main.doubleConditionEqualZero(double) instruction_simplifier_before_codegen (after)
   /// CHECK-DAG:      <<Arg:d\d+>>      ParameterValue
   /// CHECK-DAG:      <<Const13:i\d+>>  IntConstant 13
   /// CHECK-DAG:      <<Const54:i\d+>>  IntConstant 54
diff --git a/test/555-checker-regression-x86const/build b/test/555-checker-regression-x86const/build
new file mode 100644
index 0000000..09dcc36
--- /dev/null
+++ b/test/555-checker-regression-x86const/build
@@ -0,0 +1,46 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
+# We can't use src-ex testing infrastructure because src and src-ex are compiled
+# with javac independently and can't share code (without reflection).
+
+mkdir classes
+${JAVAC} -d classes `find src -name '*.java'`
+
+mkdir classes-ex
+mv classes/UnresolvedClass.class classes-ex
+
+if [ ${USE_JACK} = "true" ]; then
+  # Create .jack files from classes generated with javac.
+  ${JILL} classes --output classes.jack
+  ${JILL} classes-ex --output classes-ex.jack
+
+  # Create DEX files from .jack files.
+  ${JACK} --import classes.jack --output-dex .
+  zip $TEST_NAME.jar classes.dex
+  ${JACK} --import classes-ex.jack --output-dex .
+  zip ${TEST_NAME}-ex.jar classes.dex
+else
+  if [ ${NEED_DEX} = "true" ]; then
+    ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes
+    zip $TEST_NAME.jar classes.dex
+    ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex
+    zip ${TEST_NAME}-ex.jar classes.dex
+  fi
+fi
diff --git a/test/555-checker-regression-x86const/expected.txt b/test/555-checker-regression-x86const/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/555-checker-regression-x86const/expected.txt
diff --git a/test/555-checker-regression-x86const/info.txt b/test/555-checker-regression-x86const/info.txt
new file mode 100644
index 0000000..c4037fa
--- /dev/null
+++ b/test/555-checker-regression-x86const/info.txt
@@ -0,0 +1,2 @@
+Check that X86 FP constant-area handling handles intrinsics with CurrentMethod
+on the call.
diff --git a/test/555-checker-regression-x86const/run b/test/555-checker-regression-x86const/run
new file mode 100644
index 0000000..63fdb8c
--- /dev/null
+++ b/test/555-checker-regression-x86const/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Use secondary switch to add secondary dex file to class path.
+exec ${RUN} "${@}" --secondary
diff --git a/test/555-checker-regression-x86const/src/Main.java b/test/555-checker-regression-x86const/src/Main.java
new file mode 100644
index 0000000..914cfde
--- /dev/null
+++ b/test/555-checker-regression-x86const/src/Main.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main extends UnresolvedClass {
+
+  /// CHECK-START: float Main.callAbs(float) register (before)
+  /// CHECK:       <<CurrentMethod:[ij]\d+>> CurrentMethod
+  /// CHECK:       <<ParamValue:f\d+>> ParameterValue
+  /// CHECK:       InvokeStaticOrDirect [<<ParamValue>>,<<CurrentMethod>>] method_name:java.lang.Math.abs
+  static public float callAbs(float f) {
+    // An intrinsic invoke in a method that has unresolved references will still
+    // have a CurrentMethod as an argument.  The X86 pc_relative_fixups_x86 pass
+    // must be able to handle Math.abs invokes that have a CurrentMethod, as both
+    // the CurrentMethod and the HX86LoadFromConstantTable (for the bitmask)
+    // expect to be in the 'SpecialInputIndex' input index.
+    return Math.abs(f);
+  }
+
+  static public void main(String[] args) {
+    expectEquals(callAbs(-6.5f), 6.5f);
+  }
+
+  public static void expectEquals(float expected, float result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/555-checker-regression-x86const/src/Unresolved.java b/test/555-checker-regression-x86const/src/Unresolved.java
new file mode 100644
index 0000000..e98bdbf
--- /dev/null
+++ b/test/555-checker-regression-x86const/src/Unresolved.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class UnresolvedClass {
+}
diff --git a/test/562-bce-preheader/src/Main.java b/test/562-bce-preheader/src/Main.java
index 8de0533..8b527b4 100644
--- a/test/562-bce-preheader/src/Main.java
+++ b/test/562-bce-preheader/src/Main.java
@@ -70,6 +70,26 @@
     return acc;
   }
 
+  /**
+   * An artificial example with an inconsistent phi structure during
+   * dynamic bce that is corrected afterwards. Note that only the last
+   * assignment is really live, but the other statements set up an
+   * interesting phi structure.
+   */
+  private static int doit(int[] z) {
+    int a = 0;
+    for (int i = 0; i < 10; ++i) {
+      for (int j = i; j < 10; ++j) {
+        a = z[i];
+        for (int k = 0; k < 10; ++k) {
+          a += z[k];
+          a = z[i];
+        }
+      }
+    }
+    return a;
+  }
+
   public static void main(String args[]) {
     int[][] x = new int[2][2];
     int y;
@@ -96,6 +116,9 @@
     expectEquals(26, foo(a, b,  2));
     expectEquals(38, foo(a, b,  3));
 
+    int[] z = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+    expectEquals(10, doit(z));
+
     System.out.println("passed");
   }
 
diff --git a/test/565-checker-condition-liveness/src/Main.java b/test/565-checker-condition-liveness/src/Main.java
index a811e5b..dc4cb76 100644
--- a/test/565-checker-condition-liveness/src/Main.java
+++ b/test/565-checker-condition-liveness/src/Main.java
@@ -16,6 +16,24 @@
 
 public class Main {
 
+  /// CHECK-START-X86: int Main.p(float) liveness (after)
+  /// CHECK:         <<Arg:f\d+>>  ParameterValue uses:[<<UseInput:\d+>>]
+  /// CHECK-DAG:     <<Five:f\d+>> FloatConstant 5 uses:[<<UseInput>>]
+  /// CHECK-DAG:     <<Zero:i\d+>> IntConstant 0
+  /// CHECK-DAG:     <<MinusOne:i\d+>> IntConstant -1 uses:[<<UseInput>>]
+  /// CHECK:         <<Base:i\d+>> X86ComputeBaseMethodAddress uses:[<<UseInput>>]
+  /// CHECK-NEXT:    <<Load:f\d+>> X86LoadFromConstantTable [<<Base>>,<<Five>>]
+  /// CHECK-NEXT:    <<Cond:z\d+>> LessThanOrEqual [<<Arg>>,<<Load>>]
+  /// CHECK-NEXT:                  Select [<<Zero>>,<<MinusOne>>,<<Cond>>] liveness:<<LivSel:\d+>>
+  /// CHECK-EVAL:    <<UseInput>> == <<LivSel>> + 1
+
+  public static int p(float arg) {
+    if (arg > 5.0f) {
+      return 0;
+    }
+    return -1;
+  }
+
   /// CHECK-START: void Main.main(java.lang.String[]) liveness (after)
   /// CHECK:         <<X:i\d+>>    ArrayLength uses:[<<UseInput:\d+>>]
   /// CHECK:         <<Y:i\d+>>    StaticFieldGet uses:[<<UseInput>>]
diff --git a/test/565-checker-doublenegbitwise/src/Main.java b/test/565-checker-doublenegbitwise/src/Main.java
index d681ad7..41af97b 100644
--- a/test/565-checker-doublenegbitwise/src/Main.java
+++ b/test/565-checker-doublenegbitwise/src/Main.java
@@ -37,7 +37,7 @@
 
   // Note: before the instruction_simplifier pass, Xor's are used instead of
   // Not's (the simplification happens during the same pass).
-  /// CHECK-START-ARM64: int Main.$opt$noinline$andToOr(int, int) instruction_simplifier (before)
+  /// CHECK-START: int Main.$opt$noinline$andToOr(int, int) instruction_simplifier (before)
   /// CHECK:       <<P1:i\d+>>          ParameterValue
   /// CHECK:       <<P2:i\d+>>          ParameterValue
   /// CHECK:       <<CstM1:i\d+>>       IntConstant -1
@@ -46,16 +46,18 @@
   /// CHECK:       <<And:i\d+>>         And [<<Not1>>,<<Not2>>]
   /// CHECK:                            Return [<<And>>]
 
-  /// CHECK-START-ARM64: int Main.$opt$noinline$andToOr(int, int) instruction_simplifier (after)
+  /// CHECK-START: int Main.$opt$noinline$andToOr(int, int) instruction_simplifier (after)
   /// CHECK:       <<P1:i\d+>>          ParameterValue
   /// CHECK:       <<P2:i\d+>>          ParameterValue
   /// CHECK:       <<Or:i\d+>>          Or [<<P1>>,<<P2>>]
   /// CHECK:       <<Not:i\d+>>         Not [<<Or>>]
   /// CHECK:                            Return [<<Not>>]
 
-  /// CHECK-START-ARM64: int Main.$opt$noinline$andToOr(int, int) instruction_simplifier (after)
+  /// CHECK-START: int Main.$opt$noinline$andToOr(int, int) instruction_simplifier (after)
   /// CHECK:                            Not
   /// CHECK-NOT:                        Not
+
+  /// CHECK-START: int Main.$opt$noinline$andToOr(int, int) instruction_simplifier (after)
   /// CHECK-NOT:                        And
 
   public static int $opt$noinline$andToOr(int a, int b) {
@@ -64,12 +66,49 @@
   }
 
   /**
+   * Test transformation of Not/Not/And into Or/Not for boolean negations.
+   * Note that the graph before this instruction simplification pass does not
+   * contain `HBooleanNot` instructions. This is because this transformation
+   * follows the optimization of `HSelect` to `HBooleanNot` occurring in the
+   * same pass.
+   */
+
+  /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier_after_bce (before)
+  /// CHECK:       <<P1:z\d+>>          ParameterValue
+  /// CHECK:       <<P2:z\d+>>          ParameterValue
+  /// CHECK-DAG:   <<Const0:i\d+>>      IntConstant 0
+  /// CHECK-DAG:   <<Const1:i\d+>>      IntConstant 1
+  /// CHECK:       <<Select1:i\d+>>     Select [<<Const1>>,<<Const0>>,<<P1>>]
+  /// CHECK:       <<Select2:i\d+>>     Select [<<Const1>>,<<Const0>>,<<P2>>]
+  /// CHECK:       <<And:i\d+>>         And [<<Select2>>,<<Select1>>]
+  /// CHECK:                            Return [<<And>>]
+
+  /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier_after_bce (after)
+  /// CHECK:       <<Cond1:z\d+>>       ParameterValue
+  /// CHECK:       <<Cond2:z\d+>>       ParameterValue
+  /// CHECK:       <<Or:i\d+>>          Or [<<Cond2>>,<<Cond1>>]
+  /// CHECK:       <<BooleanNot:z\d+>>  BooleanNot [<<Or>>]
+  /// CHECK:                            Return [<<BooleanNot>>]
+
+  /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier_after_bce (after)
+  /// CHECK:                            BooleanNot
+  /// CHECK-NOT:                        BooleanNot
+
+  /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier_after_bce (after)
+  /// CHECK-NOT:                        And
+
+  public static boolean $opt$noinline$booleanAndToOr(boolean a, boolean b) {
+    if (doThrow) throw new Error();
+    return !a & !b;
+  }
+
+  /**
    * Test transformation of Not/Not/Or into And/Not.
    */
 
   // See note above.
   // The second Xor has its arguments reversed for no obvious reason.
-  /// CHECK-START-ARM64: long Main.$opt$noinline$orToAnd(long, long) instruction_simplifier (before)
+  /// CHECK-START: long Main.$opt$noinline$orToAnd(long, long) instruction_simplifier (before)
   /// CHECK:       <<P1:j\d+>>          ParameterValue
   /// CHECK:       <<P2:j\d+>>          ParameterValue
   /// CHECK:       <<CstM1:j\d+>>       LongConstant -1
@@ -78,16 +117,18 @@
   /// CHECK:       <<Or:j\d+>>          Or [<<Not1>>,<<Not2>>]
   /// CHECK:                            Return [<<Or>>]
 
-  /// CHECK-START-ARM64: long Main.$opt$noinline$orToAnd(long, long) instruction_simplifier (after)
+  /// CHECK-START: long Main.$opt$noinline$orToAnd(long, long) instruction_simplifier (after)
   /// CHECK:       <<P1:j\d+>>          ParameterValue
   /// CHECK:       <<P2:j\d+>>          ParameterValue
   /// CHECK:       <<And:j\d+>>         And [<<P1>>,<<P2>>]
   /// CHECK:       <<Not:j\d+>>         Not [<<And>>]
   /// CHECK:                            Return [<<Not>>]
 
-  /// CHECK-START-ARM64: long Main.$opt$noinline$orToAnd(long, long) instruction_simplifier (after)
+  /// CHECK-START: long Main.$opt$noinline$orToAnd(long, long) instruction_simplifier (after)
   /// CHECK:                            Not
   /// CHECK-NOT:                        Not
+
+  /// CHECK-START: long Main.$opt$noinline$orToAnd(long, long) instruction_simplifier (after)
   /// CHECK-NOT:                        Or
 
   public static long $opt$noinline$orToAnd(long a, long b) {
@@ -96,13 +137,50 @@
   }
 
   /**
+   * Test transformation of Not/Not/Or into And/Not for boolean negations.
+   * Note that the graph before this instruction simplification pass does not
+   * contain `HBooleanNot` instructions. This is because this transformation
+   * follows the optimization of `HSelect` to `HBooleanNot` occurring in the
+   * same pass.
+   */
+
+  /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier_after_bce (before)
+  /// CHECK:       <<P1:z\d+>>          ParameterValue
+  /// CHECK:       <<P2:z\d+>>          ParameterValue
+  /// CHECK-DAG:   <<Const0:i\d+>>      IntConstant 0
+  /// CHECK-DAG:   <<Const1:i\d+>>      IntConstant 1
+  /// CHECK:       <<Select1:i\d+>>     Select [<<Const1>>,<<Const0>>,<<P1>>]
+  /// CHECK:       <<Select2:i\d+>>     Select [<<Const1>>,<<Const0>>,<<P2>>]
+  /// CHECK:       <<Or:i\d+>>          Or [<<Select2>>,<<Select1>>]
+  /// CHECK:                            Return [<<Or>>]
+
+  /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier_after_bce (after)
+  /// CHECK:       <<Cond1:z\d+>>       ParameterValue
+  /// CHECK:       <<Cond2:z\d+>>       ParameterValue
+  /// CHECK:       <<And:i\d+>>         And [<<Cond2>>,<<Cond1>>]
+  /// CHECK:       <<BooleanNot:z\d+>>  BooleanNot [<<And>>]
+  /// CHECK:                            Return [<<BooleanNot>>]
+
+  /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier_after_bce (after)
+  /// CHECK:                            BooleanNot
+  /// CHECK-NOT:                        BooleanNot
+
+  /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier_after_bce (after)
+  /// CHECK-NOT:                        Or
+
+  public static boolean $opt$noinline$booleanOrToAnd(boolean a, boolean b) {
+    if (doThrow) throw new Error();
+    return !a | !b;
+  }
+
+  /**
    * Test that the transformation copes with inputs being separated from the
    * bitwise operations.
    * This is a regression test. The initial logic was inserting the new bitwise
    * operation incorrectly.
    */
 
-  /// CHECK-START-ARM64: int Main.$opt$noinline$regressInputsAway(int, int) instruction_simplifier (before)
+  /// CHECK-START: int Main.$opt$noinline$regressInputsAway(int, int) instruction_simplifier (before)
   /// CHECK:       <<P1:i\d+>>          ParameterValue
   /// CHECK:       <<P2:i\d+>>          ParameterValue
   /// CHECK-DAG:   <<Cst1:i\d+>>        IntConstant 1
@@ -114,7 +192,7 @@
   /// CHECK:       <<Or:i\d+>>          Or [<<Not1>>,<<Not2>>]
   /// CHECK:                            Return [<<Or>>]
 
-  /// CHECK-START-ARM64: int Main.$opt$noinline$regressInputsAway(int, int) instruction_simplifier (after)
+  /// CHECK-START: int Main.$opt$noinline$regressInputsAway(int, int) instruction_simplifier (after)
   /// CHECK:       <<P1:i\d+>>          ParameterValue
   /// CHECK:       <<P2:i\d+>>          ParameterValue
   /// CHECK:       <<Cst1:i\d+>>        IntConstant 1
@@ -124,9 +202,11 @@
   /// CHECK:       <<Not:i\d+>>         Not [<<And>>]
   /// CHECK:                            Return [<<Not>>]
 
-  /// CHECK-START-ARM64: int Main.$opt$noinline$regressInputsAway(int, int) instruction_simplifier (after)
+  /// CHECK-START: int Main.$opt$noinline$regressInputsAway(int, int) instruction_simplifier (after)
   /// CHECK:                            Not
   /// CHECK-NOT:                        Not
+
+  /// CHECK-START: int Main.$opt$noinline$regressInputsAway(int, int) instruction_simplifier (after)
   /// CHECK-NOT:                        Or
 
   public static int $opt$noinline$regressInputsAway(int a, int b) {
@@ -143,7 +223,7 @@
    */
 
   // See first note above.
-  /// CHECK-START-ARM64: int Main.$opt$noinline$notXorToXor(int, int) instruction_simplifier (before)
+  /// CHECK-START: int Main.$opt$noinline$notXorToXor(int, int) instruction_simplifier (before)
   /// CHECK:       <<P1:i\d+>>          ParameterValue
   /// CHECK:       <<P2:i\d+>>          ParameterValue
   /// CHECK:       <<CstM1:i\d+>>       IntConstant -1
@@ -152,13 +232,13 @@
   /// CHECK:       <<Xor:i\d+>>         Xor [<<Not1>>,<<Not2>>]
   /// CHECK:                            Return [<<Xor>>]
 
-  /// CHECK-START-ARM64: int Main.$opt$noinline$notXorToXor(int, int) instruction_simplifier (after)
+  /// CHECK-START: int Main.$opt$noinline$notXorToXor(int, int) instruction_simplifier (after)
   /// CHECK:       <<P1:i\d+>>          ParameterValue
   /// CHECK:       <<P2:i\d+>>          ParameterValue
   /// CHECK:       <<Xor:i\d+>>         Xor [<<P1>>,<<P2>>]
   /// CHECK:                            Return [<<Xor>>]
 
-  /// CHECK-START-ARM64: int Main.$opt$noinline$notXorToXor(int, int) instruction_simplifier (after)
+  /// CHECK-START: int Main.$opt$noinline$notXorToXor(int, int) instruction_simplifier (after)
   /// CHECK-NOT:                        Not
 
   public static int $opt$noinline$notXorToXor(int a, int b) {
@@ -167,10 +247,42 @@
   }
 
   /**
+   * Test transformation of Not/Not/Xor into Xor for boolean negations.
+   * Note that the graph before this instruction simplification pass does not
+   * contain `HBooleanNot` instructions. This is because this transformation
+   * follows the optimization of `HSelect` to `HBooleanNot` occurring in the
+   * same pass.
+   */
+
+  /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier_after_bce (before)
+  /// CHECK:       <<P1:z\d+>>          ParameterValue
+  /// CHECK:       <<P2:z\d+>>          ParameterValue
+  /// CHECK-DAG:   <<Const0:i\d+>>      IntConstant 0
+  /// CHECK-DAG:   <<Const1:i\d+>>      IntConstant 1
+  /// CHECK:       <<Select1:i\d+>>     Select [<<Const1>>,<<Const0>>,<<P1>>]
+  /// CHECK:       <<Select2:i\d+>>     Select [<<Const1>>,<<Const0>>,<<P2>>]
+  /// CHECK:       <<Xor:i\d+>>         Xor [<<Select2>>,<<Select1>>]
+  /// CHECK:                            Return [<<Xor>>]
+
+  /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier_after_bce (after)
+  /// CHECK:       <<Cond1:z\d+>>       ParameterValue
+  /// CHECK:       <<Cond2:z\d+>>       ParameterValue
+  /// CHECK:       <<Xor:i\d+>>         Xor [<<Cond2>>,<<Cond1>>]
+  /// CHECK:                            Return [<<Xor>>]
+
+  /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier_after_bce (after)
+  /// CHECK-NOT:                        BooleanNot
+
+  public static boolean $opt$noinline$booleanNotXorToXor(boolean a, boolean b) {
+    if (doThrow) throw new Error();
+    return !a ^ !b;
+  }
+
+  /**
    * Check that no transformation is done when one Not has multiple uses.
    */
 
-  /// CHECK-START-ARM64: int Main.$opt$noinline$notMultipleUses(int, int) instruction_simplifier (before)
+  /// CHECK-START: int Main.$opt$noinline$notMultipleUses(int, int) instruction_simplifier (before)
   /// CHECK:       <<P1:i\d+>>          ParameterValue
   /// CHECK:       <<P2:i\d+>>          ParameterValue
   /// CHECK:       <<CstM1:i\d+>>       IntConstant -1
@@ -182,7 +294,7 @@
   /// CHECK:       <<Add:i\d+>>         Add [<<And2>>,<<And1>>]
   /// CHECK:                            Return [<<Add>>]
 
-  /// CHECK-START-ARM64: int Main.$opt$noinline$notMultipleUses(int, int) instruction_simplifier (after)
+  /// CHECK-START: int Main.$opt$noinline$notMultipleUses(int, int) instruction_simplifier (after)
   /// CHECK:       <<P1:i\d+>>          ParameterValue
   /// CHECK:       <<P2:i\d+>>          ParameterValue
   /// CHECK:       <<One:i\d+>>         IntConstant 1
@@ -193,7 +305,7 @@
   /// CHECK:       <<Add:i\d+>>         Add [<<And2>>,<<And1>>]
   /// CHECK:                            Return [<<Add>>]
 
-  /// CHECK-START-ARM64: int Main.$opt$noinline$notMultipleUses(int, int) instruction_simplifier (after)
+  /// CHECK-START: int Main.$opt$noinline$notMultipleUses(int, int) instruction_simplifier (after)
   /// CHECK-NOT:                        Or
 
   public static int $opt$noinline$notMultipleUses(int a, int b) {
diff --git a/test/566-checker-codegen-select/src/Main.java b/test/566-checker-codegen-select/src/Main.java
index edb31e6..3a1b3fc 100644
--- a/test/566-checker-codegen-select/src/Main.java
+++ b/test/566-checker-codegen-select/src/Main.java
@@ -45,6 +45,13 @@
   /// CHECK:             LessThanOrEqual
   /// CHECK-NEXT:        Select
 
+  // Check that we generate CMOV for long on x86_64.
+  /// CHECK-START-X86_64: long Main.$noinline$longSelect_Constant(long) disassembly (after)
+  /// CHECK:             LessThanOrEqual
+  /// CHECK-NEXT:        Select
+  /// CHECK:             cmpq
+  /// CHECK:             cmovle/ngq
+
   public long $noinline$longSelect_Constant(long param) {
     if (doThrow) { throw new Error(); }
     long val_true = longB;
@@ -52,12 +59,34 @@
     return (param > 3L) ? val_true : val_false;
   }
 
+  // Check that we generate CMOV for int on x86_64.
+  /// CHECK-START-X86_64: int Main.$noinline$intSelect_Constant(int) disassembly (after)
+  /// CHECK:             LessThan
+  /// CHECK-NEXT:        Select
+  /// CHECK:             cmp
+  /// CHECK:             cmovl/nge
+
+  public int $noinline$intSelect_Constant(int param) {
+    if (doThrow) { throw new Error(); }
+    int val_true = intB;
+    int val_false = intC;
+    return (param >= 3) ? val_true : val_false;
+  }
+
   public static void main(String[] args) {
     Main m = new Main();
     assertLongEquals(5L, m.$noinline$longSelect(4L));
     assertLongEquals(7L, m.$noinline$longSelect(2L));
     assertLongEquals(5L, m.$noinline$longSelect_Constant(4L));
     assertLongEquals(7L, m.$noinline$longSelect_Constant(2L));
+    assertIntEquals(5, m.$noinline$intSelect_Constant(4));
+    assertIntEquals(7, m.$noinline$intSelect_Constant(2));
+  }
+
+  public static void assertIntEquals(int expected, int actual) {
+    if (expected != actual) {
+      throw new Error(expected + " != " + actual);
+    }
   }
 
   public static void assertLongEquals(long expected, long actual) {
@@ -71,4 +100,6 @@
   public long longA = 3L;
   public long longB = 5L;
   public long longC = 7L;
+  public int intB = 5;
+  public int intC = 7;
 }
diff --git a/test/566-checker-signum/src/Main.java b/test/566-checker-signum/src/Main.java
index cc4a984..0ad0042 100644
--- a/test/566-checker-signum/src/Main.java
+++ b/test/566-checker-signum/src/Main.java
@@ -54,6 +54,13 @@
     expectEquals(1, sign64(12345L));
     expectEquals(1, sign64(Long.MAX_VALUE));
 
+    expectEquals(-1, sign64(0x800000007FFFFFFFL));
+    expectEquals(-1, sign64(0x80000000FFFFFFFFL));
+    expectEquals(1, sign64(0x000000007FFFFFFFL));
+    expectEquals(1, sign64(0x00000000FFFFFFFFL));
+    expectEquals(1, sign64(0x7FFFFFFF7FFFFFFFL));
+    expectEquals(1, sign64(0x7FFFFFFFFFFFFFFFL));
+
     for (long i = -11L; i <= 11L; i++) {
       int expected = 0;
       if (i < 0) expected = -1;
@@ -61,6 +68,14 @@
       expectEquals(expected, sign64(i));
     }
 
+    for (long i = Long.MIN_VALUE; i <= Long.MIN_VALUE + 11L; i++) {
+      expectEquals(-1, sign64(i));
+    }
+
+    for (long i = Long.MAX_VALUE; i >= Long.MAX_VALUE - 11L; i--) {
+      expectEquals(1, sign64(i));
+    }
+
     System.out.println("passed");
   }
 
diff --git a/test/567-checker-compare/src/Main.java b/test/567-checker-compare/src/Main.java
index 52abb75..951d2c7 100644
--- a/test/567-checker-compare/src/Main.java
+++ b/test/567-checker-compare/src/Main.java
@@ -88,6 +88,10 @@
     expectEquals(1, compare64(Long.MAX_VALUE, 1L));
     expectEquals(1, compare64(Long.MAX_VALUE, Long.MAX_VALUE - 1L));
 
+    expectEquals(-1, compare64(0x111111117FFFFFFFL, 0x11111111FFFFFFFFL));
+    expectEquals(0, compare64(0x111111117FFFFFFFL, 0x111111117FFFFFFFL));
+    expectEquals(1, compare64(0x11111111FFFFFFFFL, 0x111111117FFFFFFFL));
+
     for (long i = -11L; i <= 11L; i++) {
       for (long j = -11L; j <= 11L; j++) {
         int expected = 0;
@@ -97,6 +101,14 @@
       }
     }
 
+    for (long i = Long.MIN_VALUE; i <= Long.MIN_VALUE + 11L; i++) {
+      expectEquals(-1, compare64(i, 0));
+    }
+
+    for (long i = Long.MAX_VALUE; i >= Long.MAX_VALUE - 11L; i--) {
+      expectEquals(1, compare64(i, 0));
+    }
+
     System.out.println("passed");
   }
 
diff --git a/test/569-checker-pattern-replacement/src-multidex/Base.java b/test/569-checker-pattern-replacement/src-multidex/Base.java
new file mode 100644
index 0000000..f4d59af
--- /dev/null
+++ b/test/569-checker-pattern-replacement/src-multidex/Base.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Base {
+  Base() {
+    intField = 0;               // Unnecessary IPUT.
+    doubleField = 0.0;          // Unnecessary IPUT.
+    objectField = null;         // Unnecessary IPUT.
+  }
+
+  Base(int intValue) {
+    intField = intValue;
+  }
+
+  Base(String stringValue) {
+    objectField = stringValue;  // Unnecessary IPUT.
+    stringField = stringValue;
+    objectField = null;         // Unnecessary IPUT.
+  }
+
+  Base(double doubleValue, Object objectValue) {
+    doubleField = doubleValue;
+    objectField = objectValue;
+  }
+
+  Base(int intValue, double doubleValue, Object objectValue) {
+    intField = intValue;
+    doubleField = doubleValue;
+    objectField = objectValue;
+  }
+
+  Base(int intValue, double doubleValue, Object objectValue, String stringValue) {
+    // Outside our limit of 3 IPUTs.
+    intField = intValue;
+    doubleField = doubleValue;
+    objectField = objectValue;
+    stringField = stringValue;
+  }
+
+  Base(double doubleValue) {
+    this(doubleValue, null);
+  }
+
+  Base(Object objectValue) {
+    // Unsupported forwarding of a value after a zero.
+    this(0.0, objectValue);
+  }
+
+  Base(int intValue, long dummy) {
+    this(intValue, 0.0, null);
+  }
+
+  public int intField;
+  public double doubleField;
+  public Object objectField;
+  public String stringField;
+}
diff --git a/test/569-checker-pattern-replacement/src-multidex/BaseWithFinalField.java b/test/569-checker-pattern-replacement/src-multidex/BaseWithFinalField.java
new file mode 100644
index 0000000..7a1d591
--- /dev/null
+++ b/test/569-checker-pattern-replacement/src-multidex/BaseWithFinalField.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class BaseWithFinalField {
+  BaseWithFinalField() {
+    intField = 0;
+  }
+
+  BaseWithFinalField(int intValue) {
+    intField = intValue;
+  }
+
+  public final int intField;
+}
diff --git a/test/569-checker-pattern-replacement/src-multidex/Derived.java b/test/569-checker-pattern-replacement/src-multidex/Derived.java
new file mode 100644
index 0000000..184563f
--- /dev/null
+++ b/test/569-checker-pattern-replacement/src-multidex/Derived.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public final class Derived extends Base {
+  public Derived() {
+    this(0);
+  }
+
+  public Derived(int intValue) {
+    super(intValue);
+  }
+
+  public Derived(String stringValue) {
+    super(stringValue);
+    stringField = null;   // Clear field set by Base.<init>(String).
+  }
+
+  public Derived(double doubleValue) {
+    super(doubleValue, null);
+  }
+
+  public Derived(int intValue, double doubleValue, Object objectValue) {
+    super(intValue, doubleValue, objectValue);
+    objectField = null;   // Clear field set by Base.<init>(int, double, Object).
+    intField = 0;         // Clear field set by Base.<init>(int, double, Object).
+  }
+
+  Derived(int intValue, double doubleValue, Object objectValue, String stringValue) {
+    super(intValue, doubleValue, objectValue, stringValue);
+    // Clearing fields here doesn't help because the superclass constructor must
+    // satisfy the pattern constraints on its own and it doesn't (it has 4 IPUTs).
+    intField = 0;
+    doubleField = 0.0;
+    objectField = null;
+    stringField = null;
+  }
+
+  public Derived(float floatValue) {
+    super();
+    floatField = floatValue;
+  }
+
+  public Derived(int intValue, double doubleValue, Object objectValue, float floatValue) {
+    super(intValue, doubleValue, objectValue);
+    objectField = null;   // Clear field set by Base.<init>(int, double, Object).
+    floatField = floatValue;
+  }
+
+  public float floatField;
+}
diff --git a/test/569-checker-pattern-replacement/src-multidex/DerivedInSecondDex.java b/test/569-checker-pattern-replacement/src-multidex/DerivedInSecondDex.java
new file mode 100644
index 0000000..50266e8
--- /dev/null
+++ b/test/569-checker-pattern-replacement/src-multidex/DerivedInSecondDex.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public final class DerivedInSecondDex extends BaseInMainDex {
+  DerivedInSecondDex() {
+    super();
+  }
+
+  DerivedInSecondDex(int intValue) {
+    // Not matched: Superclass in a different dex file has an IPUT.
+    super(intValue);
+  }
+
+  DerivedInSecondDex(long dummy) {
+    // Matched: Superclass in a different dex file has an IPUT that's pruned because we store 0.
+    super(0);
+  }
+}
diff --git a/test/569-checker-pattern-replacement/src-multidex/DerivedWithFinalField.java b/test/569-checker-pattern-replacement/src-multidex/DerivedWithFinalField.java
new file mode 100644
index 0000000..5b39b8a
--- /dev/null
+++ b/test/569-checker-pattern-replacement/src-multidex/DerivedWithFinalField.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public final class DerivedWithFinalField extends BaseWithFinalField {
+  DerivedWithFinalField() {
+    this(0);
+  }
+
+  DerivedWithFinalField(int intValue) {
+    super(intValue);
+    doubleField = 0.0;
+  }
+
+  DerivedWithFinalField(double doubleValue) {
+    super(0);
+    doubleField = doubleValue;
+  }
+
+  DerivedWithFinalField(int intValue, double doubleValue) {
+    super(intValue);
+    doubleField = doubleValue;
+  }
+
+  public final double doubleField;
+}
diff --git a/test/569-checker-pattern-replacement/src/BaseInMainDex.java b/test/569-checker-pattern-replacement/src/BaseInMainDex.java
new file mode 100644
index 0000000..b401540
--- /dev/null
+++ b/test/569-checker-pattern-replacement/src/BaseInMainDex.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class BaseInMainDex {
+  BaseInMainDex() {
+  }
+
+  BaseInMainDex(int intValue) {
+    intField = intValue;
+  }
+
+  public int intField;
+}
diff --git a/test/569-checker-pattern-replacement/src/Main.java b/test/569-checker-pattern-replacement/src/Main.java
index e2d451c..345e9fd 100644
--- a/test/569-checker-pattern-replacement/src/Main.java
+++ b/test/569-checker-pattern-replacement/src/Main.java
@@ -15,368 +15,1210 @@
  */
 
 public class Main {
-    /// CHECK-START: void Main.staticNop() inliner (before)
-    /// CHECK:                          InvokeStaticOrDirect
+  /// CHECK-START: void Main.staticNop() inliner (before)
+  /// CHECK:                          InvokeStaticOrDirect
 
-    /// CHECK-START: void Main.staticNop() inliner (after)
-    /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-START: void Main.staticNop() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
 
-    public static void staticNop() {
-      Second.staticNop(11);
+  public static void staticNop() {
+    Second.staticNop(11);
+  }
+
+  /// CHECK-START: void Main.nop(Second) inliner (before)
+  /// CHECK:                          InvokeVirtual
+
+  /// CHECK-START: void Main.nop(Second) inliner (after)
+  /// CHECK-NOT:                      InvokeVirtual
+
+  public static void nop(Second s) {
+    s.nop();
+  }
+
+  /// CHECK-START: java.lang.Object Main.staticReturnArg2(java.lang.String) inliner (before)
+  /// CHECK-DAG:  <<Value:l\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Ignored:i\d+>>    IntConstant 77
+  /// CHECK-DAG:  <<ClinitCk:l\d+>>   ClinitCheck
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:  <<Invoke:l\d+>>     InvokeStaticOrDirect [<<Ignored>>,<<Value>>{{(,[ij]\d+)?}},<<ClinitCk>>]
+  /// CHECK-DAG:                      Return [<<Invoke>>]
+
+  /// CHECK-START: java.lang.Object Main.staticReturnArg2(java.lang.String) inliner (after)
+  /// CHECK-DAG:  <<Value:l\d+>>      ParameterValue
+  /// CHECK-DAG:                      Return [<<Value>>]
+
+  /// CHECK-START: java.lang.Object Main.staticReturnArg2(java.lang.String) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+
+  public static Object staticReturnArg2(String value) {
+    return Second.staticReturnArg2(77, value);
+  }
+
+  /// CHECK-START: long Main.returnArg1(Second, long) inliner (before)
+  /// CHECK-DAG:  <<Second:l\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Value:j\d+>>      ParameterValue
+  /// CHECK-DAG:  <<NullCk:l\d+>>     NullCheck [<<Second>>]
+  /// CHECK-DAG:  <<Invoke:j\d+>>     InvokeVirtual [<<NullCk>>,<<Value>>]
+  /// CHECK-DAG:                      Return [<<Invoke>>]
+
+  /// CHECK-START: long Main.returnArg1(Second, long) inliner (after)
+  /// CHECK-DAG:  <<Value:j\d+>>      ParameterValue
+  /// CHECK-DAG:                      Return [<<Value>>]
+
+  /// CHECK-START: long Main.returnArg1(Second, long) inliner (after)
+  /// CHECK-NOT:                      InvokeVirtual
+
+  public static long returnArg1(Second s, long value) {
+    return s.returnArg1(value);
+  }
+
+  /// CHECK-START: int Main.staticReturn9() inliner (before)
+  /// CHECK:      {{i\d+}}            InvokeStaticOrDirect
+
+  /// CHECK-START: int Main.staticReturn9() inliner (before)
+  /// CHECK-NOT:                      IntConstant 9
+
+  /// CHECK-START: int Main.staticReturn9() inliner (after)
+  /// CHECK-DAG:  <<Const9:i\d+>>     IntConstant 9
+  /// CHECK-DAG:                      Return [<<Const9>>]
+
+  /// CHECK-START: int Main.staticReturn9() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+
+  public static int staticReturn9() {
+    return Second.staticReturn9();
+  }
+
+  /// CHECK-START: int Main.return7(Second) inliner (before)
+  /// CHECK:      {{i\d+}}            InvokeVirtual
+
+  /// CHECK-START: int Main.return7(Second) inliner (before)
+  /// CHECK-NOT:                      IntConstant 7
+
+  /// CHECK-START: int Main.return7(Second) inliner (after)
+  /// CHECK-DAG:  <<Const7:i\d+>>     IntConstant 7
+  /// CHECK-DAG:                      Return [<<Const7>>]
+
+  /// CHECK-START: int Main.return7(Second) inliner (after)
+  /// CHECK-NOT:                      InvokeVirtual
+
+  public static int return7(Second s) {
+    return s.return7(null);
+  }
+
+  /// CHECK-START: java.lang.String Main.staticReturnNull() inliner (before)
+  /// CHECK:      {{l\d+}}            InvokeStaticOrDirect
+
+  /// CHECK-START: java.lang.String Main.staticReturnNull() inliner (before)
+  /// CHECK-NOT:                      NullConstant
+
+  /// CHECK-START: java.lang.String Main.staticReturnNull() inliner (after)
+  /// CHECK-DAG:  <<Null:l\d+>>       NullConstant
+  /// CHECK-DAG:                      Return [<<Null>>]
+
+  /// CHECK-START: java.lang.String Main.staticReturnNull() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+
+  public static String staticReturnNull() {
+    return Second.staticReturnNull();
+  }
+
+  /// CHECK-START: java.lang.Object Main.returnNull(Second) inliner (before)
+  /// CHECK:      {{l\d+}}            InvokeVirtual
+
+  /// CHECK-START: java.lang.Object Main.returnNull(Second) inliner (before)
+  /// CHECK-NOT:                      NullConstant
+
+  /// CHECK-START: java.lang.Object Main.returnNull(Second) inliner (after)
+  /// CHECK-DAG:  <<Null:l\d+>>       NullConstant
+  /// CHECK-DAG:                      Return [<<Null>>]
+
+  /// CHECK-START: java.lang.Object Main.returnNull(Second) inliner (after)
+  /// CHECK-NOT:                      InvokeVirtual
+
+  public static Object returnNull(Second s) {
+    return s.returnNull();
+  }
+
+  /// CHECK-START: int Main.getInt(Second) inliner (before)
+  /// CHECK:      {{i\d+}}            InvokeVirtual
+
+  /// CHECK-START: int Main.getInt(Second) inliner (after)
+  /// CHECK:      {{i\d+}}            InstanceFieldGet
+
+  /// CHECK-START: int Main.getInt(Second) inliner (after)
+  /// CHECK-NOT:                      InvokeVirtual
+
+  public static int getInt(Second s) {
+    return s.getInstanceIntField();
+  }
+
+  /// CHECK-START: double Main.getDouble(Second) inliner (before)
+  /// CHECK:      {{d\d+}}            InvokeVirtual
+
+  /// CHECK-START: double Main.getDouble(Second) inliner (after)
+  /// CHECK:      {{d\d+}}            InstanceFieldGet
+
+  /// CHECK-START: double Main.getDouble(Second) inliner (after)
+  /// CHECK-NOT:                      InvokeVirtual
+
+  public static double getDouble(Second s) {
+    return s.getInstanceDoubleField(22);
+  }
+
+  /// CHECK-START: java.lang.Object Main.getObject(Second) inliner (before)
+  /// CHECK:      {{l\d+}}            InvokeVirtual
+
+  /// CHECK-START: java.lang.Object Main.getObject(Second) inliner (after)
+  /// CHECK:      {{l\d+}}            InstanceFieldGet
+
+  /// CHECK-START: java.lang.Object Main.getObject(Second) inliner (after)
+  /// CHECK-NOT:                      InvokeVirtual
+
+  public static Object getObject(Second s) {
+    return s.getInstanceObjectField(-1L);
+  }
+
+  /// CHECK-START: java.lang.String Main.getString(Second) inliner (before)
+  /// CHECK:      {{l\d+}}            InvokeVirtual
+
+  /// CHECK-START: java.lang.String Main.getString(Second) inliner (after)
+  /// CHECK:      {{l\d+}}            InstanceFieldGet
+
+  /// CHECK-START: java.lang.String Main.getString(Second) inliner (after)
+  /// CHECK-NOT:                      InvokeVirtual
+
+  public static String getString(Second s) {
+    return s.getInstanceStringField(null, "whatever", 1234L);
+  }
+
+  /// CHECK-START: int Main.staticGetInt(Second) inliner (before)
+  /// CHECK:      {{i\d+}}            InvokeStaticOrDirect
+
+  /// CHECK-START: int Main.staticGetInt(Second) inliner (after)
+  /// CHECK:      {{i\d+}}            InvokeStaticOrDirect
+
+  /// CHECK-START: int Main.staticGetInt(Second) inliner (after)
+  /// CHECK-NOT:                      InstanceFieldGet
+
+  public static int staticGetInt(Second s) {
+    return Second.staticGetInstanceIntField(s);
+  }
+
+  /// CHECK-START: double Main.getDoubleFromParam(Second) inliner (before)
+  /// CHECK:      {{d\d+}}            InvokeVirtual
+
+  /// CHECK-START: double Main.getDoubleFromParam(Second) inliner (after)
+  /// CHECK:      {{d\d+}}            InvokeVirtual
+
+  /// CHECK-START: double Main.getDoubleFromParam(Second) inliner (after)
+  /// CHECK-NOT:                      InstanceFieldGet
+
+  public static double getDoubleFromParam(Second s) {
+    return s.getInstanceDoubleFieldFromParam(s);
+  }
+
+  /// CHECK-START: int Main.getStaticInt(Second) inliner (before)
+  /// CHECK:      {{i\d+}}            InvokeVirtual
+
+  /// CHECK-START: int Main.getStaticInt(Second) inliner (after)
+  /// CHECK:      {{i\d+}}            InvokeVirtual
+
+  /// CHECK-START: int Main.getStaticInt(Second) inliner (after)
+  /// CHECK-NOT:                      InstanceFieldGet
+  /// CHECK-NOT:                      StaticFieldGet
+
+  public static int getStaticInt(Second s) {
+    return s.getStaticIntField();
+  }
+
+  /// CHECK-START: long Main.setLong(Second, long) inliner (before)
+  /// CHECK:                          InvokeVirtual
+
+  /// CHECK-START: long Main.setLong(Second, long) inliner (after)
+  /// CHECK:                          InstanceFieldSet
+
+  /// CHECK-START: long Main.setLong(Second, long) inliner (after)
+  /// CHECK-NOT:                      InvokeVirtual
+
+  public static long setLong(Second s, long value) {
+    s.setInstanceLongField(-1, value);
+    return s.instanceLongField;
+  }
+
+  /// CHECK-START: long Main.setLongReturnArg2(Second, long, int) inliner (before)
+  /// CHECK:                          InvokeVirtual
+
+  /// CHECK-START: long Main.setLongReturnArg2(Second, long, int) inliner (after)
+  /// CHECK-DAG:  <<Second:l\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Value:j\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Arg2:i\d+>>       ParameterValue
+  /// CHECK-DAG:  <<NullCk:l\d+>>     NullCheck [<<Second>>]
+  /// CHECK-DAG:                      InstanceFieldSet [<<NullCk>>,<<Value>>]
+  /// CHECK-DAG:  <<NullCk2:l\d+>>    NullCheck [<<Second>>]
+  /// CHECK-DAG:  <<IGet:j\d+>>       InstanceFieldGet [<<NullCk2>>]
+  /// CHECK-DAG:  <<Conv:j\d+>>       TypeConversion [<<Arg2>>]
+  /// CHECK-DAG:  <<Add:j\d+>>        Add [<<IGet>>,<<Conv>>]
+  /// CHECK-DAG:                      Return [<<Add>>]
+
+  /// CHECK-START: long Main.setLongReturnArg2(Second, long, int) inliner (after)
+  /// CHECK-NOT:                      InvokeVirtual
+
+  public static long setLongReturnArg2(Second s, long value, int arg2) {
+    int result = s.setInstanceLongFieldReturnArg2(value, arg2);
+    return s.instanceLongField + result;
+  }
+
+  /// CHECK-START: long Main.staticSetLong(Second, long) inliner (before)
+  /// CHECK:                          InvokeStaticOrDirect
+
+  /// CHECK-START: long Main.staticSetLong(Second, long) inliner (after)
+  /// CHECK:                          InvokeStaticOrDirect
+
+  /// CHECK-START: long Main.staticSetLong(Second, long) inliner (after)
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static long staticSetLong(Second s, long value) {
+    Second.staticSetInstanceLongField(s, value);
+    return s.instanceLongField;
+  }
+
+  /// CHECK-START: long Main.setLongThroughParam(Second, long) inliner (before)
+  /// CHECK:                          InvokeVirtual
+
+  /// CHECK-START: long Main.setLongThroughParam(Second, long) inliner (after)
+  /// CHECK:                          InvokeVirtual
+
+  /// CHECK-START: long Main.setLongThroughParam(Second, long) inliner (after)
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static long setLongThroughParam(Second s, long value) {
+    s.setInstanceLongFieldThroughParam(s, value);
+    return s.instanceLongField;
+  }
+
+  /// CHECK-START: float Main.setStaticFloat(Second, float) inliner (before)
+  /// CHECK:                          InvokeVirtual
+
+  /// CHECK-START: float Main.setStaticFloat(Second, float) inliner (after)
+  /// CHECK:                          InvokeVirtual
+
+  /// CHECK-START: float Main.setStaticFloat(Second, float) inliner (after)
+  /// CHECK-NOT:                      InstanceFieldSet
+  /// CHECK-NOT:                      StaticFieldSet
+
+  public static float setStaticFloat(Second s, float value) {
+    s.setStaticFloatField(value);
+    return s.staticFloatField;
+  }
+
+  /// CHECK-START: java.lang.Object Main.newObject() inliner (before)
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>{{(,[ij]\d+)?}}] method_name:java.lang.Object.<init>
+
+  /// CHECK-START: java.lang.Object Main.newObject() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+
+  public static Object newObject() {
+    return new Object();
+  }
+
+  /// CHECK-START: double Main.constructBase() inliner (before)
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+  /// CHECK-START: double Main.constructBase() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructBase() {
+    Base b = new Base();
+    return b.intField + b.doubleField;
+  }
+
+  /// CHECK-START: double Main.constructBase(int) inliner (before)
+  /// CHECK-DAG:  <<Value:i\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+  /// CHECK-START: double Main.constructBase(int) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+
+  /// CHECK-START: double Main.constructBase(int) inliner (after)
+  /// CHECK-DAG:  <<Value:i\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<Value>>]
+
+  /// CHECK-START: double Main.constructBase(int) inliner (after)
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructBase(int intValue) {
+    Base b = new Base(intValue);
+    return b.intField + b.doubleField;
+  }
+
+  /// CHECK-START: double Main.constructBaseWith0() inliner (before)
+  /// CHECK-DAG:  <<Value:i\d+>>      IntConstant 0
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+  /// CHECK-START: double Main.constructBaseWith0() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructBaseWith0() {
+    Base b = new Base(0);
+    return b.intField + b.doubleField;
+  }
+
+  /// CHECK-START: java.lang.String Main.constructBase(java.lang.String) inliner (before)
+  /// CHECK-DAG:  <<Value:l\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+  /// CHECK-START: java.lang.String Main.constructBase(java.lang.String) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+
+  /// CHECK-START: java.lang.String Main.constructBase(java.lang.String) inliner (after)
+  /// CHECK-DAG:  <<Value:l\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<Value>>]
+
+  /// CHECK-START: java.lang.String Main.constructBase(java.lang.String) inliner (after)
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static String constructBase(String stringValue) {
+    Base b = new Base(stringValue);
+    return b.stringField;
+  }
+
+  /// CHECK-START: java.lang.String Main.constructBaseWithNullString() inliner (before)
+  /// CHECK-DAG:  <<Null:l\d+>>       NullConstant
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Null>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+  /// CHECK-START: java.lang.String Main.constructBaseWithNullString() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+
+  /// CHECK-START: java.lang.String Main.constructBaseWithNullString() inliner (after)
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static String constructBaseWithNullString() {
+    String stringValue = null;
+    Base b = new Base(stringValue);
+    return b.stringField;
+  }
+
+  /// CHECK-START: double Main.constructBase(double, java.lang.Object) inliner (before)
+  /// CHECK-DAG:  <<DValue:d\d+>>     ParameterValue
+  /// CHECK-DAG:  <<OValue:l\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<DValue>>,<<OValue>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+  /// CHECK-START: double Main.constructBase(double, java.lang.Object) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+
+  /// CHECK-START: double Main.constructBase(double, java.lang.Object) inliner (after)
+  /// CHECK-DAG:  <<DValue:d\d+>>     ParameterValue
+  /// CHECK-DAG:  <<OValue:l\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<DValue>>]
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<OValue>>]
+
+  /// CHECK-START: double Main.constructBase(double, java.lang.Object) inliner (after)
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructBase(double doubleValue, Object objectValue) {
+    Base b = new Base(doubleValue, objectValue);
+    return (b.objectField != null) ? b.doubleField : -b.doubleField;
+  }
+
+  /// CHECK-START: double Main.constructBase(int, double, java.lang.Object) inliner (before)
+  /// CHECK-DAG:  <<IValue:i\d+>>     ParameterValue
+  /// CHECK-DAG:  <<DValue:d\d+>>     ParameterValue
+  /// CHECK-DAG:  <<OValue:l\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>,<<OValue>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+  /// CHECK-START: double Main.constructBase(int, double, java.lang.Object) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+
+  /// CHECK-START: double Main.constructBase(int, double, java.lang.Object) inliner (after)
+  /// CHECK-DAG:  <<IValue:i\d+>>     ParameterValue
+  /// CHECK-DAG:  <<DValue:d\d+>>     ParameterValue
+  /// CHECK-DAG:  <<OValue:l\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<IValue>>]
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<DValue>>]
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<OValue>>]
+
+  /// CHECK-START: double Main.constructBase(int, double, java.lang.Object) inliner (after)
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructBase(int intValue, double doubleValue, Object objectValue) {
+    Base b = new Base(intValue, doubleValue, objectValue);
+    double tmp = b.intField + b.doubleField;
+    return (b.objectField != null) ? tmp : -tmp;
+  }
+
+  /// CHECK-START: double Main.constructBaseWith0DoubleNull(double) inliner (before)
+  /// CHECK-DAG:  <<IValue:i\d+>>     IntConstant 0
+  /// CHECK-DAG:  <<DValue:d\d+>>     ParameterValue
+  /// CHECK-DAG:  <<OValue:l\d+>>     NullConstant
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>,<<OValue>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+  /// CHECK-START: double Main.constructBaseWith0DoubleNull(double) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+
+  /// CHECK-START: double Main.constructBaseWith0DoubleNull(double) inliner (after)
+  /// CHECK-DAG:  <<DValue:d\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<DValue>>]
+
+  /// CHECK-START: double Main.constructBaseWith0DoubleNull(double) inliner (after)
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructBaseWith0DoubleNull(double doubleValue) {
+    Base b = new Base(0, doubleValue, null);
+    double tmp = b.intField + b.doubleField;
+    return (b.objectField != null) ? tmp : -tmp;
+  }
+
+  /// CHECK-START: double Main.constructBase(int, double, java.lang.Object, java.lang.String) inliner (before)
+  /// CHECK-DAG:  <<IValue:i\d+>>     ParameterValue
+  /// CHECK-DAG:  <<DValue:d\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>,{{l\d+}},{{l\d+}}{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+  /// CHECK-START: double Main.constructBase(int, double, java.lang.Object, java.lang.String) inliner (after)
+  /// CHECK-DAG:  <<IValue:i\d+>>     ParameterValue
+  /// CHECK-DAG:  <<DValue:d\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>,{{l\d+}},{{l\d+}}{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+  /// CHECK-START: double Main.constructBase(int, double, java.lang.Object, java.lang.String) inliner (after)
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructBase(
+      int intValue, double doubleValue, Object objectValue, String stringValue) {
+    Base b = new Base(intValue, doubleValue, objectValue, stringValue);
+    double tmp = b.intField + b.doubleField;
+    tmp = (b.objectField != null) ? tmp : -tmp;
+    return (b.stringField != null) ? 2.0 * tmp : 0.5 * tmp;
+  }
+
+  /// CHECK-START: double Main.constructBase(double) inliner (before)
+  /// CHECK-DAG:  <<Value:d\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+  /// CHECK-START: double Main.constructBase(double) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+
+  /// CHECK-START: double Main.constructBase(double) inliner (after)
+  /// CHECK-DAG:  <<Value:d\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<Value>>]
+
+  /// CHECK-START: double Main.constructBase(double) inliner (after)
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructBase(double doubleValue) {
+    Base b = new Base(doubleValue);
+    return b.intField + b.doubleField;
+  }
+
+  /// CHECK-START: double Main.constructBaseWith0d() inliner (before)
+  /// CHECK-DAG:  <<Value:d\d+>>      DoubleConstant
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+  /// CHECK-START: double Main.constructBaseWith0d() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructBaseWith0d() {
+    Base b = new Base(0.0);
+    return b.intField + b.doubleField;
+  }
+
+  /// CHECK-START: double Main.constructBase(java.lang.Object) inliner (before)
+  /// CHECK-DAG:  <<OValue:l\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<OValue>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+  /// CHECK-START: double Main.constructBase(java.lang.Object) inliner (after)
+  /// CHECK-DAG:  <<OValue:l\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<OValue>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+  /// CHECK-START: double Main.constructBase(java.lang.Object) inliner (after)
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructBase(Object objectValue) {
+    Base b = new Base(objectValue);
+    double tmp = b.intField + b.doubleField;
+    return (b.objectField != null) ? tmp + 1.0 : tmp - 1.0;
+  }
+
+  /// CHECK-START: double Main.constructBase(int, long) inliner (before)
+  /// CHECK-DAG:  <<IValue:i\d+>>     ParameterValue
+  /// CHECK-DAG:  <<JValue:j\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<JValue>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+  /// CHECK-START: double Main.constructBase(int, long) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+
+  /// CHECK-START: double Main.constructBase(int, long) inliner (after)
+  /// CHECK-DAG:  <<IValue:i\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<IValue>>]
+
+  /// CHECK-START: double Main.constructBase(int, long) inliner (after)
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructBase(int intValue, long dummy) {
+    Base b = new Base(intValue, dummy);
+    return b.intField + b.doubleField;
+  }
+
+  /// CHECK-START: double Main.constructDerived() inliner (before)
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+  /// CHECK-START: double Main.constructDerived() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructDerived() {
+    Derived d = new Derived();
+    return d.intField + d.doubleField;
+  }
+
+  /// CHECK-START: double Main.constructDerived(int) inliner (before)
+  /// CHECK-DAG:  <<Value:i\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+  /// CHECK-START: double Main.constructDerived(int) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+
+  /// CHECK-START: double Main.constructDerived(int) inliner (after)
+  /// CHECK-DAG:  <<Value:i\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<Value>>]
+
+  /// CHECK-START: double Main.constructDerived(int) inliner (after)
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructDerived(int intValue) {
+    Derived d = new Derived(intValue);
+    return d.intField + d.doubleField;
+  }
+
+  /// CHECK-START: double Main.constructDerivedWith0() inliner (before)
+  /// CHECK-DAG:  <<Value:i\d+>>      IntConstant 0
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+  /// CHECK-START: double Main.constructDerivedWith0() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructDerivedWith0() {
+    Derived d = new Derived(0);
+    return d.intField + d.doubleField;
+  }
+
+  /// CHECK-START: java.lang.String Main.constructDerived(java.lang.String) inliner (before)
+  /// CHECK-DAG:  <<Value:l\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+  /// CHECK-START: java.lang.String Main.constructDerived(java.lang.String) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+
+  /// CHECK-START: java.lang.String Main.constructDerived(java.lang.String) inliner (after)
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static String constructDerived(String stringValue) {
+    Derived d = new Derived(stringValue);
+    return d.stringField;
+  }
+
+  /// CHECK-START: double Main.constructDerived(double) inliner (before)
+  /// CHECK-DAG:  <<Value:d\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+  /// CHECK-START: double Main.constructDerived(double) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+
+  /// CHECK-START: double Main.constructDerived(double) inliner (after)
+  /// CHECK-DAG:  <<Value:d\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<Value>>]
+
+  /// CHECK-START: double Main.constructDerived(double) inliner (after)
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructDerived(double doubleValue) {
+    Derived d = new Derived(doubleValue);
+    return d.intField + d.doubleField;
+  }
+
+  /// CHECK-START: double Main.constructDerivedWith0d() inliner (before)
+  /// CHECK-DAG:  <<Value:d\d+>>      DoubleConstant
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+  /// CHECK-START: double Main.constructDerivedWith0d() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructDerivedWith0d() {
+    Derived d = new Derived(0.0);
+    return d.intField + d.doubleField;
+  }
+
+  /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object) inliner (before)
+  /// CHECK-DAG:  <<IValue:i\d+>>     ParameterValue
+  /// CHECK-DAG:  <<DValue:d\d+>>     ParameterValue
+  /// CHECK-DAG:  <<OValue:l\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>,<<OValue>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+  /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+
+  /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object) inliner (after)
+  /// CHECK-DAG:  <<DValue:d\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<DValue>>]
+
+  /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object) inliner (after)
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructDerived(int intValue, double doubleValue, Object objectValue) {
+    Derived d = new Derived(intValue, doubleValue, objectValue);
+    double tmp = d.intField + d.doubleField;
+    return (d.objectField != null) ? tmp : -tmp;
+  }
+
+  /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object, java.lang.String) inliner (before)
+  /// CHECK-DAG:  <<IValue:i\d+>>     ParameterValue
+  /// CHECK-DAG:  <<DValue:d\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>,{{l\d+}},{{l\d+}}{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+  /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object, java.lang.String) inliner (after)
+  /// CHECK-DAG:  <<IValue:i\d+>>     ParameterValue
+  /// CHECK-DAG:  <<DValue:d\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>,{{l\d+}},{{l\d+}}{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+  /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object, java.lang.String) inliner (after)
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructDerived(
+      int intValue, double doubleValue, Object objectValue, String stringValue) {
+    Derived d = new Derived(intValue, doubleValue, objectValue, stringValue);
+    double tmp = d.intField + d.doubleField;
+    tmp = (d.objectField != null) ? tmp : -tmp;
+    return (d.stringField != null) ? 2.0 * tmp : 0.5 * tmp;
+  }
+
+  /// CHECK-START: double Main.constructDerived(float) inliner (before)
+  /// CHECK-DAG:  <<Value:f\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+  /// CHECK-START: double Main.constructDerived(float) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+
+  /// CHECK-START: double Main.constructDerived(float) inliner (after)
+  /// CHECK-DAG:  <<Value:f\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<Value>>]
+
+  /// CHECK-START: double Main.constructDerived(float) inliner (after)
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructDerived(float floatValue) {
+    Derived d = new Derived(floatValue);
+    return d.intField + d.doubleField + d.floatField;
+  }
+
+  /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object, float) inliner (before)
+  /// CHECK-DAG:  <<IValue:i\d+>>     ParameterValue
+  /// CHECK-DAG:  <<DValue:d\d+>>     ParameterValue
+  /// CHECK-DAG:  <<OValue:l\d+>>     ParameterValue
+  /// CHECK-DAG:  <<FValue:f\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>,<<OValue>>,<<FValue>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+  /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object, float) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+
+  /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object, float) inliner (after)
+  /// CHECK-DAG:  <<IValue:i\d+>>     ParameterValue
+  /// CHECK-DAG:  <<DValue:d\d+>>     ParameterValue
+  /// CHECK-DAG:  <<FValue:f\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<IValue>>]
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<DValue>>]
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<FValue>>]
+
+  /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object, float) inliner (after)
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructDerived(
+      int intValue, double doubleValue, Object objectValue, float floatValue) {
+    Derived d = new Derived(intValue, doubleValue, objectValue, floatValue);
+    double tmp = d.intField + d.doubleField + d.floatField;
+    return (d.objectField != null) ? tmp : -tmp;
+  }
+
+  /// CHECK-START: int Main.constructBaseWithFinalField() inliner (before)
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>{{(,[ij]\d+)?}}] method_name:BaseWithFinalField.<init>
+
+  /// CHECK-START: int Main.constructBaseWithFinalField() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static int constructBaseWithFinalField() {
+    BaseWithFinalField b = new BaseWithFinalField();
+    return b.intField;
+  }
+
+  /// CHECK-START: int Main.constructBaseWithFinalField(int) inliner (before)
+  /// CHECK-DAG:  <<Value:i\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:BaseWithFinalField.<init>
+
+  /// CHECK-START: int Main.constructBaseWithFinalField(int) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+
+  /// CHECK-START: int Main.constructBaseWithFinalField(int) inliner (after)
+  /// CHECK-DAG:  <<Value:i\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<Value>>]
+  /// CHECK-DAG:                      MemoryBarrier
+
+  /// CHECK-START: int Main.constructBaseWithFinalField(int) inliner (after)
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static int constructBaseWithFinalField(int intValue) {
+    BaseWithFinalField b = new BaseWithFinalField(intValue);
+    return b.intField;
+  }
+
+  /// CHECK-START: int Main.constructBaseWithFinalFieldWith0() inliner (before)
+  /// CHECK-DAG:  <<Value:i\d+>>      IntConstant 0
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:BaseWithFinalField.<init>
+
+  /// CHECK-START: int Main.constructBaseWithFinalFieldWith0() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static int constructBaseWithFinalFieldWith0() {
+    BaseWithFinalField b = new BaseWithFinalField(0);
+    return b.intField;
+  }
+
+  /// CHECK-START: double Main.constructDerivedWithFinalField() inliner (before)
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>{{(,[ij]\d+)?}}] method_name:DerivedWithFinalField.<init>
+
+  /// CHECK-START: double Main.constructDerivedWithFinalField() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructDerivedWithFinalField() {
+    DerivedWithFinalField d = new DerivedWithFinalField();
+    return d.intField + d.doubleField;
+  }
+
+  /// CHECK-START: double Main.constructDerivedWithFinalField(int) inliner (before)
+  /// CHECK-DAG:  <<Value:i\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedWithFinalField.<init>
+
+  /// CHECK-START: double Main.constructDerivedWithFinalField(int) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+
+  /// CHECK-START: double Main.constructDerivedWithFinalField(int) inliner (after)
+  /// CHECK-DAG:  <<Value:i\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<Value>>]
+  /// CHECK-DAG:                      MemoryBarrier
+
+  /// CHECK-START: double Main.constructDerivedWithFinalField(int) inliner (after)
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructDerivedWithFinalField(int intValue) {
+    DerivedWithFinalField d = new DerivedWithFinalField(intValue);
+    return d.intField + d.doubleField;
+  }
+
+  /// CHECK-START: double Main.constructDerivedWithFinalFieldWith0() inliner (before)
+  /// CHECK-DAG:  <<Value:i\d+>>      IntConstant 0
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedWithFinalField.<init>
+
+  /// CHECK-START: double Main.constructDerivedWithFinalFieldWith0() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructDerivedWithFinalFieldWith0() {
+    DerivedWithFinalField d = new DerivedWithFinalField(0);
+    return d.intField + d.doubleField;
+  }
+
+  /// CHECK-START: double Main.constructDerivedWithFinalField(double) inliner (before)
+  /// CHECK-DAG:  <<Value:d\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedWithFinalField.<init>
+
+  /// CHECK-START: double Main.constructDerivedWithFinalField(double) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+
+  /// CHECK-START: double Main.constructDerivedWithFinalField(double) inliner (after)
+  /// CHECK-DAG:  <<Value:d\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<Value>>]
+  /// CHECK-DAG:                      MemoryBarrier
+
+  /// CHECK-START: double Main.constructDerivedWithFinalField(double) inliner (after)
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructDerivedWithFinalField(double doubleValue) {
+    DerivedWithFinalField d = new DerivedWithFinalField(doubleValue);
+    return d.intField + d.doubleField;
+  }
+
+  /// CHECK-START: double Main.constructDerivedWithFinalFieldWith0d() inliner (before)
+  /// CHECK-DAG:  <<Value:d\d+>>      DoubleConstant
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedWithFinalField.<init>
+
+  /// CHECK-START: double Main.constructDerivedWithFinalFieldWith0d() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructDerivedWithFinalFieldWith0d() {
+    DerivedWithFinalField d = new DerivedWithFinalField(0.0);
+    return d.intField + d.doubleField;
+  }
+
+  /// CHECK-START: double Main.constructDerivedWithFinalField(int, double) inliner (before)
+  /// CHECK-DAG:  <<IValue:i\d+>>     ParameterValue
+  /// CHECK-DAG:  <<DValue:d\d+>>     ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>{{(,[ij]\d+)?}}] method_name:DerivedWithFinalField.<init>
+
+  /// CHECK-START: double Main.constructDerivedWithFinalField(int, double) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+
+  /// CHECK-START: double Main.constructDerivedWithFinalField(int, double) inliner (after)
+  /// CHECK-DAG:  <<Value:d\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  /// CHECK-DAG:                      InstanceFieldSet [<<Obj>>,<<Value>>]
+  /// CHECK-DAG:                      MemoryBarrier
+
+  /// CHECK-START: double Main.constructDerivedWithFinalField(int, double) inliner (after)
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-DAG:                      InstanceFieldSet
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  /// CHECK-START: double Main.constructDerivedWithFinalField(int, double) inliner (after)
+  /// CHECK-DAG:                      MemoryBarrier
+  /// CHECK-NOT:                      MemoryBarrier
+
+  public static double constructDerivedWithFinalField(int intValue, double doubleValue) {
+    DerivedWithFinalField d = new DerivedWithFinalField(intValue, doubleValue);
+    return d.intField + d.doubleField;
+  }
+
+  /// CHECK-START: double Main.constructDerivedWithFinalFieldWith0And0d() inliner (before)
+  /// CHECK-DAG:  <<IValue:i\d+>>     IntConstant 0
+  /// CHECK-DAG:  <<DValue:d\d+>>     DoubleConstant
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>{{(,[ij]\d+)?}}] method_name:DerivedWithFinalField.<init>
+
+  /// CHECK-START: double Main.constructDerivedWithFinalFieldWith0And0d() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static double constructDerivedWithFinalFieldWith0And0d() {
+    DerivedWithFinalField d = new DerivedWithFinalField(0, 0.0);
+    return d.intField + d.doubleField;
+  }
+
+  /// CHECK-START: int Main.constructDerivedInSecondDex() inliner (before)
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>{{(,[ij]\d+)?}}] method_name:DerivedInSecondDex.<init>
+
+  /// CHECK-START: int Main.constructDerivedInSecondDex() inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static int constructDerivedInSecondDex() {
+    DerivedInSecondDex d = new DerivedInSecondDex();
+    return d.intField;
+  }
+
+  /// CHECK-START: int Main.constructDerivedInSecondDex(int) inliner (before)
+  /// CHECK-DAG:  <<Value:i\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedInSecondDex.<init>
+
+  /// CHECK-START: int Main.constructDerivedInSecondDex(int) inliner (after)
+  /// CHECK-DAG:  <<Value:i\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedInSecondDex.<init>
+
+  /// CHECK-START: int Main.constructDerivedInSecondDex(int) inliner (after)
+  /// CHECK-NOT:                      MemoryBarrier
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static int constructDerivedInSecondDex(int intValue) {
+    DerivedInSecondDex d = new DerivedInSecondDex(intValue);
+    return d.intField;
+  }
+
+  /// CHECK-START: int Main.constructDerivedInSecondDexWith0() inliner (before)
+  /// CHECK-DAG:  <<Value:i\d+>>      IntConstant 0
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedInSecondDex.<init>
+
+  /// CHECK-START: int Main.constructDerivedInSecondDexWith0() inliner (after)
+  /// CHECK-DAG:  <<Value:i\d+>>      IntConstant 0
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedInSecondDex.<init>
+
+  /// CHECK-START: int Main.constructDerivedInSecondDexWith0() inliner (after)
+  /// CHECK-NOT:                      MemoryBarrier
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static int constructDerivedInSecondDexWith0() {
+    DerivedInSecondDex d = new DerivedInSecondDex(0);
+    return d.intField;
+  }
+
+  /// CHECK-START: int Main.constructDerivedInSecondDex(long) inliner (before)
+  /// CHECK-DAG:  <<Value:j\d+>>      ParameterValue
+  /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
+  // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+  /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedInSecondDex.<init>
+
+  /// CHECK-START: int Main.constructDerivedInSecondDex(long) inliner (after)
+  /// CHECK-NOT:                      InvokeStaticOrDirect
+  /// CHECK-NOT:                      MemoryBarrier
+  /// CHECK-NOT:                      InstanceFieldSet
+
+  public static int constructDerivedInSecondDex(long dummy) {
+    DerivedInSecondDex d = new DerivedInSecondDex(dummy);
+    return d.intField;
+  }
+
+  public static void main(String[] args) throws Exception {
+    Second s = new Second();
+
+    // Replaced NOP pattern.
+    staticNop();
+    nop(s);
+    // Replaced "return arg" pattern.
+    assertEquals("arbitrary string", staticReturnArg2("arbitrary string"));
+    assertEquals(4321L, returnArg1(s, 4321L));
+    // Replaced "return const" pattern.
+    assertEquals(9, staticReturn9());
+    assertEquals(7, return7(s));
+    assertEquals(null, staticReturnNull());
+    assertEquals(null, returnNull(s));
+    // Replaced IGET pattern.
+    assertEquals(42, getInt(s));
+    assertEquals(-42.0, getDouble(s));
+    assertEquals(null, getObject(s));
+    assertEquals("dummy", getString(s));
+    // Not replaced IGET pattern.
+    assertEquals(42, staticGetInt(s));
+    assertEquals(-42.0, getDoubleFromParam(s));
+    // SGET.
+    assertEquals(4242, getStaticInt(s));
+    // Replaced IPUT pattern.
+    assertEquals(111L, setLong(s, 111L));
+    assertEquals(345L, setLongReturnArg2(s, 222L, 123));
+    // Not replaced IPUT pattern.
+    assertEquals(222L, staticSetLong(s, 222L));
+    assertEquals(333L, setLongThroughParam(s, 333L));
+    // SPUT.
+    assertEquals(-11.5f, setStaticFloat(s, -11.5f));
+
+    if (newObject() == null) {
+      throw new AssertionError("new Object() cannot be null.");
     }
 
-    /// CHECK-START: void Main.nop(Second) inliner (before)
-    /// CHECK:                          InvokeVirtual
+    assertEquals(0.0, constructBase());
+    assertEquals(42.0, constructBase(42));
+    assertEquals(0.0, constructBaseWith0());
+    assertEquals("something", constructBase("something"));
+    assertEquals(null, constructBaseWithNullString());
+    assertEquals(11.0, constructBase(11.0, new Object()));
+    assertEquals(-12.0, constructBase(12.0, null));
+    assertEquals(30.0, constructBase(17, 13.0, new Object()));
+    assertEquals(-34.0, constructBase(19, 15.0, null));
+    assertEquals(-22.5, constructBaseWith0DoubleNull(22.5));
+    assertEquals(-8.0, constructBase(2, 14.0, null, null));
+    assertEquals(-64.0, constructBase(4, 28.0, null, "dummy"));
+    assertEquals(13.0, constructBase(24, 2.0, new Object(), null));
+    assertEquals(30.0, constructBase(11, 4.0, new Object(), "dummy"));
+    assertEquals(43.0, constructBase(43.0));
+    assertEquals(0.0, constructBaseWith0d());
+    assertEquals(1.0, constructBase(new Object()));
+    assertEquals(-1.0, constructBase((Object) null));
+    assertEquals(123.0, constructBase(123, 65L));
 
-    /// CHECK-START: void Main.nop(Second) inliner (after)
-    /// CHECK-NOT:                      InvokeVirtual
+    assertEquals(0.0, constructDerived());
+    assertEquals(73.0, constructDerived(73));
+    assertEquals(0.0, constructDerivedWith0());
+    assertEquals(null, constructDerived("something else"));
+    assertEquals(18.0, constructDerived(18.0));
+    assertEquals(0.0, constructDerivedWith0d());
+    assertEquals(-7.0, constructDerived(5, 7.0, new Object()));
+    assertEquals(-4.0, constructDerived(9, 4.0, null));
+    assertEquals(0.0, constructDerived(1, 9.0, null, null));
+    assertEquals(0.0, constructDerived(2, 8.0, null, "dummy"));
+    assertEquals(0.0, constructDerived(3, 7.0, new Object(), null));
+    assertEquals(0.0, constructDerived(4, 6.0, new Object(), "dummy"));
+    assertEquals(17.0, constructDerived(17.0f));
+    assertEquals(-5.5, constructDerived(6, -7.0, new Object(), 6.5f));
 
-    public static void nop(Second s) {
-      s.nop();
+    assertEquals(0, constructBaseWithFinalField());
+    assertEquals(77, constructBaseWithFinalField(77));
+    assertEquals(0, constructBaseWithFinalFieldWith0());
+    assertEquals(0.0, constructDerivedWithFinalField());
+    assertEquals(-33.0, constructDerivedWithFinalField(-33));
+    assertEquals(0.0, constructDerivedWithFinalFieldWith0());
+    assertEquals(-44.0, constructDerivedWithFinalField(-44.0));
+    assertEquals(0.0, constructDerivedWithFinalFieldWith0d());
+    assertEquals(88, constructDerivedWithFinalField(22, 66.0));
+    assertEquals(0.0, constructDerivedWithFinalFieldWith0And0d());
+
+    assertEquals(0, constructDerivedInSecondDex());
+    assertEquals(123, constructDerivedInSecondDex(123));
+    assertEquals(0, constructDerivedInSecondDexWith0());
+    assertEquals(0, constructDerivedInSecondDex(7L));
+  }
+
+  private static void assertEquals(int expected, int actual) {
+    if (expected != actual) {
+      throw new AssertionError("Wrong result: " + expected + " != " + actual);
     }
+  }
 
-    /// CHECK-START: java.lang.Object Main.staticReturnArg2(java.lang.String) inliner (before)
-    /// CHECK-DAG:  <<Value:l\d+>>      ParameterValue
-    /// CHECK-DAG:  <<Ignored:i\d+>>    IntConstant 77
-    /// CHECK-DAG:  <<ClinitCk:l\d+>>   ClinitCheck
-    // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
-    /// CHECK-DAG:  <<Invoke:l\d+>>     InvokeStaticOrDirect [<<Ignored>>,<<Value>>{{(,[ij]\d+)?}},<<ClinitCk>>]
-    /// CHECK-DAG:                      Return [<<Invoke>>]
-
-    /// CHECK-START: java.lang.Object Main.staticReturnArg2(java.lang.String) inliner (after)
-    /// CHECK-DAG:  <<Value:l\d+>>      ParameterValue
-    /// CHECK-DAG:                      Return [<<Value>>]
-
-    /// CHECK-START: java.lang.Object Main.staticReturnArg2(java.lang.String) inliner (after)
-    /// CHECK-NOT:                      InvokeStaticOrDirect
-
-    public static Object staticReturnArg2(String value) {
-      return Second.staticReturnArg2(77, value);
+  private static void assertEquals(double expected, double actual) {
+    if (expected != actual) {
+      throw new AssertionError("Wrong result: " + expected + " != " + actual);
     }
+  }
 
-    /// CHECK-START: long Main.returnArg1(Second, long) inliner (before)
-    /// CHECK-DAG:  <<Second:l\d+>>     ParameterValue
-    /// CHECK-DAG:  <<Value:j\d+>>      ParameterValue
-    /// CHECK-DAG:  <<NullCk:l\d+>>     NullCheck [<<Second>>]
-    /// CHECK-DAG:  <<Invoke:j\d+>>     InvokeVirtual [<<NullCk>>,<<Value>>]
-    /// CHECK-DAG:                      Return [<<Invoke>>]
-
-    /// CHECK-START: long Main.returnArg1(Second, long) inliner (after)
-    /// CHECK-DAG:  <<Value:j\d+>>      ParameterValue
-    /// CHECK-DAG:                      Return [<<Value>>]
-
-    /// CHECK-START: long Main.returnArg1(Second, long) inliner (after)
-    /// CHECK-NOT:                      InvokeVirtual
-
-    public static long returnArg1(Second s, long value) {
-      return s.returnArg1(value);
+  private static void assertEquals(Object expected, Object actual) {
+    if (expected != actual && (expected == null || !expected.equals(actual))) {
+      throw new AssertionError("Wrong result: " + expected + " != " + actual);
     }
-
-    /// CHECK-START: int Main.staticReturn9() inliner (before)
-    /// CHECK:      {{i\d+}}            InvokeStaticOrDirect
-
-    /// CHECK-START: int Main.staticReturn9() inliner (before)
-    /// CHECK-NOT:                      IntConstant 9
-
-    /// CHECK-START: int Main.staticReturn9() inliner (after)
-    /// CHECK-DAG:  <<Const9:i\d+>>     IntConstant 9
-    /// CHECK-DAG:                      Return [<<Const9>>]
-
-    /// CHECK-START: int Main.staticReturn9() inliner (after)
-    /// CHECK-NOT:                      InvokeStaticOrDirect
-
-    public static int staticReturn9() {
-      return Second.staticReturn9();
-    }
-
-    /// CHECK-START: int Main.return7(Second) inliner (before)
-    /// CHECK:      {{i\d+}}            InvokeVirtual
-
-    /// CHECK-START: int Main.return7(Second) inliner (before)
-    /// CHECK-NOT:                      IntConstant 7
-
-    /// CHECK-START: int Main.return7(Second) inliner (after)
-    /// CHECK-DAG:  <<Const7:i\d+>>     IntConstant 7
-    /// CHECK-DAG:                      Return [<<Const7>>]
-
-    /// CHECK-START: int Main.return7(Second) inliner (after)
-    /// CHECK-NOT:                      InvokeVirtual
-
-    public static int return7(Second s) {
-      return s.return7(null);
-    }
-
-    /// CHECK-START: java.lang.String Main.staticReturnNull() inliner (before)
-    /// CHECK:      {{l\d+}}            InvokeStaticOrDirect
-
-    /// CHECK-START: java.lang.String Main.staticReturnNull() inliner (before)
-    /// CHECK-NOT:                      NullConstant
-
-    /// CHECK-START: java.lang.String Main.staticReturnNull() inliner (after)
-    /// CHECK-DAG:  <<Null:l\d+>>       NullConstant
-    /// CHECK-DAG:                      Return [<<Null>>]
-
-    /// CHECK-START: java.lang.String Main.staticReturnNull() inliner (after)
-    /// CHECK-NOT:                      InvokeStaticOrDirect
-
-    public static String staticReturnNull() {
-      return Second.staticReturnNull();
-    }
-
-    /// CHECK-START: java.lang.Object Main.returnNull(Second) inliner (before)
-    /// CHECK:      {{l\d+}}            InvokeVirtual
-
-    /// CHECK-START: java.lang.Object Main.returnNull(Second) inliner (before)
-    /// CHECK-NOT:                      NullConstant
-
-    /// CHECK-START: java.lang.Object Main.returnNull(Second) inliner (after)
-    /// CHECK-DAG:  <<Null:l\d+>>       NullConstant
-    /// CHECK-DAG:                      Return [<<Null>>]
-
-    /// CHECK-START: java.lang.Object Main.returnNull(Second) inliner (after)
-    /// CHECK-NOT:                      InvokeVirtual
-
-    public static Object returnNull(Second s) {
-      return s.returnNull();
-    }
-
-    /// CHECK-START: int Main.getInt(Second) inliner (before)
-    /// CHECK:      {{i\d+}}            InvokeVirtual
-
-    /// CHECK-START: int Main.getInt(Second) inliner (after)
-    /// CHECK:      {{i\d+}}            InstanceFieldGet
-
-    /// CHECK-START: int Main.getInt(Second) inliner (after)
-    /// CHECK-NOT:                      InvokeVirtual
-
-    public static int getInt(Second s) {
-      return s.getInstanceIntField();
-    }
-
-    /// CHECK-START: double Main.getDouble(Second) inliner (before)
-    /// CHECK:      {{d\d+}}            InvokeVirtual
-
-    /// CHECK-START: double Main.getDouble(Second) inliner (after)
-    /// CHECK:      {{d\d+}}            InstanceFieldGet
-
-    /// CHECK-START: double Main.getDouble(Second) inliner (after)
-    /// CHECK-NOT:                      InvokeVirtual
-
-    public static double getDouble(Second s) {
-      return s.getInstanceDoubleField(22);
-    }
-
-    /// CHECK-START: java.lang.Object Main.getObject(Second) inliner (before)
-    /// CHECK:      {{l\d+}}            InvokeVirtual
-
-    /// CHECK-START: java.lang.Object Main.getObject(Second) inliner (after)
-    /// CHECK:      {{l\d+}}            InstanceFieldGet
-
-    /// CHECK-START: java.lang.Object Main.getObject(Second) inliner (after)
-    /// CHECK-NOT:                      InvokeVirtual
-
-    public static Object getObject(Second s) {
-      return s.getInstanceObjectField(-1L);
-    }
-
-    /// CHECK-START: java.lang.String Main.getString(Second) inliner (before)
-    /// CHECK:      {{l\d+}}            InvokeVirtual
-
-    /// CHECK-START: java.lang.String Main.getString(Second) inliner (after)
-    /// CHECK:      {{l\d+}}            InstanceFieldGet
-
-    /// CHECK-START: java.lang.String Main.getString(Second) inliner (after)
-    /// CHECK-NOT:                      InvokeVirtual
-
-    public static String getString(Second s) {
-      return s.getInstanceStringField(null, "whatever", 1234L);
-    }
-
-    /// CHECK-START: int Main.staticGetInt(Second) inliner (before)
-    /// CHECK:      {{i\d+}}            InvokeStaticOrDirect
-
-    /// CHECK-START: int Main.staticGetInt(Second) inliner (after)
-    /// CHECK:      {{i\d+}}            InvokeStaticOrDirect
-
-    /// CHECK-START: int Main.staticGetInt(Second) inliner (after)
-    /// CHECK-NOT:                      InstanceFieldGet
-
-    public static int staticGetInt(Second s) {
-      return Second.staticGetInstanceIntField(s);
-    }
-
-    /// CHECK-START: double Main.getDoubleFromParam(Second) inliner (before)
-    /// CHECK:      {{d\d+}}            InvokeVirtual
-
-    /// CHECK-START: double Main.getDoubleFromParam(Second) inliner (after)
-    /// CHECK:      {{d\d+}}            InvokeVirtual
-
-    /// CHECK-START: double Main.getDoubleFromParam(Second) inliner (after)
-    /// CHECK-NOT:                      InstanceFieldGet
-
-    public static double getDoubleFromParam(Second s) {
-      return s.getInstanceDoubleFieldFromParam(s);
-    }
-
-    /// CHECK-START: int Main.getStaticInt(Second) inliner (before)
-    /// CHECK:      {{i\d+}}            InvokeVirtual
-
-    /// CHECK-START: int Main.getStaticInt(Second) inliner (after)
-    /// CHECK:      {{i\d+}}            InvokeVirtual
-
-    /// CHECK-START: int Main.getStaticInt(Second) inliner (after)
-    /// CHECK-NOT:                      InstanceFieldGet
-    /// CHECK-NOT:                      StaticFieldGet
-
-    public static int getStaticInt(Second s) {
-      return s.getStaticIntField();
-    }
-
-    /// CHECK-START: long Main.setLong(Second, long) inliner (before)
-    /// CHECK:                          InvokeVirtual
-
-    /// CHECK-START: long Main.setLong(Second, long) inliner (after)
-    /// CHECK:                          InstanceFieldSet
-
-    /// CHECK-START: long Main.setLong(Second, long) inliner (after)
-    /// CHECK-NOT:                      InvokeVirtual
-
-    public static long setLong(Second s, long value) {
-      s.setInstanceLongField(-1, value);
-      return s.instanceLongField;
-    }
-
-    /// CHECK-START: long Main.setLongReturnArg2(Second, long, int) inliner (before)
-    /// CHECK:                          InvokeVirtual
-
-    /// CHECK-START: long Main.setLongReturnArg2(Second, long, int) inliner (after)
-    /// CHECK-DAG:  <<Second:l\d+>>     ParameterValue
-    /// CHECK-DAG:  <<Value:j\d+>>      ParameterValue
-    /// CHECK-DAG:  <<Arg2:i\d+>>       ParameterValue
-    /// CHECK-DAG:  <<NullCk:l\d+>>     NullCheck [<<Second>>]
-    /// CHECK-DAG:                      InstanceFieldSet [<<NullCk>>,<<Value>>]
-    /// CHECK-DAG:  <<NullCk2:l\d+>>    NullCheck [<<Second>>]
-    /// CHECK-DAG:  <<IGet:j\d+>>       InstanceFieldGet [<<NullCk2>>]
-    /// CHECK-DAG:  <<Conv:j\d+>>       TypeConversion [<<Arg2>>]
-    /// CHECK-DAG:  <<Add:j\d+>>        Add [<<IGet>>,<<Conv>>]
-    /// CHECK-DAG:                      Return [<<Add>>]
-
-    /// CHECK-START: long Main.setLongReturnArg2(Second, long, int) inliner (after)
-    /// CHECK-NOT:                      InvokeVirtual
-
-    public static long setLongReturnArg2(Second s, long value, int arg2) {
-      int result = s.setInstanceLongFieldReturnArg2(value, arg2);
-      return s.instanceLongField + result;
-    }
-
-    /// CHECK-START: long Main.staticSetLong(Second, long) inliner (before)
-    /// CHECK:                          InvokeStaticOrDirect
-
-    /// CHECK-START: long Main.staticSetLong(Second, long) inliner (after)
-    /// CHECK:                          InvokeStaticOrDirect
-
-    /// CHECK-START: long Main.staticSetLong(Second, long) inliner (after)
-    /// CHECK-NOT:                      InstanceFieldSet
-
-    public static long staticSetLong(Second s, long value) {
-      Second.staticSetInstanceLongField(s, value);
-      return s.instanceLongField;
-    }
-
-    /// CHECK-START: long Main.setLongThroughParam(Second, long) inliner (before)
-    /// CHECK:                          InvokeVirtual
-
-    /// CHECK-START: long Main.setLongThroughParam(Second, long) inliner (after)
-    /// CHECK:                          InvokeVirtual
-
-    /// CHECK-START: long Main.setLongThroughParam(Second, long) inliner (after)
-    /// CHECK-NOT:                      InstanceFieldSet
-
-    public static long setLongThroughParam(Second s, long value) {
-      s.setInstanceLongFieldThroughParam(s, value);
-      return s.instanceLongField;
-    }
-
-    /// CHECK-START: float Main.setStaticFloat(Second, float) inliner (before)
-    /// CHECK:                          InvokeVirtual
-
-    /// CHECK-START: float Main.setStaticFloat(Second, float) inliner (after)
-    /// CHECK:                          InvokeVirtual
-
-    /// CHECK-START: float Main.setStaticFloat(Second, float) inliner (after)
-    /// CHECK-NOT:                      InstanceFieldSet
-    /// CHECK-NOT:                      StaticFieldSet
-
-    public static float setStaticFloat(Second s, float value) {
-      s.setStaticFloatField(value);
-      return s.staticFloatField;
-    }
-
-    /// CHECK-START: java.lang.Object Main.newObject() inliner (before)
-    /// CHECK-DAG:  <<Obj:l\d+>>        NewInstance
-    // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
-    /// CHECK-DAG:                      InvokeStaticOrDirect [<<Obj>>{{(,[ij]\d+)?}}] method_name:java.lang.Object.<init>
-
-    /// CHECK-START: java.lang.Object Main.newObject() inliner (after)
-    /// CHECK-NOT:                      InvokeStaticOrDirect
-
-    public static Object newObject() {
-      return new Object();
-    }
-
-    public static void main(String[] args) throws Exception {
-      Second s = new Second();
-
-      // Replaced NOP pattern.
-      staticNop();
-      nop(s);
-      // Replaced "return arg" pattern.
-      assertEquals("arbitrary string", staticReturnArg2("arbitrary string"));
-      assertEquals(4321L, returnArg1(s, 4321L));
-      // Replaced "return const" pattern.
-      assertEquals(9, staticReturn9());
-      assertEquals(7, return7(s));
-      assertEquals(null, staticReturnNull());
-      assertEquals(null, returnNull(s));
-      // Replaced IGET pattern.
-      assertEquals(42, getInt(s));
-      assertEquals(-42.0, getDouble(s));
-      assertEquals(null, getObject(s));
-      assertEquals("dummy", getString(s));
-      // Not replaced IGET pattern.
-      assertEquals(42, staticGetInt(s));
-      assertEquals(-42.0, getDoubleFromParam(s));
-      // SGET.
-      assertEquals(4242, getStaticInt(s));
-      // Replaced IPUT pattern.
-      assertEquals(111L, setLong(s, 111L));
-      assertEquals(345L, setLongReturnArg2(s, 222L, 123));
-      // Not replaced IPUT pattern.
-      assertEquals(222L, staticSetLong(s, 222L));
-      assertEquals(333L, setLongThroughParam(s, 333L));
-      // SPUT.
-      assertEquals(-11.5f, setStaticFloat(s, -11.5f));
-
-      if (newObject() == null) {
-        throw new AssertionError("new Object() cannot be null.");
-      }
-    }
-
-    private static void assertEquals(int expected, int actual) {
-      if (expected != actual) {
-        throw new AssertionError("Wrong result: " + expected + " != " + actual);
-      }
-    }
-
-    private static void assertEquals(double expected, double actual) {
-      if (expected != actual) {
-        throw new AssertionError("Wrong result: " + expected + " != " + actual);
-      }
-    }
-
-    private static void assertEquals(Object expected, Object actual) {
-      if (expected != actual && (expected == null || !expected.equals(actual))) {
-        throw new AssertionError("Wrong result: " + expected + " != " + actual);
-      }
-    }
+  }
 }
diff --git a/test/570-checker-osr/expected.txt b/test/570-checker-osr/expected.txt
new file mode 100644
index 0000000..555c6a9
--- /dev/null
+++ b/test/570-checker-osr/expected.txt
@@ -0,0 +1,5 @@
+JNI_OnLoad called
+100000000
+200000000
+300000000
+400000000
diff --git a/test/570-checker-osr/info.txt b/test/570-checker-osr/info.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/570-checker-osr/info.txt
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
new file mode 100644
index 0000000..fb84687
--- /dev/null
+++ b/test/570-checker-osr/osr.cc
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "art_method.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
+#include "oat_quick_method_header.h"
+#include "scoped_thread_state_change.h"
+#include "stack_map.h"
+
+namespace art {
+
+class OsrVisitor : public StackVisitor {
+ public:
+  explicit OsrVisitor(Thread* thread)
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+        in_osr_method_(false) {}
+
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+    ArtMethod* m = GetMethod();
+    std::string m_name(m->GetName());
+
+    if ((m_name.compare("$noinline$returnInt") == 0) ||
+        (m_name.compare("$noinline$returnFloat") == 0) ||
+        (m_name.compare("$noinline$returnDouble") == 0) ||
+        (m_name.compare("$noinline$returnLong") == 0) ||
+        (m_name.compare("$noinline$deopt") == 0)) {
+      const OatQuickMethodHeader* header =
+          Runtime::Current()->GetJit()->GetCodeCache()->LookupOsrMethodHeader(m);
+      if (header != nullptr && header == GetCurrentOatQuickMethodHeader()) {
+        in_osr_method_ = true;
+      }
+      return false;
+    }
+    return true;
+  }
+
+  bool in_osr_method_;
+};
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_ensureInOsrCode(JNIEnv*, jclass) {
+  jit::Jit* jit = Runtime::Current()->GetJit();
+  if (jit == nullptr) {
+    // Just return true for non-jit configurations to stop the infinite loop.
+    return JNI_TRUE;
+  }
+  ScopedObjectAccess soa(Thread::Current());
+  OsrVisitor visitor(soa.Self());
+  visitor.WalkStack();
+  return visitor.in_osr_method_;
+}
+
+}  // namespace art
diff --git a/test/570-checker-osr/smali/Osr.smali b/test/570-checker-osr/smali/Osr.smali
new file mode 100644
index 0000000..869c7c3
--- /dev/null
+++ b/test/570-checker-osr/smali/Osr.smali
@@ -0,0 +1,35 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LOsr;
+
+.super Ljava/lang/Object;
+
+# Check that blocks only having nops are not merged when they are loop headers.
+# This ensures we can do on-stack replacement for branches to those nop blocks.
+
+## CHECK-START: int Osr.simpleLoop(int, int) dead_code_elimination_final (after)
+## CHECK-DAG:                     SuspendCheck loop:<<OuterLoop:B\d+>> outer_loop:none
+## CHECK-DAG:                     SuspendCheck loop:{{B\d+}} outer_loop:<<OuterLoop>>
+.method public static simpleLoop(II)I
+   .registers 3
+   const/16 v0, 0
+   :nop_entry
+   nop
+   :loop_entry
+   add-int v0, v0, v0
+   if-eq v0, v1, :loop_entry
+   if-eq v0, v2, :nop_entry
+   return v0
+.end method
diff --git a/test/570-checker-osr/src/DeoptimizationController.java b/test/570-checker-osr/src/DeoptimizationController.java
new file mode 100644
index 0000000..907d133
--- /dev/null
+++ b/test/570-checker-osr/src/DeoptimizationController.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file is a copy of 802-deoptimization/src/DeoptimizationController.java
+// because run-test requires standalone individual test.
+
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Method;
+
+/**
+ * Controls deoptimization using dalvik.system.VMDebug class.
+ */
+public class DeoptimizationController {
+  private static final String TEMP_FILE_NAME_PREFIX = "test";
+  private static final String TEMP_FILE_NAME_SUFFIX = ".trace";
+
+  private static File createTempFile() throws Exception {
+    try {
+      return  File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
+    } catch (IOException e) {
+      System.setProperty("java.io.tmpdir", "/data/local/tmp");
+      try {
+        return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
+      } catch (IOException e2) {
+        System.setProperty("java.io.tmpdir", "/sdcard");
+        return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
+      }
+    }
+  }
+
+  public static void startDeoptimization() {
+    File tempFile = null;
+    try {
+      tempFile = createTempFile();
+      String tempFileName = tempFile.getPath();
+
+      VMDebug.startMethodTracing(tempFileName, 0, 0, false, 1000);
+      if (VMDebug.getMethodTracingMode() == 0) {
+        throw new IllegalStateException("Not tracing.");
+      }
+    } catch (Exception exc) {
+      exc.printStackTrace(System.err);
+    } finally {
+      if (tempFile != null) {
+        tempFile.delete();
+      }
+    }
+  }
+
+  public static void stopDeoptimization() {
+    try {
+      VMDebug.stopMethodTracing();
+      if (VMDebug.getMethodTracingMode() != 0) {
+        throw new IllegalStateException("Still tracing.");
+      }
+    } catch (Exception exc) {
+      exc.printStackTrace(System.err);
+    }
+  }
+
+  private static class VMDebug {
+    private static final Method startMethodTracingMethod;
+    private static final Method stopMethodTracingMethod;
+    private static final Method getMethodTracingModeMethod;
+
+    static {
+      try {
+        Class<?> c = Class.forName("dalvik.system.VMDebug");
+        startMethodTracingMethod = c.getDeclaredMethod("startMethodTracing", String.class,
+            Integer.TYPE, Integer.TYPE, Boolean.TYPE, Integer.TYPE);
+        stopMethodTracingMethod = c.getDeclaredMethod("stopMethodTracing");
+        getMethodTracingModeMethod = c.getDeclaredMethod("getMethodTracingMode");
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+    }
+
+    public static void startMethodTracing(String filename, int bufferSize, int flags,
+        boolean samplingEnabled, int intervalUs) throws Exception {
+      startMethodTracingMethod.invoke(null, filename, bufferSize, flags, samplingEnabled,
+          intervalUs);
+    }
+    public static void stopMethodTracing() throws Exception {
+      stopMethodTracingMethod.invoke(null);
+    }
+    public static int getMethodTracingMode() throws Exception {
+      return (int) getMethodTracingModeMethod.invoke(null);
+    }
+  }
+}
diff --git a/test/570-checker-osr/src/Main.java b/test/570-checker-osr/src/Main.java
new file mode 100644
index 0000000..7485163
--- /dev/null
+++ b/test/570-checker-osr/src/Main.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) {
+    System.loadLibrary(args[0]);
+    if ($noinline$returnInt() != 53) {
+      throw new Error("Unexpected return value");
+    }
+    if ($noinline$returnFloat() != 42.2f) {
+      throw new Error("Unexpected return value");
+    }
+    if ($noinline$returnDouble() != Double.longBitsToDouble(0xF000000000001111L)) {
+      throw new Error("Unexpected return value ");
+    }
+    if ($noinline$returnLong() != 0xFFFF000000001111L) {
+      throw new Error("Unexpected return value");
+    }
+
+    try {
+      $noinline$deopt();
+    } catch (Exception e) {}
+  }
+
+  public static int $noinline$returnInt() {
+    if (doThrow) throw new Error("");
+    int i = 0;
+    for (; i < 100000000; ++i) {
+    }
+    while (!ensureInOsrCode()) {}
+    System.out.println(i);
+    return 53;
+  }
+
+  public static float $noinline$returnFloat() {
+    if (doThrow) throw new Error("");
+    int i = 0;
+    for (; i < 200000000; ++i) {
+    }
+    while (!ensureInOsrCode()) {}
+    System.out.println(i);
+    return 42.2f;
+  }
+
+  public static double $noinline$returnDouble() {
+    if (doThrow) throw new Error("");
+    int i = 0;
+    for (; i < 300000000; ++i) {
+    }
+    while (!ensureInOsrCode()) {}
+    System.out.println(i);
+    return Double.longBitsToDouble(0xF000000000001111L);
+  }
+
+  public static long $noinline$returnLong() {
+    if (doThrow) throw new Error("");
+    int i = 1000000;
+    for (; i < 400000000; ++i) {
+    }
+    while (!ensureInOsrCode()) {}
+    System.out.println(i);
+    return 0xFFFF000000001111L;
+  }
+
+  public static void $noinline$deopt() {
+    if (doThrow) throw new Error("");
+    int i = 0;
+    for (; i < 100000000; ++i) {
+    }
+    while (!ensureInOsrCode()) {}
+    DeoptimizationController.startDeoptimization();
+  }
+
+  public static int[] array = new int[4];
+
+  public static native boolean ensureInOsrCode();
+
+  public static boolean doThrow = false;
+}
diff --git a/test/570-checker-select/src/Main.java b/test/570-checker-select/src/Main.java
index 2f8094d..ec60240 100644
--- a/test/570-checker-select/src/Main.java
+++ b/test/570-checker-select/src/Main.java
@@ -19,6 +19,11 @@
   /// CHECK-START: int Main.BoolCond_IntVarVar(boolean, int, int) register (after)
   /// CHECK:               Select [{{i\d+}},{{i\d+}},{{z\d+}}]
 
+  /// CHECK-START-X86_64: int Main.BoolCond_IntVarVar(boolean, int, int) disassembly (after)
+  /// CHECK:            <<Cond:z\d+>> ParameterValue
+  /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
+  /// CHECK:                          cmovnz/ne
+
   public static int BoolCond_IntVarVar(boolean cond, int x, int y) {
     return cond ? x : y;
   }
@@ -26,6 +31,11 @@
   /// CHECK-START: int Main.BoolCond_IntVarCst(boolean, int) register (after)
   /// CHECK:               Select [{{i\d+}},{{i\d+}},{{z\d+}}]
 
+  /// CHECK-START-X86_64: int Main.BoolCond_IntVarCst(boolean, int) disassembly (after)
+  /// CHECK:            <<Cond:z\d+>> ParameterValue
+  /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
+  /// CHECK:                          cmovnz/ne
+
   public static int BoolCond_IntVarCst(boolean cond, int x) {
     return cond ? x : 1;
   }
@@ -33,10 +43,51 @@
   /// CHECK-START: int Main.BoolCond_IntCstVar(boolean, int) register (after)
   /// CHECK:               Select [{{i\d+}},{{i\d+}},{{z\d+}}]
 
+  /// CHECK-START-X86_64: int Main.BoolCond_IntCstVar(boolean, int) disassembly (after)
+  /// CHECK:            <<Cond:z\d+>> ParameterValue
+  /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
+  /// CHECK:                          cmovnz/ne
+
   public static int BoolCond_IntCstVar(boolean cond, int y) {
     return cond ? 1 : y;
   }
 
+  /// CHECK-START: long Main.BoolCond_LongVarVar(boolean, long, long) register (after)
+  /// CHECK:               Select [{{j\d+}},{{j\d+}},{{z\d+}}]
+
+  /// CHECK-START-X86_64: long Main.BoolCond_LongVarVar(boolean, long, long) disassembly (after)
+  /// CHECK:            <<Cond:z\d+>> ParameterValue
+  /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
+  /// CHECK:                          cmovnz/neq
+
+  public static long BoolCond_LongVarVar(boolean cond, long x, long y) {
+    return cond ? x : y;
+  }
+
+  /// CHECK-START: long Main.BoolCond_LongVarCst(boolean, long) register (after)
+  /// CHECK:               Select [{{j\d+}},{{j\d+}},{{z\d+}}]
+
+  /// CHECK-START-X86_64: long Main.BoolCond_LongVarCst(boolean, long) disassembly (after)
+  /// CHECK:            <<Cond:z\d+>> ParameterValue
+  /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
+  /// CHECK:                          cmovnz/neq
+
+  public static long BoolCond_LongVarCst(boolean cond, long x) {
+    return cond ? x : 1L;
+  }
+
+  /// CHECK-START: long Main.BoolCond_LongCstVar(boolean, long) register (after)
+  /// CHECK:               Select [{{j\d+}},{{j\d+}},{{z\d+}}]
+
+  /// CHECK-START-X86_64: long Main.BoolCond_LongCstVar(boolean, long) disassembly (after)
+  /// CHECK:            <<Cond:z\d+>> ParameterValue
+  /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
+  /// CHECK:                          cmovnz/neq
+
+  public static long BoolCond_LongCstVar(boolean cond, long y) {
+    return cond ? 1L : y;
+  }
+
   /// CHECK-START: float Main.BoolCond_FloatVarVar(boolean, float, float) register (after)
   /// CHECK:               Select [{{f\d+}},{{f\d+}},{{z\d+}}]
 
@@ -62,6 +113,11 @@
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
   /// CHECK-NEXT:                     Select [{{i\d+}},{{i\d+}},<<Cond>>]
 
+  /// CHECK-START-X86_64: int Main.IntNonmatCond_IntVarVar(int, int, int, int) disassembly (after)
+  /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
+  /// CHECK-NEXT:                     Select [{{i\d+}},{{i\d+}},<<Cond>>]
+  /// CHECK:                          cmovle/ng
+
   public static int IntNonmatCond_IntVarVar(int a, int b, int x, int y) {
     return a > b ? x : y;
   }
@@ -71,11 +127,78 @@
   /// CHECK-NEXT:       <<Sel:i\d+>>  Select [{{i\d+}},{{i\d+}},{{z\d+}}]
   /// CHECK-NEXT:                     Add [<<Cond>>,<<Sel>>]
 
+  /// CHECK-START-X86_64: int Main.IntMatCond_IntVarVar(int, int, int, int) disassembly (after)
+  /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
+  /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
+  /// CHECK:                          cmovle/ng
+
   public static int IntMatCond_IntVarVar(int a, int b, int x, int y) {
     int result = (a > b ? x : y);
     return result + (a > b ? 0 : 1);
   }
 
+  /// CHECK-START: long Main.IntNonmatCond_LongVarVar(int, int, long, long) register (after)
+  /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
+  /// CHECK-NEXT:                     Select [{{j\d+}},{{j\d+}},<<Cond>>]
+
+  /// CHECK-START-X86_64: long Main.IntNonmatCond_LongVarVar(int, int, long, long) disassembly (after)
+  /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
+  /// CHECK-NEXT:                     Select [{{j\d+}},{{j\d+}},<<Cond>>]
+  /// CHECK:                          cmovle/ngq
+
+  public static long IntNonmatCond_LongVarVar(int a, int b, long x, long y) {
+    return a > b ? x : y;
+  }
+
+  /// CHECK-START: long Main.IntMatCond_LongVarVar(int, int, long, long) register (after)
+  /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
+  /// CHECK:            <<Sel1:j\d+>> Select [{{j\d+}},{{j\d+}},<<Cond>>]
+  /// CHECK:            <<Sel2:j\d+>> Select [{{j\d+}},{{j\d+}},<<Cond>>]
+  /// CHECK:                          Add [<<Sel2>>,<<Sel1>>]
+
+  /// CHECK-START-X86_64: long Main.IntMatCond_LongVarVar(int, int, long, long) disassembly (after)
+  /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
+  /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
+  /// CHECK:                          cmovle/ngq
+  /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
+  /// CHECK:                          cmovnz/neq
+
+  public static long IntMatCond_LongVarVar(int a, int b, long x, long y) {
+    long result = (a > b ? x : y);
+    return result + (a > b ? 0L : 1L);
+  }
+
+  /// CHECK-START: long Main.LongNonmatCond_LongVarVar(long, long, long, long) register (after)
+  /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{j\d+}},{{j\d+}}]
+  /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
+
+  /// CHECK-START-X86_64: long Main.LongNonmatCond_LongVarVar(long, long, long, long) disassembly (after)
+  /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{j\d+}},{{j\d+}}]
+  /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
+  /// CHECK:                          cmovle/ngq
+
+  public static long LongNonmatCond_LongVarVar(long a, long b, long x, long y) {
+    return a > b ? x : y;
+  }
+
+  /// CHECK-START: long Main.LongMatCond_LongVarVar(long, long, long, long) register (after)
+  /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{j\d+}},{{j\d+}}]
+  /// CHECK:            <<Sel1:j\d+>> Select [{{j\d+}},{{j\d+}},<<Cond>>]
+  /// CHECK:            <<Sel2:j\d+>> Select [{{j\d+}},{{j\d+}},<<Cond>>]
+  /// CHECK:                          Add [<<Sel2>>,<<Sel1>>]
+
+  /// CHECK-START-X86_64: long Main.LongMatCond_LongVarVar(long, long, long, long) disassembly (after)
+  /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{j\d+}},{{j\d+}}]
+  /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
+  /// CHECK:                          cmovle/ngq
+  /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
+  /// CHECK:                          cmovnz/neq
+
+  public static long LongMatCond_LongVarVar(long a, long b, long x, long y) {
+    long result = (a > b ? x : y);
+    return result + (a > b ? 0L : 1L);
+  }
+
   /// CHECK-START: int Main.FloatLtNonmatCond_IntVarVar(float, float, int, int) register (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{f\d+}},{{f\d+}}]
   /// CHECK-NEXT:                     Select [{{i\d+}},{{i\d+}},<<Cond>>]
@@ -150,6 +273,13 @@
     assertEqual(1, BoolCond_IntCstVar(true, 7));
     assertEqual(7, BoolCond_IntCstVar(false, 7));
 
+    assertEqual(5L, BoolCond_LongVarVar(true, 5L, 7L));
+    assertEqual(7L, BoolCond_LongVarVar(false, 5L, 7L));
+    assertEqual(5L, BoolCond_LongVarCst(true, 5L));
+    assertEqual(1L, BoolCond_LongVarCst(false, 5L));
+    assertEqual(1L, BoolCond_LongCstVar(true, 7L));
+    assertEqual(7L, BoolCond_LongCstVar(false, 7L));
+
     assertEqual(5, BoolCond_FloatVarVar(true, 5, 7));
     assertEqual(7, BoolCond_FloatVarVar(false, 5, 7));
     assertEqual(5, BoolCond_FloatVarCst(true, 5));
diff --git a/test/572-checker-array-get-regression/expected.txt b/test/572-checker-array-get-regression/expected.txt
new file mode 100644
index 0000000..f7d1ad4
--- /dev/null
+++ b/test/572-checker-array-get-regression/expected.txt
@@ -0,0 +1 @@
+524287
diff --git a/test/572-checker-array-get-regression/info.txt b/test/572-checker-array-get-regression/info.txt
new file mode 100644
index 0000000..d06feee
--- /dev/null
+++ b/test/572-checker-array-get-regression/info.txt
@@ -0,0 +1,3 @@
+Regression test for the ARM64 Baker's read barrier fast path compiler
+instrumentation of array loads with a large constant index, where we
+used to require too many scratch (temporary) registers.
diff --git a/test/572-checker-array-get-regression/src/Main.java b/test/572-checker-array-get-regression/src/Main.java
new file mode 100644
index 0000000..a9bf326
--- /dev/null
+++ b/test/572-checker-array-get-regression/src/Main.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+  public static void main(String[] args) {
+    System.out.println(test().intValue());
+  }
+
+  /// CHECK-START: java.lang.Integer Main.test() ssa_builder (after)
+  /// CHECK-DAG:     <<Method:[ij]\d+>>    CurrentMethod
+  /// CHECK-DAG:     <<Const2P19:i\d+>>    IntConstant 524288
+  /// CHECK-DAG:     <<ConstM1:i\d+>>      IntConstant -1
+  /// CHECK-DAG:     <<Array:l\d+>>        NewArray [<<Const2P19>>,<<Method>>]
+  /// CHECK-DAG:     <<NullCheck1:l\d+>>   NullCheck [<<Array>>]
+  /// CHECK-DAG:     <<Length1:i\d+>>      ArrayLength [<<NullCheck1>>]
+  /// CHECK-DAG:     <<Index:i\d+>>        Add [<<Length1>>,<<ConstM1>>]
+  /// CHECK-DAG:     <<NullCheck2:l\d+>>   NullCheck [<<Array>>]
+  /// CHECK-DAG:     <<Length2:i\d+>>      ArrayLength [<<NullCheck2>>]
+  /// CHECK-DAG:     <<BoundsCheck:i\d+>>  BoundsCheck [<<Index>>,<<Length2>>]
+  /// CHECK-DAG:     <<LastElement:l\d+>>  ArrayGet [<<NullCheck2>>,<<BoundsCheck>>]
+  /// CHECK-DAG:                           Return [<<LastElement>>]
+
+
+  /// CHECK-START: java.lang.Integer Main.test() register (before)
+  /// CHECK-DAG:     <<Method:[ij]\d+>>    CurrentMethod
+  /// CHECK-DAG:     <<Const2P19:i\d+>>    IntConstant 524288
+  /// CHECK-DAG:     <<Const2P19M1:i\d+>>  IntConstant 524287
+  /// CHECK-DAG:     <<Array:l\d+>>        NewArray [<<Const2P19>>,<<Method>>]
+  /// CHECK-DAG:     <<LastElement:l\d+>>  ArrayGet [<<Array>>,<<Const2P19M1>>]
+  /// CHECK-DAG:                           Return [<<LastElement>>]
+
+  public static Integer test() {
+    Integer[] integers = new Integer[1 << 19];
+    initIntegerArray(integers);
+    // Array load with a large constant index (after constant folding
+    // and bounds check elimination).
+    Integer last_integer = integers[integers.length - 1];
+    return last_integer;
+  }
+
+  public static void initIntegerArray(Integer[] integers) {
+    for (int i = 0; i < integers.length; ++i) {
+      integers[i] = new Integer(i);
+    }
+  }
+
+}
diff --git a/test/971-iface-super/util-src/generate_smali.py b/test/971-iface-super/util-src/generate_smali.py
index f01c904..3681411 100755
--- a/test/971-iface-super/util-src/generate_smali.py
+++ b/test/971-iface-super/util-src/generate_smali.py
@@ -39,7 +39,7 @@
 import string
 
 # The max depth the type tree can have.
-MAX_IFACE_DEPTH = 3
+MAX_IFACE_DEPTH = 2
 
 class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin):
   """
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index faaf1f0..e547c72 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -40,7 +40,8 @@
   466-get-live-vreg/get_live_vreg_jni.cc \
   497-inlining-and-class-loader/clear_dex_cache.cc \
   543-env-long-ref/env_long_ref.cc \
-  566-polymorphic-inlining/polymorphic_inline.cc
+  566-polymorphic-inlining/polymorphic_inline.cc \
+  570-checker-osr/osr.cc
 
 ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
 ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttestd.so
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 870b514..9dcd4dc 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -235,6 +235,18 @@
         $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
 endif
 
+# 569-checker-pattern-replacement tests behaviour present only on host.
+TEST_ART_BROKEN_TARGET_TESTS := \
+  569-checker-pattern-replacement
+
+ifneq (,$(filter target,$(TARGET_TYPES)))
+  ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
+      $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+      $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_TARGET_TESTS), $(ALL_ADDRESS_SIZES))
+endif
+
+TEST_ART_BROKEN_TARGET_TESTS :=
+
 # Tests that require python3.
 TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS := \
   960-default-smali \
@@ -302,12 +314,7 @@
 
 # Temporarily disable some broken tests when forcing access checks in interpreter b/22414682
 TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS := \
-  135-MirandaDispatch \
-  137-cfi \
-  412-new-array \
-  471-uninitialized-locals \
-  506-verify-aput \
-  800-smali
+  137-cfi
 
 ifneq (,$(filter interp-ac,$(COMPILER_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -406,13 +413,14 @@
 
 # 137:
 # This test unrolls and expects managed frames, but tracing means we run the interpreter.
-# 802:
+# 802 and 570-checker-osr:
 # This test dynamically enables tracing to force a deoptimization. This makes the test meaningless
 # when already tracing, and writes an error message that we do not want to check for.
 TEST_ART_BROKEN_TRACING_RUN_TESTS := \
   087-gc-after-link \
   137-cfi \
   141-class-unload \
+  570-checker-osr \
   802-deoptimization
 
 ifneq (,$(filter trace stream,$(TRACE_TYPES)))
@@ -438,7 +446,9 @@
 # Known broken tests for the JIT.
 # CFI unwinding expects managed frames, and the test does not iterate enough to even compile. JIT
 # also uses Generic JNI instead of the JNI compiler.
+# 570 is disabled while investigating osr flakiness.
 TEST_ART_BROKEN_JIT_RUN_TESTS := \
+  570-checker-osr \
   137-cfi
 
 ifneq (,$(filter jit,$(COMPILER_TYPES)))
@@ -464,10 +474,7 @@
 
 # Known broken tests for the mips32 optimizing compiler backend.
 TEST_ART_BROKEN_OPTIMIZING_MIPS_RUN_TESTS := \
-    441-checker-inliner \
     510-checker-try-catch \
-    536-checker-intrinsic-optimization \
-    557-checker-instruction-simplifier-ror \
 
 ifeq (mips,$(TARGET_ARCH))
   ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
@@ -482,7 +489,6 @@
 
 # Known broken tests for the mips64 optimizing compiler backend.
 TEST_ART_BROKEN_OPTIMIZING_MIPS64_RUN_TESTS := \
-    557-checker-instruction-simplifier-ror \
 
 ifeq (mips64,$(TARGET_ARCH))
   ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 6d67f84..44206df 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -171,7 +171,7 @@
   bug: 25437292
 },
 {
-  description: "Failing tests after enso move.",
+  description: "Failing tests after OpenJDK move.",
   result: EXEC_FAILED,
   bug: 26326992,
   names: ["libcore.icu.RelativeDateTimeFormatterTest#test_getRelativeDateTimeStringDST",
@@ -196,6 +196,12 @@
           "org.apache.harmony.tests.java.text.DecimalFormatSymbolsTest#test_setInternationalCurrencySymbolLjava_lang_String",
           "org.apache.harmony.tests.java.text.DecimalFormatTest#testSerializationHarmonyRICompatible",
           "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parseLjava_lang_StringLjava_text_ParsePosition",
+          "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_W_w_dd_MMMM_yyyy_EEEE",
+          "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_dayOfYearPatterns",
+          "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_h_m_z",
+          "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_h_z_2DigitOffsetFromGMT",
+          "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_h_z_4DigitOffsetFromGMT",
+          "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_h_z_4DigitOffsetNoGMT",
           "org.apache.harmony.tests.java.util.jar.JarFileTest#test_getInputStreamLjava_util_jar_JarEntry_subtest0",
           "libcore.java.util.CalendarTest#test_clear_45877",
           "org.apache.harmony.crypto.tests.javax.crypto.spec.SecretKeySpecTest#testGetFormat",
diff --git a/tools/libcore_failures_concurrent_collector.txt b/tools/libcore_failures_concurrent_collector.txt
index 95d1292..d8ef9ba 100644
--- a/tools/libcore_failures_concurrent_collector.txt
+++ b/tools/libcore_failures_concurrent_collector.txt
@@ -27,7 +27,8 @@
   description: "TimeoutException on host-{x86,x86-64}-concurrent-collector",
   result: EXEC_FAILED,
   modes: [host],
-  names: ["libcore.java.util.zip.DeflaterOutputStreamTest#testSyncFlushDisabled",
+  names: ["libcore.java.util.zip.DeflaterOutputStreamTest#testSyncFlushEnabled",
+          "libcore.java.util.zip.DeflaterOutputStreamTest#testSyncFlushDisabled",
           "libcore.java.util.zip.GZIPOutputStreamTest#testSyncFlushEnabled",
           "libcore.java.util.zip.OldAndroidGZIPStreamTest#testGZIPStream",
           "libcore.java.util.zip.OldAndroidZipStreamTest#testZipStream",
@@ -40,7 +41,8 @@
   result: EXEC_FAILED,
   modes: [device],
   names: ["libcore.icu.RelativeDateTimeFormatterTest#test_bug25821045",
-          "libcore.java.text.SimpleDateFormatTest#testLocales"],
+          "libcore.java.text.SimpleDateFormatTest#testLocales",
+          "libcore.java.util.zip.ZipFileTest#testZipFileWithLotsOfEntries"],
   bug: 26711853
 }
 ]